-rw-r--r--.cirrus.yml247
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md6
-rw-r--r--.golangci.yml6
-rw-r--r--CONTRIBUTING.md10
-rw-r--r--Containerfile-testvol10
-rw-r--r--Makefile84
-rw-r--r--README.md14
-rw-r--r--RELEASE_NOTES.md45
-rw-r--r--RELEASE_PROCESS.md2
-rw-r--r--cmd/podman-mac-helper/install.go18
-rw-r--r--cmd/podman-mac-helper/main.go8
-rw-r--r--cmd/podman-mac-helper/uninstall.go5
-rw-r--r--cmd/podman/auto-update.go3
-rw-r--r--cmd/podman/common/completion.go347
-rw-r--r--cmd/podman/common/completion_test.go72
-rw-r--r--cmd/podman/common/create.go83
-rw-r--r--cmd/podman/common/create_opts.go491
-rw-r--r--cmd/podman/common/default.go3
-rw-r--r--cmd/podman/common/diffChanges.go3
-rw-r--r--cmd/podman/common/netflags.go27
-rw-r--r--cmd/podman/containers/attach.go4
-rw-r--r--cmd/podman/containers/checkpoint.go14
-rw-r--r--cmd/podman/containers/cleanup.go12
-rw-r--r--cmd/podman/containers/clone.go8
-rw-r--r--cmd/podman/containers/commit.go3
-rw-r--r--cmd/podman/containers/cp.go61
-rw-r--r--cmd/podman/containers/create.go57
-rw-r--r--cmd/podman/containers/diff.go9
-rw-r--r--cmd/podman/containers/exec.go6
-rw-r--r--cmd/podman/containers/export.go4
-rw-r--r--cmd/podman/containers/init.go2
-rw-r--r--cmd/podman/containers/kill.go8
-rw-r--r--cmd/podman/containers/logs.go7
-rw-r--r--cmd/podman/containers/mount.go8
-rw-r--r--cmd/podman/containers/pause.go4
-rw-r--r--cmd/podman/containers/port.go16
-rw-r--r--cmd/podman/containers/ps.go10
-rw-r--r--cmd/podman/containers/rename.go5
-rw-r--r--cmd/podman/containers/restart.go7
-rw-r--r--cmd/podman/containers/restore.go2
-rw-r--r--cmd/podman/containers/rm.go23
-rw-r--r--cmd/podman/containers/run.go25
-rw-r--r--cmd/podman/containers/start.go16
-rw-r--r--cmd/podman/containers/stats.go20
-rw-r--r--cmd/podman/containers/stop.go7
-rw-r--r--cmd/podman/containers/top.go4
-rw-r--r--cmd/podman/containers/unmount.go4
-rw-r--r--cmd/podman/containers/unpause.go4
-rw-r--r--cmd/podman/containers/wait.go4
-rw-r--r--cmd/podman/diff.go3
-rw-r--r--cmd/podman/diff/diff.go10
-rw-r--r--cmd/podman/early_init_linux.go5
-rw-r--r--cmd/podman/generate/kube.go5
-rw-r--r--cmd/podman/generate/systemd.go6
-rw-r--r--cmd/podman/images/build.go77
-rw-r--r--cmd/podman/images/diff.go4
-rw-r--r--cmd/podman/images/history.go3
-rw-r--r--cmd/podman/images/import.go10
-rw-r--r--cmd/podman/images/list.go4
-rw-r--r--cmd/podman/images/load.go10
-rw-r--r--cmd/podman/images/mount.go19
-rw-r--r--cmd/podman/images/pull.go4
-rw-r--r--cmd/podman/images/push.go1
-rw-r--r--cmd/podman/images/rm.go6
-rw-r--r--cmd/podman/images/save.go13
-rw-r--r--cmd/podman/images/scp.go302
-rw-r--r--cmd/podman/images/scp_test.go46
-rw-r--r--cmd/podman/images/scp_utils.go88
-rw-r--r--cmd/podman/images/search.go13
-rw-r--r--cmd/podman/images/sign.go4
-rw-r--r--cmd/podman/images/trust_set.go12
-rw-r--r--cmd/podman/images/unmount.go2
-rw-r--r--cmd/podman/images/utils_linux.go4
-rw-r--r--cmd/podman/inspect/inspect.go31
-rw-r--r--cmd/podman/machine/info.go182
-rw-r--r--cmd/podman/machine/init.go28
-rw-r--r--cmd/podman/machine/inspect.go1
-rw-r--r--cmd/podman/machine/list.go52
-rw-r--r--cmd/podman/machine/machine.go20
-rw-r--r--cmd/podman/machine/machine_unix.go11
-rw-r--r--cmd/podman/machine/machine_windows.go9
-rw-r--r--cmd/podman/machine/rm.go1
-rw-r--r--cmd/podman/machine/set.go1
-rw-r--r--cmd/podman/machine/ssh.go17
-rw-r--r--cmd/podman/machine/start.go6
-rw-r--r--cmd/podman/machine/stop.go1
-rw-r--r--cmd/podman/manifest/push.go6
-rw-r--r--cmd/podman/manifest/remove.go3
-rw-r--r--cmd/podman/networks/create.go10
-rw-r--r--cmd/podman/networks/reload.go2
-rw-r--r--cmd/podman/networks/rm.go12
-rw-r--r--cmd/podman/parse/filters.go5
-rw-r--r--cmd/podman/parse/net.go50
-rw-r--r--cmd/podman/parse/net_test.go2
-rw-r--r--cmd/podman/play/kube.go12
-rw-r--r--cmd/podman/pods/clone.go92
-rw-r--r--cmd/podman/pods/create.go40
-rw-r--r--cmd/podman/pods/inspect.go6
-rw-r--r--cmd/podman/pods/kill.go2
-rw-r--r--cmd/podman/pods/logs.go9
-rw-r--r--cmd/podman/pods/pause.go2
-rw-r--r--cmd/podman/pods/ps.go13
-rw-r--r--cmd/podman/pods/restart.go2
-rw-r--r--cmd/podman/pods/rm.go10
-rw-r--r--cmd/podman/pods/start.go2
-rw-r--r--cmd/podman/pods/stop.go2
-rw-r--r--cmd/podman/pods/top.go4
-rw-r--r--cmd/podman/pods/unpause.go8
-rw-r--r--cmd/podman/registry/config.go9
-rw-r--r--cmd/podman/root.go44
-rw-r--r--cmd/podman/root_test.go4
-rw-r--r--cmd/podman/secrets/create.go4
-rw-r--r--cmd/podman/secrets/inspect.go3
-rw-r--r--cmd/podman/secrets/list.go4
-rw-r--r--cmd/podman/system/connection/add.go125
-rw-r--r--cmd/podman/system/connection/remove.go3
-rw-r--r--cmd/podman/system/connection/shared.go27
-rw-r--r--cmd/podman/system/df.go50
-rw-r--r--cmd/podman/system/dial_stdio.go12
-rw-r--r--cmd/podman/system/prune.go6
-rw-r--r--cmd/podman/system/reset.go14
-rw-r--r--cmd/podman/system/service_abi.go38
-rw-r--r--cmd/podman/system/unshare.go27
-rw-r--r--cmd/podman/utils/error.go32
-rw-r--r--cmd/podman/utils/signals_linux.go15
-rw-r--r--cmd/podman/utils/signals_windows.go15
-rw-r--r--cmd/podman/utils/utils.go19
-rw-r--r--cmd/podman/validate/args.go113
-rw-r--r--cmd/podman/volumes/create.go9
-rw-r--r--cmd/podman/volumes/export.go8
-rw-r--r--cmd/podman/volumes/import.go9
-rw-r--r--cmd/podman/volumes/inspect.go3
-rw-r--r--cmd/podman/volumes/list.go4
-rw-r--r--cmd/podman/volumes/reload.go52
-rw-r--r--cmd/podman/volumes/rm.go12
-rw-r--r--cmd/rootlessport/main.go14
-rw-r--r--cmd/rootlessport/wsl_test.go2
-rw-r--r--cmd/winpath/main.go20
-rw-r--r--commands-demo.md2
-rw-r--r--completions/bash/podman177
-rw-r--r--completions/bash/podman-remote177
-rw-r--r--completions/fish/podman-remote.fish3
-rw-r--r--completions/fish/podman.fish3
-rw-r--r--completions/powershell/podman-remote.ps15
-rw-r--r--completions/powershell/podman.ps15
-rw-r--r--completions/zsh/_podman32
-rw-r--r--completions/zsh/_podman-remote32
-rw-r--r--contrib/cirrus/CIModes.md129
-rwxr-xr-xcontrib/cirrus/cirrus_yaml_test.py2
-rwxr-xr-xcontrib/cirrus/ext_svc_check.sh21
-rw-r--r--contrib/cirrus/lib.sh13
-rwxr-xr-xcontrib/cirrus/logformatter53
-rwxr-xr-xcontrib/cirrus/runner.sh21
-rwxr-xr-xcontrib/cirrus/setup_environment.sh38
-rw-r--r--contrib/msi/podman.wxs5
-rw-r--r--contrib/podmanimage/README.md12
-rw-r--r--contrib/podmanimage/stable/Containerfile58
-rw-r--r--contrib/podmanimage/stable/Dockerfile36
-rw-r--r--contrib/podmanimage/testing/Containerfile63
-rw-r--r--contrib/podmanimage/testing/Dockerfile36
-rw-r--r--contrib/podmanimage/upstream/Containerfile64
-rw-r--r--contrib/podmanimage/upstream/Dockerfile85
-rw-r--r--contrib/systemd/system/podman-kube@.service.in (renamed from contrib/systemd/system/podman-play-kube@.service.in)0
-rw-r--r--contrib/systemd/system/podman-restart.service.in3
-rw-r--r--docs/play_kube_support.md175
-rwxr-xr-xdocs/remote-docs.sh13
-rw-r--r--docs/source/Tutorials.rst6
-rw-r--r--docs/source/markdown/podman-build.1.md95
-rw-r--r--docs/source/markdown/podman-container-cleanup.1.md8
-rw-r--r--docs/source/markdown/podman-container-clone.1.md6
-rw-r--r--docs/source/markdown/podman-container.1.md2
-rw-r--r--docs/source/markdown/podman-cp.1.md4
-rw-r--r--docs/source/markdown/podman-create.1.md96
-rw-r--r--docs/source/markdown/podman-exec.1.md7
-rw-r--r--docs/source/markdown/podman-generate-systemd.1.md11
-rw-r--r--docs/source/markdown/podman-image-scp.1.md12
-rw-r--r--docs/source/markdown/podman-kill.1.md2
-rw-r--r--docs/source/markdown/podman-machine-info.1.md36
-rw-r--r--docs/source/markdown/podman-machine-init.1.md31
-rw-r--r--docs/source/markdown/podman-machine-inspect.1.md2
-rw-r--r--docs/source/markdown/podman-machine-list.1.md9
-rw-r--r--docs/source/markdown/podman-machine-rm.1.md1
-rw-r--r--docs/source/markdown/podman-machine-set.1.md2
-rw-r--r--docs/source/markdown/podman-machine-ssh.1.md32
-rw-r--r--docs/source/markdown/podman-machine-start.1.md7
-rw-r--r--docs/source/markdown/podman-machine-stop.1.md7
-rw-r--r--docs/source/markdown/podman-machine.1.md12
-rw-r--r--docs/source/markdown/podman-network-create.1.md2
-rw-r--r--docs/source/markdown/podman-network-ls.1.md3
-rw-r--r--docs/source/markdown/podman-play-kube.1.md19
-rw-r--r--docs/source/markdown/podman-pod-clone.1.md443
-rw-r--r--docs/source/markdown/podman-pod-create.1.md102
-rw-r--r--docs/source/markdown/podman-pod.1.md1
-rw-r--r--docs/source/markdown/podman-port.1.md2
-rw-r--r--docs/source/markdown/podman-push.1.md2
-rw-r--r--docs/source/markdown/podman-run.1.md91
-rw-r--r--docs/source/markdown/podman-system-prune.1.md6
-rw-r--r--docs/source/markdown/podman-system-service.1.md4
-rw-r--r--docs/source/markdown/podman-system.1.md20
-rw-r--r--docs/source/markdown/podman-volume-create.1.md9
-rw-r--r--docs/source/markdown/podman-volume-import.1.md6
-rw-r--r--docs/source/markdown/podman-volume-reload.1.md29
-rw-r--r--docs/source/markdown/podman-volume.1.md3
-rw-r--r--docs/standalone-styling.css603
-rw-r--r--docs/tutorials/README.md6
-rw-r--r--docs/tutorials/basic_networking.md31
-rw-r--r--docs/tutorials/mac_experimental.md2
-rw-r--r--docs/tutorials/podman-for-windows.md2
-rw-r--r--docs/tutorials/podman_tutorial.md2
-rw-r--r--docs/tutorials/remote_client.md9
-rw-r--r--go.mod33
-rw-r--r--go.sum124
-rwxr-xr-xhack/golangci-lint.sh5
-rwxr-xr-xhack/install_golangci.sh7
-rwxr-xr-xhack/podman-registry24
-rw-r--r--hack/podman-registry-go/registry.go16
-rw-r--r--libpod/boltdb_state.go632
-rw-r--r--libpod/boltdb_state_internal.go200
-rw-r--r--libpod/boltdb_state_linux.go5
-rw-r--r--libpod/container.go101
-rw-r--r--libpod/container_api.go201
-rw-r--r--libpod/container_commit.go10
-rw-r--r--libpod/container_config.go19
-rw-r--r--libpod/container_copy_linux.go21
-rw-r--r--libpod/container_exec.go115
-rw-r--r--libpod/container_graph.go14
-rw-r--r--libpod/container_inspect.go34
-rw-r--r--libpod/container_internal.go336
-rw-r--r--libpod/container_internal_linux.go513
-rw-r--r--libpod/container_log.go22
-rw-r--r--libpod/container_log_linux.go33
-rw-r--r--libpod/container_log_unsupported.go6
-rw-r--r--libpod/container_path_resolution.go4
-rw-r--r--libpod/container_stat_linux.go9
-rw-r--r--libpod/container_top_linux.go16
-rw-r--r--libpod/container_validate.go39
-rw-r--r--libpod/define/container_inspect.go4
-rw-r--r--libpod/define/containerstate.go5
-rw-r--r--libpod/define/errors.go7
-rw-r--r--libpod/define/exec_codes.go6
-rw-r--r--libpod/define/healthchecks.go10
-rw-r--r--libpod/define/info.go12
-rw-r--r--libpod/define/pod_inspect.go5
-rw-r--r--libpod/define/terminal.go7
-rw-r--r--libpod/define/volume_inspect.go8
-rw-r--r--libpod/diff.go5
-rw-r--r--libpod/events.go18
-rw-r--r--libpod/events/config.go7
-rw-r--r--libpod/events/events.go14
-rw-r--r--libpod/events/events_linux.go6
-rw-r--r--libpod/events/events_unsupported.go2
-rw-r--r--libpod/events/filters.go10
-rw-r--r--libpod/events/journal_linux.go30
-rw-r--r--libpod/events/logfile.go6
-rw-r--r--libpod/healthcheck.go61
-rw-r--r--libpod/healthcheck_linux.go81
-rw-r--r--libpod/info.go105
-rw-r--r--libpod/kube.go39
-rw-r--r--libpod/lock/file/file_lock.go34
-rw-r--r--libpod/lock/in_memory_locks.go14
-rw-r--r--libpod/lock/shm/shm_lock.go35
-rw-r--r--libpod/lock/shm_lock_manager_linux.go10
-rw-r--r--libpod/logs/log.go12
-rw-r--r--libpod/logs/reversereader/reversereader.go8
-rw-r--r--libpod/networking_linux.go176
-rw-r--r--libpod/networking_slirp4netns.go104
-rw-r--r--libpod/oci.go39
-rw-r--r--libpod/oci_conmon_attach_linux.go (renamed from libpod/oci_attach_linux.go)70
-rw-r--r--libpod/oci_conmon_exec_linux.go71
-rw-r--r--libpod/oci_conmon_linux.go324
-rw-r--r--libpod/oci_missing.go19
-rw-r--r--libpod/oci_util.go25
-rw-r--r--libpod/options.go100
-rw-r--r--libpod/plugin/volume_api.go104
-rw-r--r--libpod/pod.go61
-rw-r--r--libpod/pod_api.go58
-rw-r--r--libpod/pod_internal.go9
-rw-r--r--libpod/pod_top_linux.go2
-rw-r--r--libpod/reset.go86
-rw-r--r--libpod/runtime.go199
-rw-r--r--libpod/runtime_cstorage.go27
-rw-r--r--libpod/runtime_ctr.go121
-rw-r--r--libpod/runtime_img.go13
-rw-r--r--libpod/runtime_migrate.go15
-rw-r--r--libpod/runtime_pod.go9
-rw-r--r--libpod/runtime_pod_linux.go77
-rw-r--r--libpod/runtime_renumber.go9
-rw-r--r--libpod/runtime_volume.go6
-rw-r--r--libpod/runtime_volume_linux.go154
-rw-r--r--libpod/runtime_worker.go33
-rw-r--r--libpod/service.go4
-rw-r--r--libpod/shutdown/handler.go4
-rw-r--r--libpod/state.go9
-rw-r--r--libpod/stats.go35
-rw-r--r--libpod/storage.go12
-rw-r--r--libpod/util.go21
-rw-r--r--libpod/util_linux.go38
-rw-r--r--libpod/volume.go2
-rw-r--r--libpod/volume_inspect.go8
-rw-r--r--libpod/volume_internal.go10
-rw-r--r--libpod/volume_internal_linux.go13
-rw-r--r--pkg/api/handlers/compat/auth.go4
-rw-r--r--pkg/api/handlers/compat/changes.go6
-rw-r--r--pkg/api/handlers/compat/containers.go54
-rw-r--r--pkg/api/handlers/compat/containers_archive.go52
-rw-r--r--pkg/api/handlers/compat/containers_attach.go13
-rw-r--r--pkg/api/handlers/compat/containers_create.go48
-rw-r--r--pkg/api/handlers/compat/containers_export.go10
-rw-r--r--pkg/api/handlers/compat/containers_logs.go9
-rw-r--r--pkg/api/handlers/compat/containers_prune.go5
-rw-r--r--pkg/api/handlers/compat/containers_restart.go7
-rw-r--r--pkg/api/handlers/compat/containers_stats.go49
-rw-r--r--pkg/api/handlers/compat/containers_stop.go7
-rw-r--r--pkg/api/handlers/compat/containers_top.go3
-rw-r--r--pkg/api/handlers/compat/events.go6
-rw-r--r--pkg/api/handlers/compat/exec.go24
-rw-r--r--pkg/api/handlers/compat/images.go76
-rw-r--r--pkg/api/handlers/compat/images_build.go206
-rw-r--r--pkg/api/handlers/compat/images_history.go6
-rw-r--r--pkg/api/handlers/compat/images_prune.go4
-rw-r--r--pkg/api/handlers/compat/images_push.go12
-rw-r--r--pkg/api/handlers/compat/images_remove.go15
-rw-r--r--pkg/api/handlers/compat/images_save.go2
-rw-r--r--pkg/api/handlers/compat/images_search.go3
-rw-r--r--pkg/api/handlers/compat/images_tag.go8
-rw-r--r--pkg/api/handlers/compat/info.go7
-rw-r--r--pkg/api/handlers/compat/networks.go92
-rw-r--r--pkg/api/handlers/compat/resize.go17
-rw-r--r--pkg/api/handlers/compat/secrets.go12
-rw-r--r--pkg/api/handlers/compat/version.go3
-rw-r--r--pkg/api/handlers/compat/volumes.go21
-rw-r--r--pkg/api/handlers/libpod/containers.go28
-rw-r--r--pkg/api/handlers/libpod/containers_create.go4
-rw-r--r--pkg/api/handlers/libpod/containers_stats.go10
-rw-r--r--pkg/api/handlers/libpod/generate.go10
-rw-r--r--pkg/api/handlers/libpod/images.go157
-rw-r--r--pkg/api/handlers/libpod/images_pull.go5
-rw-r--r--pkg/api/handlers/libpod/manifests.go43
-rw-r--r--pkg/api/handlers/libpod/networks.go18
-rw-r--r--pkg/api/handlers/libpod/play.go14
-rw-r--r--pkg/api/handlers/libpod/pods.go102
-rw-r--r--pkg/api/handlers/libpod/secrets.go4
-rw-r--r--pkg/api/handlers/libpod/swagger_spec.go5
-rw-r--r--pkg/api/handlers/libpod/system.go6
-rw-r--r--pkg/api/handlers/libpod/volumes.go14
-rw-r--r--pkg/api/handlers/swagger/responses.go7
-rw-r--r--pkg/api/handlers/types.go8
-rw-r--r--pkg/api/handlers/utils/containers.go11
-rw-r--r--pkg/api/handlers/utils/errors.go26
-rw-r--r--pkg/api/handlers/utils/handler.go8
-rw-r--r--pkg/api/handlers/utils/images.go25
-rw-r--r--pkg/api/server/listener_api.go11
-rw-r--r--pkg/api/server/register_images.go38
-rw-r--r--pkg/api/server/server.go2
-rw-r--r--pkg/auth/auth.go18
-rw-r--r--pkg/autoupdate/autoupdate.go40
-rw-r--r--pkg/bindings/connection.go31
-rw-r--r--pkg/bindings/containers/archive.go4
-rw-r--r--pkg/bindings/containers/attach.go18
-rw-r--r--pkg/bindings/containers/containers.go11
-rw-r--r--pkg/bindings/containers/exec.go7
-rw-r--r--pkg/bindings/containers/logs.go2
-rw-r--r--pkg/bindings/containers/types.go3
-rw-r--r--pkg/bindings/containers/types_copy_options.go15
-rw-r--r--pkg/bindings/errors.go5
-rw-r--r--pkg/bindings/images/build.go36
-rw-r--r--pkg/bindings/images/build_unix.go2
-rw-r--r--pkg/bindings/images/images.go24
-rw-r--r--pkg/bindings/images/pull.go5
-rw-r--r--pkg/bindings/images/rm.go3
-rw-r--r--pkg/bindings/images/types.go9
-rw-r--r--pkg/bindings/images/types_push_options.go15
-rw-r--r--pkg/bindings/images/types_remove_options.go15
-rw-r--r--pkg/bindings/images/types_scp_options.go12
-rw-r--r--pkg/bindings/manifests/manifests.go31
-rw-r--r--pkg/bindings/manifests/types.go22
-rw-r--r--pkg/bindings/manifests/types_modify_options.go8
-rw-r--r--pkg/bindings/play/play.go1
-rw-r--r--pkg/bindings/system/system.go6
-rw-r--r--pkg/bindings/test/common_test.go3
-rw-r--r--pkg/bindings/test/manifests_test.go13
-rw-r--r--pkg/channel/writer.go3
-rw-r--r--pkg/checkpoint/checkpoint_restore.go31
-rw-r--r--pkg/checkpoint/crutils/checkpoint_restore_utils.go27
-rw-r--r--pkg/copy/fileinfo.go7
-rw-r--r--pkg/copy/parse.go5
-rw-r--r--pkg/criu/criu.go43
-rw-r--r--pkg/criu/criu_linux.go44
-rw-r--r--pkg/criu/criu_unsupported.go8
-rw-r--r--pkg/ctime/ctime_linux.go2
-rw-r--r--pkg/domain/entities/container_ps.go4
-rw-r--r--pkg/domain/entities/containers.go41
-rw-r--r--pkg/domain/entities/engine_container.go2
-rw-r--r--pkg/domain/entities/engine_image.go2
-rw-r--r--pkg/domain/entities/events.go10
-rw-r--r--pkg/domain/entities/images.go22
-rw-r--r--pkg/domain/entities/machine.go18
-rw-r--r--pkg/domain/entities/manifest.go15
-rw-r--r--pkg/domain/entities/network.go2
-rw-r--r--pkg/domain/entities/pods.go38
-rw-r--r--pkg/domain/entities/reports/containers.go2
-rw-r--r--pkg/domain/entities/reports/prune.go2
-rw-r--r--pkg/domain/entities/reports/scp.go5
-rw-r--r--pkg/domain/entities/system.go1
-rw-r--r--pkg/domain/entities/types.go9
-rw-r--r--pkg/domain/entities/volumes.go11
-rw-r--r--pkg/domain/filters/containers.go13
-rw-r--r--pkg/domain/filters/pods.go16
-rw-r--r--pkg/domain/filters/volumes.go16
-rw-r--r--pkg/domain/infra/abi/archive.go4
-rw-r--r--pkg/domain/infra/abi/containers.go154
-rw-r--r--pkg/domain/infra/abi/containers_runlabel.go8
-rw-r--r--pkg/domain/infra/abi/generate.go9
-rw-r--r--pkg/domain/infra/abi/images.go197
-rw-r--r--pkg/domain/infra/abi/images_list.go18
-rw-r--r--pkg/domain/infra/abi/images_test.go36
-rw-r--r--pkg/domain/infra/abi/manifest.go47
-rw-r--r--pkg/domain/infra/abi/network.go107
-rw-r--r--pkg/domain/infra/abi/parse/parse.go24
-rw-r--r--pkg/domain/infra/abi/play.go82
-rw-r--r--pkg/domain/infra/abi/pods.go229
-rw-r--r--pkg/domain/infra/abi/pods_stats.go4
-rw-r--r--pkg/domain/infra/abi/secrets.go9
-rw-r--r--pkg/domain/infra/abi/system.go83
-rw-r--r--pkg/domain/infra/abi/terminal/sigproxy_linux.go6
-rw-r--r--pkg/domain/infra/abi/terminal/terminal.go14
-rw-r--r--pkg/domain/infra/abi/terminal/terminal_linux.go14
-rw-r--r--pkg/domain/infra/abi/trust.go15
-rw-r--r--pkg/domain/infra/abi/volumes.go16
-rw-r--r--pkg/domain/infra/runtime_abi.go2
-rw-r--r--pkg/domain/infra/runtime_libpod.go33
-rw-r--r--pkg/domain/infra/tunnel/auto-update.go2
-rw-r--r--pkg/domain/infra/tunnel/containers.go32
-rw-r--r--pkg/domain/infra/tunnel/events.go7
-rw-r--r--pkg/domain/infra/tunnel/helpers.go13
-rw-r--r--pkg/domain/infra/tunnel/images.go46
-rw-r--r--pkg/domain/infra/tunnel/manifest.go16
-rw-r--r--pkg/domain/infra/tunnel/network.go5
-rw-r--r--pkg/domain/infra/tunnel/pods.go14
-rw-r--r--pkg/domain/infra/tunnel/secrets.go6
-rw-r--r--pkg/domain/infra/tunnel/volumes.go9
-rw-r--r--pkg/domain/utils/scp.go580
-rw-r--r--pkg/domain/utils/secrets_filters.go4
-rw-r--r--pkg/domain/utils/utils_test.go39
-rw-r--r--pkg/env/env.go8
-rw-r--r--pkg/errorhandling/errorhandling.go23
-rw-r--r--pkg/errorhandling/errorhandling_test.go53
-rw-r--r--pkg/hooks/1.0.0/hook.go8
-rw-r--r--pkg/hooks/1.0.0/when.go9
-rw-r--r--pkg/hooks/exec/exec.go3
-rw-r--r--pkg/hooks/exec/runtimeconfigfilter.go4
-rw-r--r--pkg/hooks/exec/runtimeconfigfilter_test.go30
-rw-r--r--pkg/hooks/hooks.go3
-rw-r--r--pkg/hooks/read.go13
-rw-r--r--pkg/inspect/inspect.go15
-rw-r--r--pkg/k8s.io/apimachinery/pkg/api/resource/amount.go2
-rw-r--r--pkg/k8s.io/apimachinery/pkg/api/resource/math.go4
-rw-r--r--pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go9
-rw-r--r--pkg/kubeutils/LICENSE201
-rw-r--r--pkg/machine/config.go50
-rw-r--r--pkg/machine/config_test.go3
-rw-r--r--pkg/machine/connection.go4
-rw-r--r--pkg/machine/e2e/config.go21
-rw-r--r--pkg/machine/e2e/config_info.go20
-rw-r--r--pkg/machine/e2e/info_test.go58
-rw-r--r--pkg/machine/e2e/init_test.go19
-rw-r--r--pkg/machine/e2e/inspect_test.go22
-rw-r--r--pkg/machine/e2e/list_test.go34
-rw-r--r--pkg/machine/e2e/machine_test.go9
-rw-r--r--pkg/machine/e2e/set_test.go13
-rw-r--r--pkg/machine/e2e/ssh_test.go7
-rw-r--r--pkg/machine/fcos.go12
-rw-r--r--pkg/machine/fedora.go6
-rw-r--r--pkg/machine/ignition.go4
-rw-r--r--pkg/machine/keys.go30
-rw-r--r--pkg/machine/qemu/config.go4
-rw-r--r--pkg/machine/qemu/config_test.go3
-rw-r--r--pkg/machine/qemu/machine.go259
-rw-r--r--pkg/machine/qemu/machine_test.go3
-rw-r--r--pkg/machine/qemu/options_darwin_arm64.go23
-rw-r--r--pkg/machine/wsl/machine.go203
-rw-r--r--pkg/machine/wsl/util_windows.go30
-rw-r--r--pkg/namespaces/namespaces.go2
-rw-r--r--pkg/parallel/parallel.go5
-rw-r--r--pkg/ps/ps.go23
-rw-r--r--pkg/resolvconf/dns/resolvconf.go28
-rw-r--r--pkg/rootless/rootless.go2
-rw-r--r--pkg/rootless/rootless_linux.c24
-rw-r--r--pkg/rootless/rootless_linux.go63
-rw-r--r--pkg/rootless/rootless_unsupported.go2
-rw-r--r--pkg/seccomp/seccomp.go5
-rw-r--r--pkg/signal/signal_common.go21
-rw-r--r--pkg/signal/signal_linux.go21
-rw-r--r--pkg/signal/signal_linux_mipsx.go21
-rw-r--r--pkg/signal/signal_unix.go15
-rw-r--r--pkg/signal/signal_unsupported.go15
-rw-r--r--pkg/specgen/config_unsupported.go3
-rw-r--r--pkg/specgen/container_validate.go44
-rw-r--r--pkg/specgen/generate/config_linux.go150
-rw-r--r--pkg/specgen/generate/config_linux_cgo.go11
-rw-r--r--pkg/specgen/generate/container.go38
-rw-r--r--pkg/specgen/generate/container_create.go51
-rw-r--r--pkg/specgen/generate/kube/kube.go67
-rw-r--r--pkg/specgen/generate/kube/play_test.go4
-rw-r--r--pkg/specgen/generate/kube/seccomp.go6
-rw-r--r--pkg/specgen/generate/kube/volume.go31
-rw-r--r--pkg/specgen/generate/namespaces.go94
-rw-r--r--pkg/specgen/generate/oci.go43
-rw-r--r--pkg/specgen/generate/pod_create.go107
-rw-r--r--pkg/specgen/generate/ports.go27
-rw-r--r--pkg/specgen/generate/security.go27
-rw-r--r--pkg/specgen/generate/storage.go38
-rw-r--r--pkg/specgen/generate/validate.go24
-rw-r--r--pkg/specgen/namespaces.go116
-rw-r--r--pkg/specgen/namespaces_test.go25
-rw-r--r--pkg/specgen/pod_validate.go6
-rw-r--r--pkg/specgen/podspecgen.go11
-rw-r--r--pkg/specgen/specgen.go12
-rw-r--r--pkg/specgen/volumes.go55
-rw-r--r--pkg/specgen/winpath.go3
-rw-r--r--pkg/specgenutil/createparse.go22
-rw-r--r--pkg/specgenutil/ports.go5
-rw-r--r--pkg/specgenutil/specgen.go133
-rw-r--r--pkg/specgenutil/specgenutil_test.go79
-rw-r--r--pkg/specgenutil/util.go47
-rw-r--r--pkg/specgenutil/volumes.go132
-rw-r--r--pkg/systemd/generate/common.go4
-rw-r--r--pkg/systemd/generate/containers.go14
-rw-r--r--pkg/systemd/generate/pods.go20
-rw-r--r--pkg/trust/trust.go12
-rw-r--r--pkg/util/filters.go4
-rw-r--r--pkg/util/mountOpts.go59
-rw-r--r--pkg/util/utils.go68
-rw-r--r--pkg/util/utils_darwin.go2
-rw-r--r--pkg/util/utils_linux.go146
-rw-r--r--pkg/util/utils_supported.go10
-rw-r--r--pkg/util/utils_unsupported.go6
-rw-r--r--pkg/util/utils_windows.go11
-rw-r--r--podman.spec.rpkg4
-rw-r--r--rootless.md4
-rw-r--r--test/apiv2/10-images.at14
-rw-r--r--test/apiv2/12-imagesMore.at67
-rw-r--r--test/apiv2/15-manifest.at24
-rw-r--r--test/apiv2/20-containers.at52
-rw-r--r--test/apiv2/27-containersEvents.at4
-rw-r--r--test/apiv2/35-networks.at4
-rw-r--r--test/apiv2/40-pods.at2
-rw-r--r--test/apiv2/60-auth.at4
-rw-r--r--test/apiv2/70-short-names.at23
-rw-r--r--test/apiv2/python/rest_api/fixtures/api_testcase.py4
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_container.py47
-rwxr-xr-xtest/apiv2/test-apiv2139
-rwxr-xr-xtest/buildah-bud/apply-podman-deltas41
-rw-r--r--test/buildah-bud/buildah-tests.diff25
-rwxr-xr-xtest/buildah-bud/run-buildah-bud-tests6
-rw-r--r--test/compose/disable_healthcheck/docker-compose.yml10
-rw-r--r--test/compose/disable_healthcheck/tests.sh2
-rw-r--r--test/compose/update_network_mtu/docker-compose.yml26
-rw-r--r--test/compose/update_network_mtu/tests.sh10
-rw-r--r--test/e2e/benchmarks_test.go2
-rw-r--r--test/e2e/build/Containerfile.with-platform1
-rw-r--r--test/e2e/build_test.go30
-rw-r--r--test/e2e/checkpoint_test.go140
-rw-r--r--test/e2e/commit_test.go2
-rw-r--r--test/e2e/common_test.go54
-rw-r--r--test/e2e/config.go8
-rw-r--r--test/e2e/config_amd64.go24
-rw-r--r--test/e2e/config_ppc64le.go8
-rw-r--r--test/e2e/container_inspect_test.go2
-rw-r--r--test/e2e/create_staticip_test.go2
-rw-r--r--test/e2e/create_staticmac_test.go2
-rw-r--r--test/e2e/create_test.go6
-rw-r--r--test/e2e/events_test.go21
-rw-r--r--test/e2e/generate_systemd_test.go8
-rw-r--r--test/e2e/healthcheck_run_test.go10
-rw-r--r--test/e2e/image_scp_test.go32
-rw-r--r--test/e2e/inspect_test.go4
-rw-r--r--test/e2e/kill_test.go20
-rw-r--r--test/e2e/libpod_suite_test.go5
-rw-r--r--test/e2e/logs_test.go33
-rw-r--r--test/e2e/manifest_test.go68
-rw-r--r--test/e2e/network_connect_disconnect_test.go19
-rw-r--r--test/e2e/network_test.go36
-rw-r--r--test/e2e/pause_test.go6
-rw-r--r--test/e2e/play_kube_test.go28
-rw-r--r--test/e2e/pod_clone_test.go194
-rw-r--r--test/e2e/pod_create_test.go103
-rw-r--r--test/e2e/pod_infra_container_test.go29
-rw-r--r--test/e2e/pod_pause_test.go47
-rw-r--r--test/e2e/prune_test.go23
-rw-r--r--test/e2e/pull_test.go8
-rw-r--r--test/e2e/push_test.go10
-rw-r--r--test/e2e/rmi_test.go38
-rw-r--r--test/e2e/run_aardvark_test.go55
-rw-r--r--test/e2e/run_networking_test.go4
-rw-r--r--test/e2e/run_privileged_test.go24
-rw-r--r--test/e2e/run_staticip_test.go4
-rw-r--r--test/e2e/run_test.go54
-rw-r--r--test/e2e/run_volume_test.go110
-rw-r--r--test/e2e/save_test.go10
-rw-r--r--test/e2e/search_test.go15
-rw-r--r--test/e2e/stats_test.go11
-rw-r--r--test/e2e/system_connection_test.go24
-rw-r--r--test/e2e/system_df_test.go24
-rw-r--r--test/e2e/system_reset_test.go12
-rw-r--r--test/e2e/systemd_activate_test.go57
-rw-r--r--test/e2e/systemd_test.go2
-rw-r--r--test/e2e/tree_test.go2
-rw-r--r--test/e2e/untag_test.go12
-rw-r--r--test/e2e/volume_create_test.go32
-rw-r--r--test/e2e/volume_ls_test.go31
-rw-r--r--test/e2e/volume_plugin_test.go68
-rw-r--r--test/framework/framework.go2
-rw-r--r--test/system/010-images.bats5
-rw-r--r--test/system/015-help.bats60
-rw-r--r--test/system/030-run.bats44
-rw-r--r--test/system/050-stop.bats15
-rw-r--r--test/system/055-rm.bats10
-rw-r--r--test/system/060-mount.bats10
-rw-r--r--test/system/065-cp.bats104
-rw-r--r--test/system/070-build.bats9
-rw-r--r--test/system/120-load.bats26
-rw-r--r--test/system/130-kill.bats10
-rw-r--r--test/system/150-login.bats2
-rw-r--r--test/system/160-volumes.bats62
-rw-r--r--test/system/170-run-userns.bats39
-rw-r--r--test/system/200-pod.bats68
-rw-r--r--test/system/250-systemd.bats70
-rw-r--r--test/system/410-selinux.bats6
-rw-r--r--test/system/500-networking.bats29
-rw-r--r--test/system/520-checkpoint.bats30
-rw-r--r--test/system/600-completion.bats102
-rwxr-xr-xtest/system/build-testimage76
-rw-r--r--test/system/helpers.bash47
-rw-r--r--test/system/helpers.systemd.bash4
-rw-r--r--test/testvol/Containerfile9
-rw-r--r--test/testvol/create.go26
-rw-r--r--test/testvol/list.go32
-rw-r--r--test/testvol/main.go53
-rw-r--r--test/testvol/remove.go26
-rw-r--r--test/testvol/util.go29
-rw-r--r--test/tools/go.mod2
-rw-r--r--test/tools/go.sum11
-rw-r--r--test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile1
-rw-r--r--test/tools/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md1
-rw-r--r--test/tools/vendor/github.com/hashicorp/go-version/.travis.yml8
-rw-r--r--test/tools/vendor/github.com/hashicorp/go-version/LICENSE1
-rw-r--r--test/tools/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md2
-rw-r--r--test/tools/vendor/github.com/sirupsen/logrus/CHANGELOG.md2
-rw-r--r--test/tools/vendor/github.com/sirupsen/logrus/README.md2
-rw-r--r--test/tools/vendor/github.com/sirupsen/logrus/appveyor.yml8
-rw-r--r--test/tools/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go1
-rw-r--r--test/tools/vendor/github.com/sirupsen/logrus/terminal_check_unix.go1
-rw-r--r--test/tools/vendor/github.com/vbatts/git-validation/README.md3
-rw-r--r--test/tools/vendor/golang.org/x/mod/module/module.go6
-rw-r--r--test/tools/vendor/golang.org/x/tools/cmd/goimports/doc.go25
-rw-r--r--test/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go29
-rw-r--r--test/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go7
-rw-r--r--test/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go11
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go6
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go6
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go20
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/imports/imports.go27
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go39
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/imports/zstdlib.go61
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/typeparams/common.go21
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/typeparams/coretype.go122
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go12
-rw-r--r--test/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go9
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/LICENSE27
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/PATENTS22
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/README2
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/adaptor.go193
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/codereview.cfg1
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/doc.go22
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/errors.go33
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/fmt.go187
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/format.go34
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/frame.go56
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/go.mod3
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/internal/internal.go8
-rw-r--r--test/tools/vendor/golang.org/x/xerrors/wrap.go106
-rw-r--r--test/tools/vendor/modules.txt7
-rw-r--r--troubleshooting.md75
-rw-r--r--utils/ports.go9
-rw-r--r--utils/testdata/cgroup.empty0
-rw-r--r--utils/testdata/cgroup.other1
-rw-r--r--utils/testdata/cgroup.root1
-rw-r--r--utils/utils.go85
-rw-r--r--utils/utils_supported.go23
-rw-r--r--utils/utils_test.go26
-rw-r--r--utils/utils_windows.go6
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go188
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go9
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go111
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go499
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go315
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go111
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go3
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go9
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go57
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go2
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go22
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/invoke/exec.go45
-rw-r--r--vendor/github.com/containers/buildah/.cirrus.yml6
-rw-r--r--vendor/github.com/containers/buildah/CHANGELOG.md5
-rw-r--r--vendor/github.com/containers/buildah/Makefile9
-rw-r--r--vendor/github.com/containers/buildah/bind/mount.go8
-rw-r--r--vendor/github.com/containers/buildah/bind/util.go2
-rw-r--r--vendor/github.com/containers/buildah/changelog.txt4
-rw-r--r--vendor/github.com/containers/buildah/chroot/seccomp.go3
-rw-r--r--vendor/github.com/containers/buildah/commit.go4
-rw-r--r--vendor/github.com/containers/buildah/config.go2
-rw-r--r--vendor/github.com/containers/buildah/copier/copier.go27
-rw-r--r--vendor/github.com/containers/buildah/define/build.go29
-rw-r--r--vendor/github.com/containers/buildah/define/types.go37
-rw-r--r--vendor/github.com/containers/buildah/define/types_unix.go11
-rw-r--r--vendor/github.com/containers/buildah/go.mod109
-rw-r--r--vendor/github.com/containers/buildah/go.sum101
-rw-r--r--vendor/github.com/containers/buildah/image.go93
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/build.go54
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/executor.go8
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/stage_executor.go215
-rw-r--r--vendor/github.com/containers/buildah/install.md8
-rw-r--r--vendor/github.com/containers/buildah/internal/parse/parse.go11
-rw-r--r--vendor/github.com/containers/buildah/internal/types.go6
-rw-r--r--vendor/github.com/containers/buildah/internal/util/util.go56
-rw-r--r--vendor/github.com/containers/buildah/pkg/cli/build.go373
-rw-r--r--vendor/github.com/containers/buildah/pkg/cli/common.go22
-rw-r--r--vendor/github.com/containers/buildah/pkg/overlay/overlay.go4
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse.go27
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse_unix.go10
-rw-r--r--vendor/github.com/containers/buildah/run.go25
-rw-r--r--vendor/github.com/containers/buildah/run_linux.go294
-rw-r--r--vendor/github.com/containers/buildah/run_unix.go14
-rw-r--r--vendor/github.com/containers/buildah/util/util.go17
-rw-r--r--vendor/github.com/containers/common/libimage/copier.go4
-rw-r--r--vendor/github.com/containers/common/libimage/define/search.go13
-rw-r--r--vendor/github.com/containers/common/libimage/inspect.go4
-rw-r--r--vendor/github.com/containers/common/libimage/load.go5
-rw-r--r--vendor/github.com/containers/common/libimage/normalize.go38
-rw-r--r--vendor/github.com/containers/common/libimage/platform.go87
-rw-r--r--vendor/github.com/containers/common/libimage/pull.go31
-rw-r--r--vendor/github.com/containers/common/libimage/runtime.go74
-rw-r--r--vendor/github.com/containers/common/libimage/search.go9
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/cni_exec.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/cni_types.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/config.go14
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go17
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/config_linux.go20
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/network.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/run.go15
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go13
-rw-r--r--vendor/github.com/containers/common/libnetwork/cni/run_linux.go17
-rw-r--r--vendor/github.com/containers/common/libnetwork/netavark/config.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/netavark/const.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/netavark/exec.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/netavark/ipam.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/netavark/network.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/netavark/run.go4
-rw-r--r--vendor/github.com/containers/common/libnetwork/network/interface.go10
-rw-r--r--vendor/github.com/containers/common/libnetwork/network/interface_freebsd.go10
-rw-r--r--vendor/github.com/containers/common/libnetwork/network/interface_linux.go10
-rw-r--r--vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go182
-rw-r--r--vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go (renamed from pkg/resolvconf/resolvconf.go)158
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/blkio.go3
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go161
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/cgroups.go62
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go575
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go2
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/cpu.go74
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go100
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/cpuset.go41
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go57
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/memory.go3
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/memory_linux.go78
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/pids.go3
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/pids_linux.go71
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/systemd.go7
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go167
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/utils.go176
-rw-r--r--vendor/github.com/containers/common/pkg/cgroups/utils_linux.go146
-rw-r--r--vendor/github.com/containers/common/pkg/config/config.go80
-rw-r--r--vendor/github.com/containers/common/pkg/config/config_local.go14
-rw-r--r--vendor/github.com/containers/common/pkg/config/containers.conf10
-rw-r--r--vendor/github.com/containers/common/pkg/config/default.go108
-rw-r--r--vendor/github.com/containers/common/pkg/config/pull_policy.go6
-rw-r--r--vendor/github.com/containers/common/pkg/parse/parse.go7
-rw-r--r--vendor/github.com/containers/common/pkg/resize/resize.go (renamed from pkg/kubeutils/resize.go)14
-rw-r--r--vendor/github.com/containers/common/pkg/retry/retry.go41
-rw-r--r--vendor/github.com/containers/common/pkg/seccomp/default_linux.go3
-rw-r--r--vendor/github.com/containers/common/pkg/seccomp/filter.go3
-rw-r--r--vendor/github.com/containers/common/pkg/seccomp/seccomp.json3
-rw-r--r--vendor/github.com/containers/common/pkg/util/copy.go57
-rw-r--r--vendor/github.com/containers/image/v5/copy/blob.go170
-rw-r--r--vendor/github.com/containers/image/v5/copy/compression.go320
-rw-r--r--vendor/github.com/containers/image/v5/copy/copy.go440
-rw-r--r--vendor/github.com/containers/image/v5/copy/encrypt.go24
-rw-r--r--vendor/github.com/containers/image/v5/copy/encryption.go129
-rw-r--r--vendor/github.com/containers/image/v5/copy/manifest.go68
-rw-r--r--vendor/github.com/containers/image/v5/directory/directory_transport.go5
-rw-r--r--vendor/github.com/containers/image/v5/docker/archive/transport.go8
-rw-r--r--vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go8
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_client.go5
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_image.go2
-rw-r--r--vendor/github.com/containers/image/v5/image/docker_schema2.go392
-rw-r--r--vendor/github.com/containers/image/v5/image/sourced.go73
-rw-r--r--vendor/github.com/containers/image/v5/image/unparsed.go82
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/docker_list.go (renamed from vendor/github.com/containers/image/v5/image/docker_list.go)0
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/docker_schema1.go (renamed from vendor/github.com/containers/image/v5/image/docker_schema1.go)9
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/docker_schema2.go413
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/manifest.go (renamed from vendor/github.com/containers/image/v5/image/manifest.go)15
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/memory.go (renamed from vendor/github.com/containers/image/v5/image/memory.go)0
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/oci.go (renamed from vendor/github.com/containers/image/v5/image/oci.go)27
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/oci_index.go (renamed from vendor/github.com/containers/image/v5/image/oci_index.go)0
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/sourced.go134
-rw-r--r--vendor/github.com/containers/image/v5/internal/image/unparsed.go99
-rw-r--r--vendor/github.com/containers/image/v5/internal/manifest/errors.go32
-rw-r--r--vendor/github.com/containers/image/v5/manifest/common.go61
-rw-r--r--vendor/github.com/containers/image/v5/manifest/docker_schema2.go8
-rw-r--r--vendor/github.com/containers/image/v5/manifest/manifest.go5
-rw-r--r--vendor/github.com/containers/image/v5/manifest/oci.go94
-rw-r--r--vendor/github.com/containers/image/v5/oci/archive/oci_transport.go8
-rw-r--r--vendor/github.com/containers/image/v5/oci/layout/oci_transport.go8
-rw-r--r--vendor/github.com/containers/image/v5/openshift/openshift_transport.go8
-rw-r--r--vendor/github.com/containers/image/v5/ostree/ostree_transport.go14
-rw-r--r--vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go8
-rw-r--r--vendor/github.com/containers/image/v5/sif/transport.go8
-rw-r--r--vendor/github.com/containers/image/v5/storage/storage_image.go8
-rw-r--r--vendor/github.com/containers/image/v5/tarball/tarball_reference.go13
-rw-r--r--vendor/github.com/containers/ocicrypt/.travis.yml2
-rw-r--r--vendor/github.com/containers/ocicrypt/config/constructors.go2
-rw-r--r--vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go2
-rw-r--r--vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go2
-rw-r--r--vendor/github.com/containers/ocicrypt/go.mod2
-rw-r--r--vendor/github.com/containers/ocicrypt/go.sum4
-rw-r--r--vendor/github.com/containers/storage/.cirrus.yml8
-rw-r--r--vendor/github.com/containers/storage/drivers/chown_darwin.go109
-rw-r--r--vendor/github.com/containers/storage/drivers/chown_unix.go34
-rw-r--r--vendor/github.com/containers/storage/drivers/driver_darwin.go14
-rw-r--r--vendor/github.com/containers/storage/drivers/driver_linux.go24
-rw-r--r--vendor/github.com/containers/storage/drivers/driver_unsupported.go2
-rw-r--r--vendor/github.com/containers/storage/drivers/fsdiff.go9
-rw-r--r--vendor/github.com/containers/storage/drivers/vfs/driver.go5
-rw-r--r--vendor/github.com/containers/storage/go.mod12
-rw-r--r--vendor/github.com/containers/storage/go.sum41
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive.go30
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go21
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go41
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go4
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/storage_linux.go105
-rw-r--r--vendor/github.com/containers/storage/pkg/idtools/idtools.go44
-rw-r--r--vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go17
-rw-r--r--vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go60
-rw-r--r--vendor/github.com/containers/storage/pkg/stringid/stringid.go12
-rw-r--r--vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go84
-rw-r--r--vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go6
-rw-r--r--vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go84
-rw-r--r--vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare.c2
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare.go24
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go3
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go53
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c76
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go179
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go30
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go8
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go3
-rw-r--r--vendor/github.com/containers/storage/storage.conf22
-rw-r--r--vendor/github.com/containers/storage/store.go26
-rw-r--r--vendor/github.com/containers/storage/types/options.go26
-rw-r--r--vendor/github.com/containers/storage/types/utils.go2
-rw-r--r--vendor/github.com/containers/storage/userns.go8
-rw-r--r--vendor/github.com/docker/docker/api/swagger.yaml2
-rw-r--r--vendor/github.com/docker/docker/pkg/system/mknod.go6
-rw-r--r--vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go14
-rw-r--r--vendor/github.com/docker/docker/pkg/system/mknod_unix.go14
-rw-r--r--vendor/github.com/docker/libnetwork/resolvconf/README.md1
-rw-r--r--vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go26
-rw-r--r--vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go285
-rw-r--r--vendor/github.com/docker/libnetwork/types/types.go653
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/AUTHORS210
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/README.md6
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/container_stats.go24
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/go.mod22
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/go.sum50
-rw-r--r--vendor/github.com/imdario/mergo/README.md32
-rw-r--r--vendor/github.com/imdario/mergo/go.mod2
-rw-r--r--vendor/github.com/imdario/mergo/go.sum4
-rw-r--r--vendor/github.com/imdario/mergo/merge.go2
-rw-r--r--vendor/github.com/imdario/mergo/mergo.go4
-rw-r--r--vendor/github.com/ishidawataru/sctp/.gitignore16
-rw-r--r--vendor/github.com/ishidawataru/sctp/.travis.yml29
-rw-r--r--vendor/github.com/ishidawataru/sctp/GO_LICENSE27
-rw-r--r--vendor/github.com/ishidawataru/sctp/LICENSE201
-rw-r--r--vendor/github.com/ishidawataru/sctp/NOTICE3
-rw-r--r--vendor/github.com/ishidawataru/sctp/README.md18
-rw-r--r--vendor/github.com/ishidawataru/sctp/go.mod3
-rw-r--r--vendor/github.com/ishidawataru/sctp/ipsock_linux.go222
-rw-r--r--vendor/github.com/ishidawataru/sctp/sctp.go729
-rw-r--r--vendor/github.com/ishidawataru/sctp/sctp_linux.go305
-rw-r--r--vendor/github.com/ishidawataru/sctp/sctp_unsupported.go98
-rw-r--r--vendor/github.com/klauspost/compress/README.md23
-rw-r--r--vendor/github.com/klauspost/compress/flate/deflate.go36
-rw-r--r--vendor/github.com/klauspost/compress/flate/fast_encoder.go2
-rw-r--r--vendor/github.com/klauspost/compress/huff0/bitreader.go10
-rw-r--r--vendor/github.com/klauspost/compress/huff0/bitwriter.go115
-rw-r--r--vendor/github.com/klauspost/compress/huff0/bytereader.go10
-rw-r--r--vendor/github.com/klauspost/compress/huff0/compress.go1
-rw-r--r--vendor/github.com/klauspost/compress/huff0/decompress.go113
-rw-r--r--vendor/github.com/klauspost/compress/huff0/decompress_amd64.go82
-rw-r--r--vendor/github.com/klauspost/compress/huff0/decompress_amd64.s203
-rw-r--r--vendor/github.com/klauspost/compress/huff0/decompress_generic.go102
-rw-r--r--vendor/github.com/klauspost/compress/zstd/bitreader.go7
-rw-r--r--vendor/github.com/klauspost/compress/zstd/bitwriter.go76
-rw-r--r--vendor/github.com/klauspost/compress/zstd/blockdec.go31
-rw-r--r--vendor/github.com/klauspost/compress/zstd/bytebuf.go4
-rw-r--r--vendor/github.com/klauspost/compress/zstd/bytereader.go6
-rw-r--r--vendor/github.com/klauspost/compress/zstd/decoder.go93
-rw-r--r--vendor/github.com/klauspost/compress/zstd/enc_better.go8
-rw-r--r--vendor/github.com/klauspost/compress/zstd/enc_dfast.go10
-rw-r--r--vendor/github.com/klauspost/compress/zstd/encoder.go2
-rw-r--r--vendor/github.com/klauspost/compress/zstd/framedec.go5
-rw-r--r--vendor/github.com/klauspost/compress/zstd/fse_decoder.go40
-rw-r--r--vendor/github.com/klauspost/compress/zstd/fse_encoder.go23
-rw-r--r--vendor/github.com/klauspost/compress/zstd/hash.go6
-rw-r--r--vendor/github.com/klauspost/compress/zstd/seqdec.go102
-rw-r--r--vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go16
-rw-r--r--vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s622
-rw-r--r--vendor/github.com/klauspost/compress/zstd/zip.go9
-rw-r--r--vendor/github.com/klauspost/compress/zstd/zstd.go11
-rw-r--r--vendor/github.com/moby/sys/mount/flags_bsd.go1
-rw-r--r--vendor/github.com/moby/sys/mount/flags_unix.go3
-rw-r--r--vendor/github.com/moby/sys/mount/go.mod6
-rw-r--r--vendor/github.com/moby/sys/mount/go.sum9
-rw-r--r--vendor/github.com/moby/sys/mount/mount_errors.go3
-rw-r--r--vendor/github.com/moby/sys/mount/mount_unix.go3
-rw-r--r--vendor/github.com/moby/sys/mount/mounter_freebsd.go (renamed from vendor/github.com/moby/sys/mount/mounter_bsd.go)3
-rw-r--r--vendor/github.com/moby/sys/mount/mounter_linux.go1
-rw-r--r--vendor/github.com/moby/sys/mount/mounter_openbsd.go78
-rw-r--r--vendor/github.com/moby/sys/mount/mounter_unsupported.go3
-rw-r--r--vendor/github.com/moby/sys/mountinfo/go.mod2
-rw-r--r--vendor/github.com/moby/sys/mountinfo/go.sum4
-rw-r--r--vendor/github.com/moby/sys/mountinfo/mounted_linux.go2
-rw-r--r--vendor/github.com/moby/sys/mountinfo/mounted_unix.go4
-rw-r--r--vendor/github.com/moby/sys/mountinfo/mountinfo.go2
-rw-r--r--vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go44
-rw-r--r--vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go14
-rw-r--r--vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go11
-rw-r--r--vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go4
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go15
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go311
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go129
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go166
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go245
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go39
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go15
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go158
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go264
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go62
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go348
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go31
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go32
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go30
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go186
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go24
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go62
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go25
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go87
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go28
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/create.go152
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go99
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go127
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go271
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go48
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go193
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go216
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go72
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go121
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go145
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go8
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/config.go3
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go6
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go19
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/.golangci.yml4
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/.travis.yml57
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/CHANGELOG25
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md26
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/Makefile7
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/README.md46
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/SECURITY.md48
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/go.sum23
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/seccomp.go264
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go192
-rw-r--r--vendor/github.com/spf13/cobra/CHANGELOG.md51
-rw-r--r--vendor/github.com/spf13/cobra/README.md6
-rw-r--r--vendor/github.com/spf13/cobra/active_help.go49
-rw-r--r--vendor/github.com/spf13/cobra/active_help.md157
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.go19
-rw-r--r--vendor/github.com/spf13/cobra/bash_completionsV2.go180
-rw-r--r--vendor/github.com/spf13/cobra/command.go26
-rw-r--r--vendor/github.com/spf13/cobra/completions.go27
-rw-r--r--vendor/github.com/spf13/cobra/fish_completions.go9
-rw-r--r--vendor/github.com/spf13/cobra/flag_groups.go223
-rw-r--r--vendor/github.com/spf13/cobra/go.mod2
-rw-r--r--vendor/github.com/spf13/cobra/go.sum12
-rw-r--r--vendor/github.com/spf13/cobra/powershell_completions.go7
-rw-r--r--vendor/github.com/spf13/cobra/projects_using_cobra.md13
-rw-r--r--vendor/github.com/spf13/cobra/shell_completions.md6
-rw-r--r--vendor/github.com/spf13/cobra/user_guide.md40
-rw-r--r--vendor/github.com/spf13/cobra/zsh_completions.go35
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_compare.go24
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go2
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_format.go10
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_forward.go20
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertions.go78
-rw-r--r--vendor/github.com/stretchr/testify/require/require.go26
-rw-r--r--vendor/github.com/stretchr/testify/require/require_forward.go20
-rw-r--r--vendor/github.com/vbauerster/mpb/v7/go.mod2
-rw-r--r--vendor/github.com/vbauerster/mpb/v7/go.sum4
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu.go4
-rw-r--r--vendor/golang.org/x/sys/execabs/execabs.go2
-rw-r--r--vendor/golang.org/x/sys/execabs/execabs_go118.go12
-rw-r--r--vendor/golang.org/x/sys/execabs/execabs_go119.go15
-rw-r--r--vendor/golang.org/x/sys/plan9/syscall.go1
-rw-r--r--vendor/golang.org/x/sys/plan9/syscall_plan9.go10
-rw-r--r--vendor/golang.org/x/sys/unix/ifreq_linux.go9
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_aix.go6
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_dragonfly.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux.go112
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_openbsd.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go24
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s6
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go24
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s6
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go8
-rw-r--r--vendor/golang.org/x/sys/windows/exec_windows.go10
-rw-r--r--vendor/golang.org/x/sys/windows/registry/key.go1
-rw-r--r--vendor/golang.org/x/sys/windows/syscall_windows.go1
-rw-r--r--vendor/golang.org/x/term/term.go10
-rw-r--r--vendor/golang.org/x/term/terminal.go2
-rw-r--r--vendor/gopkg.in/yaml.v3/decode.go78
-rw-r--r--vendor/gopkg.in/yaml.v3/parserc.go11
-rw-r--r--vendor/modules.txt66
1084 files changed, 28642 insertions, 15814 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index ee0131279..81bbe7c8f 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -30,8 +30,10 @@ env:
PRIOR_FEDORA_NAME: "fedora-35"
UBUNTU_NAME: "ubuntu-2110"
- # Google-cloud VM Images
+ # Image identifiers
IMAGE_SUFFIX: "c6211193021923328"
+ FEDORA_AMI_ID: "ami-06a41d8a81ab56afa"
+ # Complete image names
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -59,19 +61,24 @@ env:
curl --fail --location -O
--url https://api.cirrus-ci.com/v1/artifact/build/${CIRRUS_BUILD_ID}
+
# Default timeout for each task
timeout_in: 60m
gcp_credentials: ENCRYPTED[a28959877b2c9c36f151781b0a05407218cda646c7d047fc556e42f55e097e897ab63ee78369dae141dcf0b46a9d0cdd]
+aws_credentials: ENCRYPTED[4ca070bffe28eb9b27d63c568b52970dd46f119c3a83b8e443241e895dbf1737580b4d84eed27a311a2b74287ef9f79f]
+
# Attempt to prevent flakes by confirming all required external/3rd-party
# services are available and functional.
ext_svc_check_task:
alias: 'ext_svc_check' # int. ref. name - required for depends_on reference
name: "Ext. services" # Displayed Title - has no other significance
- skip: &tags "$CIRRUS_TAG != ''" # Don't run on tags
+ # Don't create this task for new tags so release process is more reliable
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: $CIRRUS_TAG == ''
# Default/small container image to execute tasks with
container: &smallcontainer
image: ${CTR_FQIN}
@@ -117,7 +124,9 @@ ext_svc_check_task:
automation_task:
alias: 'automation'
name: "Check Automation"
- skip: &branches_and_tags "$CIRRUS_PR == '' || $CIRRUS_TAG != ''" # Don't run on branches/tags
+ # This task is not needed for branches, tags, or cron runs.
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: &is_pr "$CIRRUS_PR != ''"
container: *smallcontainer
env:
TEST_FLAVOR: automation
@@ -138,6 +147,9 @@ automation_task:
build_task:
alias: 'build'
name: 'Build for $DISTRO_NV'
+ # Multiarch doesn't depend on buildability in this automation context
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: "$CIRRUS_CRON != 'multiarch'"
gce_instance: &standardvm
image_project: libpod-218412
zone: "us-central1-a"
@@ -189,7 +201,8 @@ validate_task:
# automation reliability/speed in those contexts. Any missed errors due
# to nonsequential PR merging practices, will be caught on a future PR,
# build or test task failures.
- skip: *branches_and_tags
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *is_pr
depends_on:
- ext_svc_check
- automation
@@ -218,9 +231,12 @@ validate_task:
bindings_task:
name: "Test Bindings"
alias: bindings
- # Don't run for [CI:DOCS] or [CI:BUILD]
- only_if: &not_build $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*'
- skip: *branches_and_tags
+ # Don't create task for PRs using [CI:DOCS] or [CI:BUILD]
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: >-
+ $CIRRUS_PR != '' &&
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*'
depends_on:
- build
gce_instance: *standardvm
@@ -249,6 +265,11 @@ bindings_task:
swagger_task:
name: "Test Swagger"
alias: swagger
+ # Don't create task for [CI:BUILD] or multiarch builds
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: >-
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
+ $CIRRUS_CRON != 'multiarch'
depends_on:
- build
gce_instance: *standardvm
@@ -276,7 +297,8 @@ swagger_task:
consistency_task:
name: "Test Code Consistency"
alias: consistency
- skip: *tags
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *is_pr
depends_on:
- build
container: *smallcontainer
@@ -297,8 +319,11 @@ consistency_task:
alt_build_task:
name: "$ALT_NAME"
alias: alt_build
- # Don't run for [CI:DOCS]; DO run for [CI:BUILD]
- only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ # Don't create task for [CI:DOCS] or multiarch builds
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: &not_docs_multiarch >-
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+ $CIRRUS_CRON != 'multiarch'
depends_on:
- build
env:
@@ -330,7 +355,8 @@ alt_build_task:
osx_alt_build_task:
name: "OSX Cross"
alias: osx_alt_build
- only_if: *not_docs
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_docs_multiarch
depends_on:
- build
env:
@@ -359,8 +385,14 @@ osx_alt_build_task:
docker-py_test_task:
name: Docker-py Compat.
alias: docker-py_test
- skip: *tags
- only_if: *not_build
+ # Don't create task for tags, branches, or PRs w/ [CI:DOCS] or [CI:BUILD]
+ # N/B: for PRs $CIRRUS_BRANCH == 'pull/<number>'
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: &not_tag_branch_build_docs >-
+ $CIRRUS_PR != '' &&
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*'
+
depends_on:
- build
gce_instance: *standardvm
@@ -379,9 +411,10 @@ docker-py_test_task:
unit_test_task:
name: "Unit tests on $DISTRO_NV"
alias: unit_test
- skip: *tags
- only_if: *not_build
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
depends_on:
+ - build
- validate
matrix:
- env: *stdenvars
@@ -402,11 +435,12 @@ unit_test_task:
apiv2_test_task:
- name: "APIv2 test on $DISTRO_NV"
+ name: "APIv2 test on $DISTRO_NV ($PRIV_NAME)"
alias: apiv2_test
- only_if: *not_build
- skip: *tags
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
depends_on:
+ - build
- validate
gce_instance: *standardvm
# Test is normally pretty quick, about 10-minutes. If it hangs,
@@ -415,6 +449,11 @@ apiv2_test_task:
env:
<<: *stdenvars
TEST_FLAVOR: apiv2
+ matrix:
+ - env:
+ PRIV_NAME: root
+ - env:
+ PRIV_NAME: rootless
clone_script: *get_gosrc
setup_script: *setup
main_script: *main
@@ -424,9 +463,10 @@ apiv2_test_task:
compose_test_task:
name: "$TEST_FLAVOR test on $DISTRO_NV ($PRIV_NAME)"
alias: compose_test
- only_if: *not_build
- skip: *tags
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
depends_on:
+ - build
- validate
gce_instance: *standardvm
matrix:
@@ -457,9 +497,10 @@ local_integration_test_task: &local_integration_test_task
# <int.|sys.> <podman|remote> <Distro NV> <root|rootless>
name: &std_name_fmt "$TEST_FLAVOR $PODBIN_NAME $DISTRO_NV $PRIV_NAME $TEST_ENVIRON"
alias: local_integration_test
- only_if: *not_build
- skip: *branches_and_tags
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
depends_on:
+ - build
- unit_test
matrix: *platform_axis
gce_instance: *standardvm
@@ -492,9 +533,10 @@ remote_integration_test_task:
container_integration_test_task:
name: *std_name_fmt
alias: container_integration_test
- only_if: *not_build
- skip: *branches_and_tags
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
depends_on:
+ - build
- unit_test
matrix: &fedora_vm_axis
- env:
@@ -522,9 +564,10 @@ container_integration_test_task:
rootless_integration_test_task:
name: *std_name_fmt
alias: rootless_integration_test
- only_if: *not_build
- skip: *branches_and_tags
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
depends_on:
+ - build
- unit_test
matrix: *platform_axis
gce_instance: *standardvm
@@ -538,6 +581,35 @@ rootless_integration_test_task:
always: *int_logs_artifacts
+podman_machine_task:
+ name: *std_name_fmt
+ alias: podman_machine
+ only_if: *not_tag_branch_build_docs
+ # Manually-triggered task: This is "expensive" to run.
+ # DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`
+ # it will cause 'success' to block.
+ trigger_type: manual
+ depends_on:
+ - build
+ - local_integration_test
+ - remote_integration_test
+ - container_integration_test
+ - rootless_integration_test
+ ec2_instance:
+ image: "${VM_IMAGE_NAME}"
+ type: m5zn.metal # Bare-metal instance is required
+ region: us-east-1
+ env:
+ TEST_FLAVOR: "machine"
+ PRIV_NAME: "rootless" # intended use-case
+ DISTRO_NV: "${FEDORA_NAME}"
+ VM_IMAGE_NAME: "${FEDORA_AMI_ID}"
+ clone_script: *get_gosrc
+ setup_script: *setup
+ main_script: *main
+ always: *int_logs_artifacts
+
+
# Always run subsequent to integration tests. While parallelism is lost
# with runtime, debugging system-test failures can be more challenging
# for some golang developers. Otherwise the following tasks run across
@@ -545,10 +617,16 @@ rootless_integration_test_task:
local_system_test_task: &local_system_test_task
name: *std_name_fmt
alias: local_system_test
- skip: *tags
- only_if: *not_build
+ # Don't create task for tags, or if using [CI:DOCS], [CI:BUILD], multiarch
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: &not_tag_build_docs_multiarch >-
+ $CIRRUS_TAG == '' &&
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
+ $CIRRUS_CRON != 'multiarch'
depends_on:
- - local_integration_test
+ - build
+ - local_integration_test
matrix: *platform_axis
gce_instance: *standardvm
env:
@@ -563,26 +641,28 @@ remote_system_test_task:
<<: *local_system_test_task
alias: remote_system_test
depends_on:
- - remote_integration_test
+ - build
+ - remote_integration_test
env:
TEST_FLAVOR: sys
PODBIN_NAME: remote
rootless_remote_system_test_task:
+ matrix:
+ # Minimal sanity testing: only the latest Fedora
+ - env:
+ DISTRO_NV: ${FEDORA_NAME}
+ # Not used here, is used in other tasks
+ VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}
+ CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
+ # ID for re-use of build output
+ _BUILD_CACHE_HANDLE: ${FEDORA_NAME}-build-${CIRRUS_BUILD_ID}
<<: *local_system_test_task
alias: rootless_remote_system_test
depends_on:
- - remote_integration_test
- matrix:
- # Minimal sanity testing: only the latest Fedora
- - env:
- DISTRO_NV: ${FEDORA_NAME}
- # Not used here, is used in other tasks
- VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}
- CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
- # ID for re-use of build output
- _BUILD_CACHE_HANDLE: ${FEDORA_NAME}-build-${CIRRUS_BUILD_ID}
+ - build
+ - remote_integration_test
gce_instance: *standardvm
env:
TEST_FLAVOR: sys
@@ -590,13 +670,33 @@ rootless_remote_system_test_task:
PRIV_NAME: rootless
+rootless_system_test_task:
+ name: *std_name_fmt
+ alias: rootless_system_test
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_build_docs_multiarch
+ depends_on:
+ - build
+ - rootless_integration_test
+ matrix: *platform_axis
+ gce_instance: *standardvm
+ env:
+ TEST_FLAVOR: sys
+ PRIV_NAME: rootless
+ clone_script: *get_gosrc
+ setup_script: *setup
+ main_script: *main
+ always: *logs_artifacts
+
+
buildah_bud_test_task:
name: *std_name_fmt
alias: buildah_bud_test
- skip: *tags
- only_if: *not_build
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
depends_on:
- - local_integration_test
+ - build
+ - local_integration_test
env:
TEST_FLAVOR: bud
DISTRO_NV: ${FEDORA_NAME}
@@ -618,35 +718,18 @@ buildah_bud_test_task:
always: *int_logs_artifacts
-rootless_system_test_task:
- name: *std_name_fmt
- alias: rootless_system_test
- skip: *tags
- only_if: *not_build
- depends_on:
- - rootless_integration_test
- matrix: *platform_axis
- gce_instance: *standardvm
- env:
- TEST_FLAVOR: sys
- PRIV_NAME: rootless
- clone_script: *get_gosrc
- setup_script: *setup
- main_script: *main
- always: *logs_artifacts
-
-
rootless_gitlab_test_task:
name: *std_name_fmt
alias: rootless_gitlab_test
- skip: *tags
- only_if: *not_build
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
# Community-maintained downstream test may fail unexpectedly.
# Ref. repository: https://gitlab.com/gitlab-org/gitlab-runner
# If necessary, uncomment the next line and file issue(s) with details.
# allow_failures: $CI == $CI
depends_on:
- - rootless_integration_test
+ - build
+ - rootless_integration_test
gce_instance: *standardvm
env:
<<: *ubuntu_envvars
@@ -666,10 +749,11 @@ rootless_gitlab_test_task:
upgrade_test_task:
name: "Upgrade test: from $PODMAN_UPGRADE_FROM"
alias: upgrade_test
- skip: *tags
- only_if: *not_build
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_tag_branch_build_docs
depends_on:
- - local_system_test
+ - build
+ - local_system_test
matrix:
- env:
PODMAN_UPGRADE_FROM: v2.1.1
@@ -695,6 +779,7 @@ image_build_task: &image-build
alias: image_build
# Some of these container images take > 1h to build, limit
# this task to a specific Cirrus-Cron entry with this name.
+ # Docs: ./contrib/cirrus/CIModes.md
only_if: $CIRRUS_CRON == 'multiarch'
depends_on:
- ext_svc_check
@@ -714,22 +799,25 @@ image_build_task: &image-build
- env:
CTXDIR: contrib/hello
env:
+ DISTRO_NV: "${FEDORA_NAME}" # Required for repo cache extraction
PODMAN_USERNAME: ENCRYPTED[b9f0f2550029dd2196e086d9dd6c2d1fec7e328630b15990d9bb610f9fcccb5baab8b64a8c3e72b0c1d0f5917cf65aa1]
PODMAN_PASSWORD: ENCRYPTED[e3444f6072853f0c8db7f964ead5e2204116af485469fa0de367f26b9316b460fd842a9882f552b9e9a83bbaf650d8b4]
CONTAINERS_USERNAME: ENCRYPTED[54a372d5f22f424173c114c6fb25c3214956cad323d5b285c7393a71041884ce96471d0ff733774e5dab9fa5a3c8795c]
CONTAINERS_PASSWORD: ENCRYPTED[4ecc3fb534935095a99fb1f2e320ac6bc87f3e7e186746e41cbcc4b5f5379a014b9fc8cc90e1f3d5abdbaf31580a4ab9]
- clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR
- script:
+ main_script:
- set -a; source /etc/automation_environment; set +a
- main.sh $CIRRUS_REPO_CLONE_URL $CTXDIR
test_image_build_task:
<<: *image-build
- # Allow this to run inside a PR w/ [CI:BUILD]
- only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ alias: test_image_build
+ # Allow this to run inside a PR w/ [CI:BUILD] only.
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
# This takes a LONG time, only run when requested. N/B: Any task
# made to depend on this one will block FOREVER unless triggered.
+ # DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
trigger_type: manual
# Overwrite all 'env', don't push anything, just do the build.
env:
@@ -758,7 +846,7 @@ meta_task:
GCPJSON: ENCRYPTED[3a198350077849c8df14b723c0f4c9fece9ebe6408d35982e7adf2105a33f8e0e166ed3ed614875a0887e1af2b8775f4]
GCPNAME: ENCRYPTED[2f9738ef295a706f66a13891b40e8eaa92a89e0e87faf8bed66c41eca72bf76cfd190a6f2d0e8444c631fdf15ed32ef6]
GCPPROJECT: libpod-218412
- clone_script: *noop
+ clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR
script: /usr/local/bin/entrypoint.sh
@@ -787,13 +875,16 @@ success_task:
- remote_integration_test
- container_integration_test
- rootless_integration_test
+ # Manually triggered task. If made automatic, remove bypass
+ # in contrib/cirrus/cirrus_yaml_test.py for this task.
+ # - podman_machine
- local_system_test
- remote_system_test
- rootless_system_test
- rootless_remote_system_test
+ - buildah_bud_test
- rootless_gitlab_test
- upgrade_test
- - buildah_bud_test
- image_build
- meta
container: *smallcontainer
@@ -807,7 +898,8 @@ success_task:
artifacts_task:
name: "Artifacts"
alias: artifacts
- only_if: *not_docs
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: *not_docs_multiarch
depends_on:
- success
# This task is a secondary/convenience for downstream consumers, don't
@@ -860,8 +952,11 @@ artifacts_task:
release_task:
name: "Verify Release"
alias: release
- only_if: *tags
+ # This should _only_ run for new tags
+ # Docs: ./contrib/cirrus/CIModes.md
+ only_if: $CIRRUS_TAG != ''
depends_on:
+ - build
- success
gce_instance: *standardvm
env:
@@ -882,11 +977,13 @@ release_test_task:
name: "Optional Release Test"
alias: release_test
# Release-PRs always include "release" or "Bump" in the title
+ # Docs: ./contrib/cirrus/CIModes.md
only_if: $CIRRUS_CHANGE_TITLE =~ '.*((release)|(bump)).*'
# Allow running manually only as part of release-related builds
# see RELEASE_PROCESS.md
trigger_type: manual
depends_on:
+ - build
- success
gce_instance: *standardvm
env:
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 9a4563308..cc8b618f4 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -3,9 +3,9 @@ Thanks for sending a pull request!
Please make sure you've read our contributing guidelines and how to submit a pull request (https://github.com/containers/podman/blob/main/CONTRIBUTING.md#submitting-pull-requests).
-In case you're only changing docs, make sure to prefix the pull-request title with "[CI:DOCS]". That will prevent functional tests from running and save time and energy.
+In case you're only changing docs, make sure to prefix the pull-request title with "[CI:DOCS]". That will prevent functional tests from running and save time and energy.
-Finally, be sure to sign commits with your real name. Since by opening
+Finally, be sure to sign commits with your real name. Since by opening
a PR you already have commits, you can add signatures if needed with
something like `git commit -s --amend`.
-->
@@ -18,7 +18,7 @@ is required: Enter your extended release note in the block below. If the PR
requires additional action from users switching to the new release, include the
string "action required".
-For more information on release notes please follow the kubernetes model:
+For more information on release notes, please follow the Kubernetes model:
https://git.k8s.io/community/contributors/guide/release-notes.md
-->
diff --git a/.golangci.yml b/.golangci.yml
index 15700cee7..d8e80ec27 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -13,7 +13,6 @@ linters:
enable-all: true
disable:
# All these break for one reason or another
- - nolintlint # some linter must be disabled (see `nolint` in the code)
- tagliatelle # too many JSON keys cannot be changed due to compat
- gocognit
- testpackage
@@ -56,6 +55,8 @@ linters:
- varnamelen
- maintidx
- nilnil
+ - nonamedreturns
+ - exhaustruct
# deprecated linters
- golint # replaced by revive
- scopelint # replaced by exportloopref
@@ -64,3 +65,6 @@ linters-settings:
errcheck:
check-blank: false
ignore: fmt:.*
+ nolintlint:
+ allow-leading-space: false
+ require-specific: true
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e9f40dffe..eddd35cba 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -70,7 +70,7 @@ $ cd $GOPATH/src/github.com/containers/podman
### Deal with make
-Podman use a Makefile to realize common action like building etc...
+Podman uses a Makefile to realize common actions like building etc...
You can list available actions by using:
```shell
@@ -93,7 +93,7 @@ You need install some dependencies before building a binary.
#### Fedora
```shell
- $ sudo dnf install gpgme-devel libseccomp-devel.x86_64 libseccomp-devel.x86_64 systemd-devel
+ $ sudo dnf install gpgme-devel libseccomp-devel.x86_64 systemd-devel
$ export PKG_CONFIG_PATH="/usr/lib/pkgconfig"
```
@@ -103,8 +103,8 @@ To test your changes do `make binaries` to generate your binaries.
Your binaries are created inside the `bin/` directory and you can test your changes:
```shell
-$ bin/podman -h
-bin/podman -h
+$ bin/podman --help
+bin/podman --help
NAME:
podman - manage pods and images
@@ -150,7 +150,7 @@ Regardless of the type of PR, all PRs should include:
* additional testcases. Ideally, they should fail w/o your code change applied.
(With a few exceptions, CI hooks will block your PR unless your change
includes files named `*_test.go` or under the `test/` subdirectory. To
- bypass this block, include the string `[NO TESTS NEEDED]` in your
+ bypass this block, include the string `[NO NEW TESTS NEEDED]` in your
commit message).
* documentation changes.
diff --git a/Containerfile-testvol b/Containerfile-testvol
deleted file mode 100644
index 6ff45064b..000000000
--- a/Containerfile-testvol
+++ /dev/null
@@ -1,10 +0,0 @@
-FROM golang:1.15-alpine AS build-img
-COPY ./test/testvol/ /go/src/github.com/containers/podman/cmd/testvol/
-COPY ./vendor /go/src/github.com/containers/podman/vendor/
-WORKDIR /go/src/github.com/containers/podman
-RUN go build -o /testvol ./cmd/testvol
-
-FROM alpine
-COPY --from=build-img /testvol /usr/local/bin
-WORKDIR /
-ENTRYPOINT ["/usr/local/bin/testvol"]
diff --git a/Makefile b/Makefile
index fb9eb057d..32de58856 100644
--- a/Makefile
+++ b/Makefile
@@ -29,8 +29,6 @@ EPOCH_TEST_COMMIT ?= $(shell git merge-base $${DEST_BRANCH:-main} HEAD)
HEAD ?= HEAD
PROJECT := github.com/containers/podman
GIT_BASE_BRANCH ?= origin/main
-GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-GIT_BRANCH_CLEAN ?= $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
LIBPOD_INSTANCE := libpod_dev
PREFIX ?= /usr/local
BINDIR ?= ${PREFIX}/bin
@@ -53,10 +51,20 @@ BUILDTAGS ?= \
$(shell hack/libsubid_tag.sh) \
exclude_graphdriver_devicemapper \
seccomp
+ifeq ($(shell uname -s),FreeBSD)
+# Use bash for make's shell function - the default shell on FreeBSD
+# has a command builtin is not compatible with the way its used below
+SHELL := $(shell command -v bash)
+endif
PYTHON ?= $(shell command -v python3 python|head -n1)
PKG_MANAGER ?= $(shell command -v dnf yum|head -n1)
# ~/.local/bin is not in PATH on all systems
PRE_COMMIT = $(shell command -v bin/venv/bin/pre-commit ~/.local/bin/pre-commit pre-commit | head -n1)
+ifeq ($(shell uname -s),FreeBSD)
+SED=gsed
+else
+SED=sed
+endif
# This isn't what we actually build; it's a superset, used for target
# dependencies. Basically: all *.go and *.c files, except *_test.go,
@@ -80,18 +88,18 @@ FISHINSTALLDIR=${PREFIX}/share/fish/vendor_completions.d
SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z)
COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
-GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
+GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),$(call err_if_empty,COMMIT_NO)-dirty,$(COMMIT_NO))
DATE_FMT = %s
ifdef SOURCE_DATE_EPOCH
- BUILD_INFO ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "+$(DATE_FMT)" 2>/dev/null || date -u -r "$(SOURCE_DATE_EPOCH)" "+$(DATE_FMT)" 2>/dev/null || date -u "+$(DATE_FMT)")
+ BUILD_INFO ?= $(shell date -u -d "@$(call err_if_empty,SOURCE_DATE_EPOCH)" "+$(DATE_FMT)" 2>/dev/null || date -u -r "$(SOURCE_DATE_EPOCH)" "+$(DATE_FMT)" 2>/dev/null || date -u "+$(DATE_FMT)")
else
BUILD_INFO ?= $(shell date "+$(DATE_FMT)")
endif
LIBPOD := ${PROJECT}/v4/libpod
GOFLAGS ?= -trimpath
LDFLAGS_PODMAN ?= \
- -X $(LIBPOD)/define.gitCommit=$(GIT_COMMIT) \
- -X $(LIBPOD)/define.buildInfo=$(BUILD_INFO) \
+ $(if $(GIT_COMMIT),-X $(LIBPOD)/define.gitCommit=$(GIT_COMMIT),) \
+ $(if $(BUILD_INFO),-X $(LIBPOD)/define.buildInfo=$(BUILD_INFO),) \
-X $(LIBPOD)/config._installPrefix=$(PREFIX) \
-X $(LIBPOD)/config._etcDir=$(ETCDIR) \
-X github.com/containers/common/pkg/config.additionalHelperBinariesDir=$(HELPER_BINARIES_DIR)\
@@ -104,10 +112,14 @@ LIBSECCOMP_COMMIT := v2.3.3
# Rarely if ever should integration tests take more than 50min,
# caller may override in special circumstances if needed.
GINKGOTIMEOUT ?= -timeout=90m
+# By default, run test/e2e
+GINKGOWHAT ?= test/e2e/.
+# By default, run tests in parallel across 3 nodes.
+GINKGONODES ?= 3
# Conditional required to produce empty-output if binary not built yet.
RELEASE_VERSION = $(shell if test -x test/version/version; then test/version/version; fi)
-RELEASE_NUMBER = $(shell echo "$(RELEASE_VERSION)" | sed -e 's/^v\(.*\)/\1/')
+RELEASE_NUMBER = $(shell echo "$(call err_if_empty,RELEASE_VERSION)" | sed -e 's/^v\(.*\)/\1/')
# If non-empty, logs all output from server during remote system testing
PODMAN_SERVER_LOG ?=
@@ -138,7 +150,7 @@ err_if_empty = $(if $(strip $($(1))),$(strip $($(1))),$(error Required variable
CGO_ENABLED ?= 1
# Default to the native OS type and architecture unless otherwise specified
NATIVE_GOOS := $(shell env -u GOOS $(GO) env GOOS)
-GOOS ?= $(NATIVE_GOOS)
+GOOS ?= $(call err_if_empty,NATIVE_GOOS)
# Default to the native architecture type
NATIVE_GOARCH := $(shell env -u GOARCH $(GO) env GOARCH)
GOARCH ?= $(NATIVE_GOARCH)
@@ -158,7 +170,7 @@ export GOOS GOARCH CGO_ENABLED BINSFX SRCBINDIR
# Need to use CGO for mDNS resolution, but cross builds need CGO disabled
# See https://github.com/golang/go/issues/12524 for details
DARWIN_GCO := 0
-ifeq ($(NATIVE_GOOS),darwin)
+ifeq ($(call err_if_empty,NATIVE_GOOS),darwin)
ifdef HOMEBREW_PREFIX
DARWIN_GCO := 1
endif
@@ -182,15 +194,19 @@ default: all
all: binaries docs
.PHONY: binaries
+ifeq ($(shell uname -s),FreeBSD)
+binaries: podman podman-remote ## Build podman and podman-remote binaries
+else
binaries: podman podman-remote rootlessport ## Build podman, podman-remote and rootlessport binaries
+endif
# Extract text following double-# for targets, as their description for
# the `help` target. Otherwise These simple-substitutions are resolved
# at reference-time (due to `=` and not `=:`).
_HLP_TGTS_RX = '^[[:print:]]+:.*?\#\# .*$$'
_HLP_TGTS_CMD = grep -E $(_HLP_TGTS_RX) $(MAKEFILE_LIST)
-_HLP_TGTS_LEN = $(shell $(_HLP_TGTS_CMD) | cut -d : -f 1 | wc -L)
-_HLPFMT = "%-$(_HLP_TGTS_LEN)s %s\n"
+_HLP_TGTS_LEN = $(shell $(call err_if_empty,_HLP_TGTS_CMD) | cut -d : -f 1 | wc -L)
+_HLPFMT = "%-$(call err_if_empty,_HLP_TGTS_LEN)s %s\n"
.PHONY: help
help: ## (Default) Print listing of key targets with their descriptions
@printf $(_HLPFMT) "Target:" "Description:"
@@ -227,11 +243,11 @@ test/checkseccomp/checkseccomp: $(wildcard test/checkseccomp/*.go)
.PHONY: test/testvol/testvol
test/testvol/testvol: $(wildcard test/testvol/*.go)
- $(GOCMD) build $(BUILDFLAGS) $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' -o $@ ./test/testvol
+ $(GOCMD) build -o $@ ./test/testvol
-.PHONY: volume-plugin-test-image
+.PHONY: volume-plugin-test-img
volume-plugin-test-img:
- podman build -t quay.io/libpod/volume-plugin-test-img -f Containerfile-testvol .
+ ./bin/podman build --network none -t quay.io/libpod/volume-plugin-test-img:$$(date +%Y%m%d) -f ./test/testvol/Containerfile .
.PHONY: test/goecho/goecho
test/goecho/goecho: $(wildcard test/goecho/*.go)
@@ -250,7 +266,7 @@ validate: lint .gitvalidation validate.completions man-page-check swagger-check
.PHONY: build-all-new-commits
build-all-new-commits:
# Validate that all the commits build on top of $(GIT_BASE_BRANCH)
- git rebase $(GIT_BASE_BRANCH) -x "$(MAKE)"
+ git rebase $(call err_if_empty,GIT_BASE_BRANCH) -x "$(MAKE)"
.PHONY: vendor
vendor:
@@ -295,7 +311,7 @@ $(SRCBINDIR)/podman$(BINSFX): $(SRCBINDIR) $(SOURCES) go.mod go.sum
$(SRCBINDIR)/podman-remote-static: $(SRCBINDIR) $(SOURCES) go.mod go.sum
CGO_ENABLED=0 \
- GOOS=$(GOOS) \
+ GOOS=linux \
GOARCH=$(GOARCH) \
$(GO) build \
$(BUILDFLAGS) \
@@ -425,7 +441,7 @@ $(MANPAGES): %: %.md .install.md2man docdir
### replaces "\" at the end of a line with two spaces
### this ensures that manpages are renderd correctly
- @sed -e 's/\((podman[^)]*\.md\(#.*\)\?)\)//g' \
+ @$(SED) -e 's/\((podman[^)]*\.md\(#.*\)\?)\)//g' \
-e 's/\[\(podman[^]]*\)\]/\1/g' \
-e 's/\[\([^]]*\)](http[^)]\+)/\1/g' \
-e 's;<\(/\)\?\(a\|a\s\+[^>]*\|sup\)>;;g' \
@@ -441,7 +457,7 @@ docs: $(MANPAGES) ## Generate documentation
# docs/remote-docs.sh requires a locally executable 'podman-remote' binary
# in addition to the target-archetecture binary (if any).
-podman-remote-%-docs: podman-remote-$(NATIVE_GOOS)
+podman-remote-%-docs: podman-remote-$(call err_if_empty,NATIVE_GOOS)
$(eval GOOS := $*)
$(MAKE) docs $(MANPAGES)
rm -rf docs/build/remote
@@ -512,7 +528,7 @@ test: localunit localintegration remoteintegration localsystem remotesystem ##
.PHONY: ginkgo-run
ginkgo-run:
ACK_GINKGO_RC=true ginkgo version
- ACK_GINKGO_RC=true ginkgo -v $(TESTFLAGS) -tags "$(TAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor -nodes 3 -debug test/e2e/. $(HACK)
+ ACK_GINKGO_RC=true ginkgo -v $(TESTFLAGS) -tags "$(TAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor -nodes $(GINKGONODES) -debug $(GINKGOWHAT) $(HACK)
.PHONY: ginkgo
ginkgo:
@@ -528,6 +544,10 @@ localintegration: test-binaries ginkgo
.PHONY: remoteintegration
remoteintegration: test-binaries ginkgo-remote
+.PHONY: localmachine
+localmachine: test-binaries
+ $(MAKE) ginkgo-run GINKGONODES=1 GINKGOWHAT=pkg/machine/e2e/. HACK=
+
.PHONY: localbenchmarks
localbenchmarks: test-binaries
PATH=$(PATH):$(shell pwd)/hack ACK_GINKGO_RC=true ginkgo \
@@ -639,7 +659,7 @@ podman-release-%.tar.gz: test/version/version
$(eval SUBDIR := podman-v$(call err_if_empty,RELEASE_NUMBER))
$(eval _DSTARGS := "DESTDIR=$(TMPDIR)/$(SUBDIR)" "PREFIX=/usr")
$(eval GOARCH := $*)
- mkdir -p "$(TMPDIR)/$(SUBDIR)"
+ mkdir -p "$(call err_if_empty,TMPDIR)/$(SUBDIR)"
$(MAKE) GOOS=$(GOOS) GOARCH=$(NATIVE_GOARCH) \
clean-binaries docs podman-remote-$(GOOS)-docs
if [[ "$(GOARCH)" != "$(NATIVE_GOARCH)" ]]; then \
@@ -660,7 +680,7 @@ podman-remote-release-%.zip: test/version/version ## Build podman-remote for %=$
$(eval GOOS := $(firstword $(subst _, ,$*)))
$(eval GOARCH := $(lastword $(subst _, ,$*)))
$(eval _GOPLAT := GOOS=$(call err_if_empty,GOOS) GOARCH=$(call err_if_empty,GOARCH))
- mkdir -p "$(TMPDIR)/$(SUBDIR)"
+ mkdir -p "$(call err_if_empty,TMPDIR)/$(SUBDIR)"
$(MAKE) GOOS=$(GOOS) GOARCH=$(NATIVE_GOARCH) \
clean-binaries podman-remote-$(GOOS)-docs
if [[ "$(GOARCH)" != "$(NATIVE_GOARCH)" ]]; then \
@@ -679,8 +699,8 @@ podman-remote-release-%.zip: test/version/version ## Build podman-remote for %=$
.PHONY: podman.msi
podman.msi: test/version/version ## Build podman-remote, package for installation on Windows
- $(MAKE) podman-v$(RELEASE_NUMBER).msi
-podman-v$(RELEASE_NUMBER).msi: podman-remote-windows podman-remote-windows-docs podman-winpath win-sshproxy
+ $(MAKE) podman-v$(call err_if_empty,RELEASE_NUMBER).msi
+podman-v%.msi: test/version/version podman-remote-windows podman-remote-windows-docs podman-winpath win-sshproxy
$(eval DOCFILE := docs/build/remote/windows)
find $(DOCFILE) -print | \
wixl-heat --var var.ManSourceDir --component-group ManFiles \
@@ -715,7 +735,7 @@ package: ## Build rpm packages
# a full path to test installed podman or you risk to call another executable.
.PHONY: package-install
package-install: package ## Install rpm packages
- sudo ${PKG_MANAGER} -y install ${HOME}/rpmbuild/RPMS/*/*.rpm
+ sudo $(call err_if_empty,PKG_MANAGER) -y install ${HOME}/rpmbuild/RPMS/*/*.rpm
/usr/bin/podman version
/usr/bin/podman info # will catch a broken conmon
@@ -741,7 +761,9 @@ install.bin:
install ${SELINUXOPT} -m 755 bin/podman $(DESTDIR)$(BINDIR)/podman
test -z "${SELINUXOPT}" || chcon --verbose --reference=$(DESTDIR)$(BINDIR)/podman bin/podman
install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(LIBEXECPODMAN)
+ifneq ($(shell uname -s),FreeBSD)
install ${SELINUXOPT} -m 755 bin/rootlessport $(DESTDIR)$(LIBEXECPODMAN)/rootlessport
+endif
test -z "${SELINUXOPT}" || chcon --verbose --reference=$(DESTDIR)$(LIBEXECPODMAN)/rootlessport bin/rootlessport
install ${SELINUXOPT} -m 755 -d ${DESTDIR}${TMPFILESDIR}
install ${SELINUXOPT} -m 644 contrib/tmpfile/podman.conf ${DESTDIR}${TMPFILESDIR}/podman.conf
@@ -755,9 +777,9 @@ install.modules-load: # This should only be used by distros which might use ipta
install.man:
install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(MANDIR)/man1
install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(MANDIR)/man5
- install ${SELINUXOPT} -m 644 $(filter %.1,$(MANPAGES_DEST)) -t $(DESTDIR)$(MANDIR)/man1
- install ${SELINUXOPT} -m 644 $(filter %.5,$(MANPAGES_DEST)) -t $(DESTDIR)$(MANDIR)/man5
- install ${SELINUXOPT} -m 644 docs/source/markdown/links/*1 -t $(DESTDIR)$(MANDIR)/man1
+ install ${SELINUXOPT} -m 644 $(filter %.1,$(MANPAGES_DEST)) $(DESTDIR)$(MANDIR)/man1
+ install ${SELINUXOPT} -m 644 $(filter %.5,$(MANPAGES_DEST)) $(DESTDIR)$(MANDIR)/man5
+ install ${SELINUXOPT} -m 644 docs/source/markdown/links/*1 $(DESTDIR)$(MANDIR)/man1
.PHONY: install.completions
install.completions:
@@ -794,7 +816,7 @@ ifneq (,$(findstring systemd,$(BUILDTAGS)))
PODMAN_UNIT_FILES = contrib/systemd/auto-update/podman-auto-update.service \
contrib/systemd/system/podman.service \
contrib/systemd/system/podman-restart.service \
- contrib/systemd/system/podman-play-kube@.service
+ contrib/systemd/system/podman-kube@.service
%.service: %.service.in
sed -e 's;@@PODMAN@@;$(BINDIR)/podman;g' $< >$@.tmp.$$ \
@@ -808,14 +830,14 @@ install.systemd: $(PODMAN_UNIT_FILES)
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.socket ${DESTDIR}${USERSYSTEMDDIR}/podman.socket
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.service ${DESTDIR}${USERSYSTEMDDIR}/podman.service
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-restart.service ${DESTDIR}${USERSYSTEMDDIR}/podman-restart.service
- install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-play-kube@.service ${DESTDIR}${USERSYSTEMDDIR}/podman-play-kube@.service
+ install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-kube@.service ${DESTDIR}${USERSYSTEMDDIR}/podman-kube@.service
# System services
install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.service ${DESTDIR}${SYSTEMDDIR}/podman-auto-update.service
install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.timer ${DESTDIR}${SYSTEMDDIR}/podman-auto-update.timer
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.socket ${DESTDIR}${SYSTEMDDIR}/podman.socket
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.service ${DESTDIR}${SYSTEMDDIR}/podman.service
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-restart.service ${DESTDIR}${SYSTEMDDIR}/podman-restart.service
- install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-play-kube@.service ${DESTDIR}${SYSTEMDDIR}/podman-play-kube@.service
+ install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-kube@.service ${DESTDIR}${SYSTEMDDIR}/podman-kube@.service
rm -f $(PODMAN_UNIT_FILES)
else
install.systemd:
@@ -831,7 +853,7 @@ install.tools: .install.ginkgo .install.golangci-lint .install.bats ## Install n
.PHONY: .install.golangci-lint
.install.golangci-lint:
- VERSION=1.45.2 ./hack/install_golangci.sh
+ VERSION=1.46.2 ./hack/install_golangci.sh
.PHONY: .install.md2man
.install.md2man:
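
Editor's note: the Makefile hunks above route most variable expansions through $(call err_if_empty,VAR), so an unset or empty required variable aborts the build with an explicit error instead of silently producing a malformed command line. Below is a rough Go analogue of that guard, not Podman code; mustVar and the GIT_COMMIT lookup are purely illustrative.

    package main

    import (
    	"fmt"
    	"os"
    )

    // mustVar mirrors the Makefile's err_if_empty guard: fail fast when a
    // required value is empty instead of letting it leak into later commands.
    func mustVar(name, value string) string {
    	if value == "" {
    		fmt.Fprintf(os.Stderr, "required variable %s is undefined or empty\n", name)
    		os.Exit(1)
    	}
    	return value
    }

    func main() {
    	commit := mustVar("GIT_COMMIT", os.Getenv("GIT_COMMIT"))
    	fmt.Println("building with commit", commit)
    }
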
diff --git a/README.md b/README.md
index 1a1e21fe3..a6292a548 100644
--- a/README.md
+++ b/README.md
@@ -3,9 +3,10 @@
# Podman: A tool for managing OCI containers and pods
Podman (the POD MANager) is a tool for managing containers and images, volumes mounted into those containers, and pods made from groups of containers.
+Podman runs containers on Linux, but can also be used on Mac and Windows systems using a Podman-managed virtual machine.
Podman is based on libpod, a library for container lifecycle management that is also contained in this repository. The libpod library provides APIs for managing containers, pods, container images, and volumes.
-* [Latest Version: 4.1.0](https://github.com/containers/podman/releases/tag/v4.1.0)
+* [Latest Version: 4.1.1](https://github.com/containers/podman/releases/tag/v4.1.1)
* Latest Remote client for Windows
* Latest Remote client for macOS
* Latest Static Remote client for Linux
@@ -24,16 +25,15 @@ At a high level, the scope of Podman and libpod is the following:
* Support for pods, groups of containers that share resources and are managed together.
* Support for running containers and pods without root or other elevated privileges.
* Resource isolation of containers and pods.
-* Support for a Docker-compatible CLI interface.
+* Support for a Docker-compatible CLI interface, which can both run containers locally and on remote systems.
* No manager daemon, for improved security and lower resource utilization at idle.
* Support for a REST API providing both a Docker-compatible interface and an improved interface exposing advanced Podman functionality.
-
-Podman presently only supports running containers on Linux. However, we are building a remote client which can run on Windows and macOS and manage Podman containers on a Linux system via the REST API using SSH tunneling.
+* Support for running on Windows and Mac via virtual machines run by `podman machine`.
## Roadmap
-1. Further improvements to the REST API, with a focus on bugfixes and implementing missing functionality
-1. Improvements on rootless containers, with a focus on improving the user experience and exposing presently-unavailable features when possible
+1. A fully-featured GUI frontend for `podman machine`
+1. Further improvements to `podman generate kube` and `podman play kube`
1. Improvements to Pods, including the addition of pod-level resource limits
## Communications
@@ -111,7 +111,7 @@ includes tables showing Docker commands and their Podman equivalent commands.
Tutorials on using Podman.
**[Remote Client](https://github.com/containers/podman/blob/main/docs/tutorials/remote_client.md)**
-A brief how-to on using the Podman remote-client.
+A brief how-to on using the Podman remote client.
**[Basic Setup and Use of Podman in a Rootless environment](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md)**
A tutorial showing the setup and configuration necessary to run Rootless Podman.
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index d4da94865..9e76bf028 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,50 @@
# Release Notes
+## 4.1.1
+### Features
+- Podman machine events are now supported on Windows.
+
+### Changes
+- The output of the `podman load` command now mirrors that of `docker load`.
+
+### Bugfixes
+- Fixed a bug where the `podman play kube` command could panic if the `--log-opt` option was used ([#13356](https://github.com/containers/podman/issues/13356)).
+- Fixed a bug where Podman could, under some circumstances, fail to parse container cgroup paths ([#14146](https://github.com/containers/podman/issues/14146)).
+- Fixed a bug where containers created with the `--sdnotify=conmon` option could send `MAINPID` twice.
+- Fixed a bug where the `podman info` command could fail when run inside an LXC container.
+- Fixed a bug where the pause image of a Pod with a custom ID mappings could not be built ([BZ 2083997](https://bugzilla.redhat.com/show_bug.cgi?id=2083997)).
+- Fixed a bug where, on `podman machine` VMs on Windows, containers could be prematurely terminated with API forwarding was not running ([#13965](https://github.com/containers/podman/issues/13965)).
+- Fixed a bug where removing a container with a zombie exec session would fail the first time, but succeed for subsequent calls ([#14252](https://github.com/containers/podman/issues/14252)).
+- Fixed a bug where a dangling ID in the database could render Podman unusable.
+- Fixed a bug where containers with memory limits could not be created when Podman was run in a root cgroup ([#14236](https://github.com/containers/podman/issues/14236)).
+- Fixed a bug where the `--security-opt` option to `podman run` and `podman create` did not support the `no-new-privileges:true` and `no-new-privileges:false` options (the only supported separator was `=`, not `:`) ([#14133](https://github.com/containers/podman/issues/14133)).
+- Fixed a bug where containers that did not create a network namespace (e.g. containers created with `--network none` or `--network ns:/path/to/ns`) could not be restored from checkpoints ([#14389](https://github.com/containers/podman/issues/14389)).
+- Fixed a bug where `podman-restart.service` could, if enabled, cause system shutdown to hang for 90 seconds ([#14434](https://github.com/containers/podman/issues/14434)).
+- Fixed a bug where the `podman stats` command would, when run as root on a container that had the `podman network disconnect` command run on it or that set a custom network interface name, return an error ([#13824](https://github.com/containers/podman/issues/13824)).
+- Fixed a bug where the remote Podman client's `podman pod create` command would error when the `--uidmap` option was used ([#14233](https://github.com/containers/podman/issues/14233)).
+- Fixed a bug where cleaning up systemd units and timers related to healthchecks was subject to race conditions and could fail.
+- Fixed a bug where the default network mode of containers created by the remote Podman client was assigned by the client, not the server ([#14368](https://github.com/containers/podman/issues/14368)).
+- Fixed a bug where containers joining a pod that was created with `--network=host` would receive a private network namespace ([#13763](https://github.com/containers/podman/issues/13763)).
+- Fixed a bug where `podman machine rm --force` would remove files related to the VM before stopping it, causing issues if removal was interrupted.
+- Fixed a bug where `podman logs` would omit the last line of a container's logs if the log did not end in a newline ([#14458](https://github.com/containers/podman/issues/14458)).
+- Fixed a bug where network cleanup was nonfunctional for containers which used a custom user namespace and were initialized via API ([#14465](https://github.com/containers/podman/issues/14465)).
+- Fixed a bug where some options (including volumes) for containers that joined pods were overwritten by the infra container ([#14454](https://github.com/containers/podman/issues/14454)).
+- Fixed a bug where the `--file-locks` option to `podman container restore` was ignored, such that file locks checkpointed by `podman container checkpoint --file-locks` were not restored.
+- Fixed a bug where signals sent to a Podman attach session with `--sig-proxy` enabled at the exact moment the container that was attached to exited could cause error messages to be printed.
+- Fixed a bug where running the `podman machine start` command more than once (simultaneously) on the same machine would cause errors.
+- Fixed a bug where the `podman stats` command could not be run on containers that were not running (it now reports all-0s statistics for Docker compatibility) ([#14498](https://github.com/containers/podman/issues/14498)).
+
+### API
+- Fixed a bug where images pulled from a private registry could not be accessed via shortname using the Compat API endpoints ([#14291](https://github.com/containers/podman/issues/14291)).
+- Fixed a bug where the Compat Delete API for Images would return an incorrect status code (500) when attempting to delete images that are in use ([#14208](https://github.com/containers/podman/issues/14208)).
+- Fixed a bug where the Compat Build API for Images would include the build's `STDERR` output even if the `quiet` parameter was true.
+- Fixed a bug where the Libpod Play Kube API would overwrite any log driver specified by query parameter with the system default.
+
+### Misc
+- The `podman auto-update` command now creates an event when it is run.
+- Error messages printed when Podman's temporary files directory is not writable have been improved.
+- Units for memory limits accepted by Podman commands were incorrectly stated by documentation as megabytes, instead of mebibytes; this has now been corrected ([#14187](https://github.com/containers/podman/issues/14187)).
+
## 4.1.0
### Features
- Podman now supports Docker Compose v2.2 and higher ([#11822](https://github.com/containers/podman/issues/11822)). Please note that it may be necessary to disable the use of Buildkit by setting the environment variable `DOCKER_BUILDKIT=0`.
diff --git a/RELEASE_PROCESS.md b/RELEASE_PROCESS.md
index 3f63e5466..66cc74693 100644
--- a/RELEASE_PROCESS.md
+++ b/RELEASE_PROCESS.md
@@ -162,7 +162,7 @@ spelled with complete minutiae.
release branch (`git checkout upstream/vX.Y`).
1. Create a new local working-branch to develop the release PR,
`git checkout -b bump_vX.Y.Z`.
- 1. Lookup the *COMMIT ID* of the last release,
+ 1. Look up the *COMMIT ID* of the last release,
`git log -1 $(git tag | sort -V | tail -1)`.
1. Edit `version/version.go` and bump the `Version` value to the new
release version. If there were API changes, also bump `APIVersion` value.
diff --git a/cmd/podman-mac-helper/install.go b/cmd/podman-mac-helper/install.go
index a1b99e66c..713bdfcdf 100644
--- a/cmd/podman-mac-helper/install.go
+++ b/cmd/podman-mac-helper/install.go
@@ -5,6 +5,7 @@ package main
import (
"bytes"
+ "errors"
"fmt"
"io"
"io/fs"
@@ -14,7 +15,6 @@ import (
"syscall"
"text/template"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -111,7 +111,7 @@ func install(cmd *cobra.Command, args []string) error {
file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_EXCL, rw_r_r)
if err != nil {
- return errors.Wrap(err, "error creating helper plist file")
+ return fmt.Errorf("creating helper plist file: %w", err)
}
defer file.Close()
_, err = buf.WriteTo(file)
@@ -120,7 +120,7 @@ func install(cmd *cobra.Command, args []string) error {
}
if err = runDetectErr("launchctl", "load", fileName); err != nil {
- return errors.Wrap(err, "launchctl failed loading service")
+ return fmt.Errorf("launchctl failed loading service: %w", err)
}
return nil
@@ -133,13 +133,13 @@ func restrictRecursive(targetDir string, until string) error {
return err
}
if info.Mode()&fs.ModeSymlink != 0 {
- return errors.Errorf("symlinks not allowed in helper paths (remove them and rerun): %s", targetDir)
+ return fmt.Errorf("symlinks not allowed in helper paths (remove them and rerun): %s", targetDir)
}
if err = os.Chown(targetDir, 0, 0); err != nil {
- return errors.Wrap(err, "could not update ownership of helper path")
+ return fmt.Errorf("could not update ownership of helper path: %w", err)
}
if err = os.Chmod(targetDir, rwx_rx_rx|fs.ModeSticky); err != nil {
- return errors.Wrap(err, "could not update permissions of helper path")
+ return fmt.Errorf("could not update permissions of helper path: %w", err)
}
targetDir = filepath.Dir(targetDir)
}
@@ -162,7 +162,7 @@ func verifyRootDeep(path string) error {
stat := info.Sys().(*syscall.Stat_t)
if stat.Uid != 0 {
- return errors.Errorf("installation target path must be solely owned by root: %s is not", current)
+ return fmt.Errorf("installation target path must be solely owned by root: %s is not", current)
}
if info.Mode()&fs.ModeSymlink != 0 {
@@ -193,7 +193,7 @@ func verifyRootDeep(path string) error {
func installExecutable(user string) (string, error) {
// Since the installed executable runs as root, as a precaution verify root ownership of
- // the entire installation path, and utilize sticky + read only perms for the helper path
+ // the entire installation path, and utilize sticky + read-only perms for the helper path
// suffix. The goal is to help users harden against privilege escalation from loose
// filesystem permissions.
//
@@ -206,7 +206,7 @@ func installExecutable(user string) (string, error) {
targetDir := filepath.Join(installPrefix, "podman", "helper", user)
if err := os.MkdirAll(targetDir, rwx_rx_rx); err != nil {
- return "", errors.Wrap(err, "could not create helper directory structure")
+ return "", fmt.Errorf("could not create helper directory structure: %w", err)
}
// Correct any incorrect perms on previously existing directories and verify no symlinks
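
Editor's note: the podman-mac-helper changes above drop github.com/pkg/errors in favor of the standard library, turning errors.Wrap(err, "msg") into fmt.Errorf("msg: %w", err), which keeps the underlying cause reachable via errors.Is and errors.As. A minimal sketch of the pattern, assuming only the standard library; openPlist is an illustrative stand-in, not the helper's actual function.

    package main

    import (
    	"errors"
    	"fmt"
    	"io/fs"
    	"os"
    )

    // openPlist is an illustrative stand-in for the helper's file handling.
    func openPlist(path string) error {
    	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o644)
    	if err != nil {
    		// %w wraps the cause, so callers can still inspect it.
    		return fmt.Errorf("creating helper plist file: %w", err)
    	}
    	return f.Close()
    }

    func main() {
    	err := openPlist("/no/such/dir/helper.plist")
    	fmt.Println(err)                            // message text keeps the wrapped cause
    	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: the original error survives wrapping
    }
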
diff --git a/cmd/podman-mac-helper/main.go b/cmd/podman-mac-helper/main.go
index 8d995519f..937cb8433 100644
--- a/cmd/podman-mac-helper/main.go
+++ b/cmd/podman-mac-helper/main.go
@@ -4,6 +4,7 @@
package main
import (
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -13,7 +14,6 @@ import (
"strconv"
"strings"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -73,7 +73,7 @@ func getUserInfo(name string) (string, string, string, error) {
entry := readCapped(output)
elements := strings.Split(entry, ":")
if len(elements) < 9 || elements[0] != name {
- return "", "", "", errors.New("Could not lookup user")
+ return "", "", "", errors.New("Could not look up user")
}
return elements[0], elements[2], elements[8], nil
@@ -90,14 +90,14 @@ func getUser() (string, string, string, error) {
_, uid, home, err := getUserInfo(name)
if err != nil {
- return "", "", "", fmt.Errorf("could not lookup user: %s", name)
+ return "", "", "", fmt.Errorf("could not look up user: %s", name)
}
id, err := strconv.Atoi(uid)
if err != nil {
return "", "", "", fmt.Errorf("invalid uid for user: %s", name)
}
if id == 0 {
- return "", "", "", fmt.Errorf("unexpected root user")
+ return "", "", "", errors.New("unexpected root user")
}
return name, uid, home, nil
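
Editor's note: where a message carries no formatting verbs, the commit also switches fmt.Errorf to errors.New (see the "unexpected root user" hunk above); errors.New is the idiomatic choice for a fixed string and avoids a needless formatting pass. A short illustration of the distinction; errRootUser and checkUser are made-up names, not part of the helper.

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Fixed message: errors.New, no verbs needed.
    var errRootUser = errors.New("unexpected root user")

    func checkUser(name string, uid int) error {
    	if uid == 0 {
    		return errRootUser
    	}
    	if name == "" {
    		// Dynamic message: fmt.Errorf earns its keep because it formats a value.
    		return fmt.Errorf("could not look up user: %q", name)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkUser("core", 0))   // unexpected root user
    	fmt.Println(checkUser("core", 501)) // <nil>
    }
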
diff --git a/cmd/podman-mac-helper/uninstall.go b/cmd/podman-mac-helper/uninstall.go
index f72d0efd1..0c21b27e9 100644
--- a/cmd/podman-mac-helper/uninstall.go
+++ b/cmd/podman-mac-helper/uninstall.go
@@ -9,7 +9,6 @@ import (
"os/exec"
"path/filepath"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -48,13 +47,13 @@ func uninstall(cmd *cobra.Command, args []string) error {
if err := os.Remove(fileName); err != nil {
if !os.IsNotExist(err) {
- return errors.Errorf("could not remove plist file: %s", fileName)
+ return fmt.Errorf("could not remove plist file: %s", fileName)
}
}
helperPath := filepath.Join(installPrefix, "podman", "helper", userName)
if err := os.RemoveAll(helperPath); err != nil {
- return errors.Errorf("could not remove helper binary path: %s", helperPath)
+ return fmt.Errorf("could not remove helper binary path: %s", helperPath)
}
return nil
}
diff --git a/cmd/podman/auto-update.go b/cmd/podman/auto-update.go
index 1dc29530e..88ef0ec88 100644
--- a/cmd/podman/auto-update.go
+++ b/cmd/podman/auto-update.go
@@ -13,7 +13,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -63,7 +62,7 @@ func init() {
func autoUpdate(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
// Backwards compat. System tests expect this error string.
- return errors.Errorf("`%s` takes no arguments", cmd.CommandPath())
+ return fmt.Errorf("`%s` takes no arguments", cmd.CommandPath())
}
allReports, failures := registry.ContainerEngine().AutoUpdate(registry.GetContext(), autoUpdateOptions.AutoUpdateOptions)
diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go
index 58dff3578..6e6c33f9b 100644
--- a/cmd/podman/common/completion.go
+++ b/cmd/podman/common/completion.go
@@ -4,9 +4,13 @@ import (
"bufio"
"fmt"
"os"
+ "path"
"reflect"
+ "strconv"
"strings"
+ "unicode"
+ libimageDefine "github.com/containers/common/libimage/define"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
@@ -15,8 +19,10 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/rootless"
+ "github.com/containers/podman/v4/pkg/signal"
systemdDefine "github.com/containers/podman/v4/pkg/systemd/define"
"github.com/containers/podman/v4/pkg/util"
+ securejoin "github.com/cyphar/filepath-securejoin"
"github.com/spf13/cobra"
)
@@ -65,7 +71,7 @@ func setupImageEngine(cmd *cobra.Command) (entities.ImageEngine, error) {
return nil, err
}
// we also need to set up the container engine since this
- // is required to setup the rootless namespace
+ // is required to set up the rootless namespace
if _, err = setupContainerEngine(cmd); err != nil {
return nil, err
}
@@ -278,6 +284,90 @@ func getNetworks(cmd *cobra.Command, toComplete string, cType completeType) ([]s
return suggestions, cobra.ShellCompDirectiveNoFileComp
}
+func fdIsNotDir(f *os.File) bool {
+ stat, err := f.Stat()
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ return true
+ }
+ return !stat.IsDir()
+}
+
+func getPathCompletion(root string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if toComplete == "" {
+ toComplete = "/"
+ }
+ // Important: securejoin is required to make sure we never leave the root mount point
+ userpath, err := securejoin.SecureJoin(root, toComplete)
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ var base string
+ f, err := os.Open(userpath)
+ // when error or file is not dir get the parent path to stat
+ if err != nil || fdIsNotDir(f) {
+ // Do not use path.Dir() since this cleans the paths which
+ // then no longer matches the user input.
+ userpath, base = path.Split(userpath)
+ toComplete, _ = path.Split(toComplete)
+ f, err = os.Open(userpath)
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ }
+
+ if fdIsNotDir(f) {
+ // nothing to complete since it is no dir
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+
+ entries, err := f.ReadDir(-1)
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ if len(entries) == 0 {
+ // path is empty dir, just add the trailing slash and no space
+ if !strings.HasSuffix(toComplete, "/") {
+ toComplete += "/"
+ }
+ return []string{toComplete}, cobra.ShellCompDirectiveDefault | cobra.ShellCompDirectiveNoSpace
+ }
+ completions := make([]string, 0, len(entries))
+ count := 0
+ for _, e := range entries {
+ if strings.HasPrefix(e.Name(), base) {
+ suf := ""
+ // When the entry is an directory we add the "/" as suffix and do not want to add space
+ // to match normal shell completion behavior.
+ // Just inc counter again to fake more than one entry in this case and thus get no space.
+ if e.IsDir() {
+ suf = "/"
+ count++
+ }
+ completions = append(completions, simplePathJoinUnix(toComplete, e.Name()+suf))
+ count++
+ }
+ }
+ directive := cobra.ShellCompDirectiveDefault
+ if count > 1 {
+ // when we have more than one match we do not want to add a space after the completion
+ directive |= cobra.ShellCompDirectiveNoSpace
+ }
+ return completions, directive
+}
+
+// simplePathJoinUnix joins to path components by adding a slash only if p1 doesn't end with one.
+// We cannot use path.Join() for the completions logic because this one always calls Clean() on
+// the path which changes it from the input.
+func simplePathJoinUnix(p1, p2 string) string {
+ if p1[len(p1)-1] == '/' {
+ return p1 + p2
+ }
+ return p1 + "/" + p2
+}
+
// validCurrentCmdLine validates the current cmd line
// It utilizes the Args function from the cmd struct
// In most cases the Args function validates the args length but it
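
Editor's note: the completion helpers added in the hunk above avoid path.Join deliberately — Join calls Clean, which would rewrite the user's partially typed input — and they route every candidate through securejoin.SecureJoin so completion can never escape the mounted image or container root. A small demonstration; securejoin.SecureJoin is the real github.com/cyphar/filepath-securejoin call vendored by this change, while the main function and the /mnt/image-root path are illustrative only.

    package main

    import (
    	"fmt"
    	"path"

    	securejoin "github.com/cyphar/filepath-securejoin"
    )

    // simplePathJoinUnix mirrors the helper added in completion.go: join without
    // cleaning, so the user's typed prefix (e.g. "./etc/") survives untouched.
    func simplePathJoinUnix(p1, p2 string) string {
    	if p1[len(p1)-1] == '/' {
    		return p1 + p2
    	}
    	return p1 + "/" + p2
    }

    func main() {
    	fmt.Println(path.Join("./etc/", "hosts"))          // "etc/hosts" - Clean() dropped the "./"
    	fmt.Println(simplePathJoinUnix("./etc/", "hosts")) // "./etc/hosts" - input preserved

    	// SecureJoin resolves the path inside the root and refuses to escape it.
    	p, err := securejoin.SecureJoin("/mnt/image-root", "../../etc/passwd")
    	fmt.Println(p, err) // "/mnt/image-root/etc/passwd" <nil>
    }
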
@@ -430,6 +520,16 @@ func AutocompletePodsRunning(cmd *cobra.Command, args []string, toComplete strin
return getPods(cmd, toComplete, completeDefault, "running", "degraded")
}
+// AutoCompletePodsPause - Autocomplete only paused pod names
+// When a pod has a few containers paused, that ends up in degraded state
+// So autocomplete degraded pod names as well
+func AutoCompletePodsPause(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if !validCurrentCmdLine(cmd, args, toComplete) {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return getPods(cmd, toComplete, completeDefault, "paused", "degraded")
+}
+
// AutocompleteForKube - Autocomplete all Podman objects supported by kube generate.
func AutocompleteForKube(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if !validCurrentCmdLine(cmd, args, toComplete) {
@@ -495,6 +595,11 @@ func AutocompleteImages(cmd *cobra.Command, args []string, toComplete string) ([
return getImages(cmd, toComplete)
}
+// AutocompleteImageSearchFilters - Autocomplate `search --filter`.
+func AutocompleteImageSearchFilters(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return libimageDefine.SearchFilters, cobra.ShellCompDirectiveNoFileComp
+}
+
// AutocompletePodExitPolicy - Autocomplete pod exit policy.
func AutocompletePodExitPolicy(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return config.PodExitPolicies, cobra.ShellCompDirectiveNoFileComp
@@ -514,8 +619,32 @@ func AutocompleteCreateRun(cmd *cobra.Command, args []string, toComplete string)
}
return getImages(cmd, toComplete)
}
- // TODO: add path completion for files in the image
- return nil, cobra.ShellCompDirectiveDefault
+ // Mount the image and provide path completion
+ engine, err := setupImageEngine(cmd)
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+
+ resp, err := engine.Mount(registry.Context(), []string{args[0]}, entities.ImageMountOptions{})
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ defer func() {
+ _, err := engine.Unmount(registry.Context(), []string{args[0]}, entities.ImageUnmountOptions{})
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ }
+ }()
+ if len(resp) != 1 {
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+
+ // Use ShellCompDirectiveDefault to still provide normal shell completion in
+ // case no path matches. This is useful if someone tries to get completion
+ // for paths that are not available in the image, e.g. /proc/...
+ return getPathCompletion(resp[0].Path, toComplete)
}
// AutocompleteRegistries - Autocomplete registries.
@@ -563,14 +692,40 @@ func AutocompleteCpCommand(cmd *cobra.Command, args []string, toComplete string)
return nil, cobra.ShellCompDirectiveNoFileComp
}
if len(args) < 2 {
+ if i := strings.IndexByte(toComplete, ':'); i > -1 {
+ // Looks like the user already set the container.
+ // Let's mount it and provide path completion for files inside the container.
+ engine, err := setupContainerEngine(cmd)
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+
+ resp, err := engine.ContainerMount(registry.Context(), []string{toComplete[:i]}, entities.ContainerMountOptions{})
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ defer func() {
+ _, err := engine.ContainerUnmount(registry.Context(), []string{toComplete[:i]}, entities.ContainerUnmountOptions{})
+ if err != nil {
+ cobra.CompErrorln(err.Error())
+ }
+ }()
+ if len(resp) != 1 {
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ comps, directive := getPathCompletion(resp[0].Path, toComplete[i+1:])
+ return prefixSlice(toComplete[:i+1], comps), directive
+ }
+ // Suggest containers when they match the input; otherwise normal shell completion is used.
containers, _ := getContainers(cmd, toComplete, completeDefault)
for _, container := range containers {
- // TODO: Add path completion for inside the container if possible
if strings.HasPrefix(container, toComplete) {
- return containers, cobra.ShellCompDirectiveNoSpace
+ return suffixCompSlice(":", containers), cobra.ShellCompDirectiveNoSpace
}
}
- // else complete paths
+ // else complete paths on the host
return nil, cobra.ShellCompDirectiveDefault
}
// don't complete more than 2 args
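(Illustrative aside: roughly how the container:path splitting above plays out. prefixSlice is assumed to simply prepend the given prefix to every suggestion, as its use here implies; the path suggestions are hard-coded to keep the sketch self-contained.)

package main

import (
	"fmt"
	"strings"
)

// assumed behavior of the prefixSlice helper used in completion.go
func prefixSlice(pre string, s []string) []string {
	out := make([]string, 0, len(s))
	for _, v := range s {
		out = append(out, pre+v)
	}
	return out
}

func main() {
	// hypothetical input for `podman cp`: the user already typed a container name and a colon
	toComplete := "myctr:/et"
	i := strings.IndexByte(toComplete, ':')
	// path suggestions would normally come from getPathCompletion against the mounted root
	paths := []string{"/etc/"}
	// re-attach the "myctr:" prefix so the shell sees suggestions matching what was typed
	fmt.Println(prefixSlice(toComplete[:i+1], paths)) // [myctr:/etc/]
}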
@@ -594,7 +749,9 @@ func AutocompleteRunlabelCommand(cmd *cobra.Command, args []string, toComplete s
return nil, cobra.ShellCompDirectiveNoFileComp
}
if len(args) == 0 {
- // FIXME: What labels can we recommend here?
+ // This is unfortunate because the argument order is label followed by image.
+ // If it were the other way around we could inspect the first arg and get
+ // all labels from it to suggest them.
return nil, cobra.ShellCompDirectiveNoFileComp
}
if len(args) == 1 {
@@ -799,8 +956,7 @@ func AutocompleteLogDriver(cmd *cobra.Command, args []string, toComplete string)
// AutocompleteLogOpt - Autocomplete log-opt options.
// -> "path=", "tag="
func AutocompleteLogOpt(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
- // FIXME: are these the only one? the man page states these but in the current shell completion they are more options
- logOptions := []string{"path=", "tag="}
+ logOptions := []string{"path=", "tag=", "max-size="}
if strings.HasPrefix(toComplete, "path=") {
return nil, cobra.ShellCompDirectiveDefault
}
@@ -810,7 +966,7 @@ func AutocompleteLogOpt(cmd *cobra.Command, args []string, toComplete string) ([
// AutocompletePullOption - Autocomplete pull options for create and run command.
// -> "always", "missing", "never"
func AutocompletePullOption(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
- pullOptions := []string{"always", "missing", "never"}
+ pullOptions := []string{"always", "missing", "never", "newer"}
return pullOptions, cobra.ShellCompDirectiveNoFileComp
}
@@ -839,10 +995,26 @@ func AutocompleteSecurityOption(cmd *cobra.Command, args []string, toComplete st
}
// AutocompleteStopSignal - Autocomplete stop signal options.
-// -> "SIGHUP", "SIGINT", "SIGKILL", "SIGTERM"
+// Autocompletes signals in lower or upper case, depending on the user input.
func AutocompleteStopSignal(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
- // FIXME: add more/different signals?
- stopSignals := []string{"SIGHUP", "SIGINT", "SIGKILL", "SIGTERM"}
+ // convertCase will convert a string to lowercase only if the user input is lowercase
+ convertCase := func(s string) string { return s }
+ if len(toComplete) > 0 && unicode.IsLower(rune(toComplete[0])) {
+ convertCase = strings.ToLower
+ }
+
+ prefix := ""
+ // if the input starts with "SI" we have to add "SIG" in front,
+ // since the signal map does not have this prefix but the option
+ // allows signals with and without the "SIG" prefix
+ if strings.HasPrefix(toComplete, convertCase("SI")) {
+ prefix = "SIG"
+ }
+
+ stopSignals := make([]string, 0, len(signal.SignalMap))
+ for sig := range signal.SignalMap {
+ stopSignals = append(stopSignals, convertCase(prefix+sig))
+ }
return stopSignals, cobra.ShellCompDirectiveNoFileComp
}
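(Illustrative aside: the case handling above in isolation. The signal map below is a hypothetical subset of pkg/signal's SignalMap, and the prefix filter only stands in for the filtering the shell would normally do.)

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// hypothetical subset of the signal map; the real one lives in pkg/signal
var signalMap = map[string]int{"HUP": 1, "INT": 2, "KILL": 9, "TERM": 15}

// completeStopSignal sketches the new behavior: suggestions follow the case of the
// user input, and the SIG prefix is added only when the input already starts with it.
func completeStopSignal(toComplete string) []string {
	convertCase := func(s string) string { return s }
	if len(toComplete) > 0 && unicode.IsLower(rune(toComplete[0])) {
		convertCase = strings.ToLower
	}
	prefix := ""
	if strings.HasPrefix(toComplete, convertCase("SI")) {
		prefix = "SIG"
	}
	out := make([]string, 0, len(signalMap))
	for sig := range signalMap {
		s := convertCase(prefix + sig)
		if strings.HasPrefix(s, toComplete) {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	fmt.Println(completeStopSignal("sigk")) // [sigkill]
	fmt.Println(completeStopSignal("K"))    // [KILL]
}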
@@ -963,9 +1135,22 @@ func AutocompleteNetworkFlag(cmd *cobra.Command, args []string, toComplete strin
return append(networks, suggestions...), dir
}
+type formatSuggestion struct {
+ fieldname string
+ suffix string
+}
+
+func convertFormatSuggestions(suggestions []formatSuggestion) []string {
+ completions := make([]string, 0, len(suggestions))
+ for _, f := range suggestions {
+ completions = append(completions, f.fieldname+f.suffix)
+ }
+ return completions
+}
+
// AutocompleteFormat - Autocomplete json or a given struct to use for a go template.
// The input can be nil, In this case only json will be autocompleted.
-// This function will only work for structs other types are not supported.
+// This function will only work for a pointer to a struct; other types are not supported.
// When "{{." is typed the field and method names of the given struct will be completed.
// This also works recursive for nested structs.
func AutocompleteFormat(o interface{}) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
@@ -994,6 +1179,12 @@ func AutocompleteFormat(o interface{}) func(cmd *cobra.Command, args []string, t
// split this into it struct field names
fields := strings.Split(field[len(field)-1], ".")
f := reflect.ValueOf(o)
+ if f.Kind() != reflect.Ptr {
+ // We panic here to make sure that all callers pass the value by reference.
+ // If someone passes it by value then all podman commands will panic since
+ // this function is run at init time.
+ panic("AutocompleteFormat: passed value must be a pointer to a struct")
+ }
for i := 1; i < len(fields); i++ {
// last field get all names to suggest
if i == len(fields)-1 {
@@ -1002,61 +1193,83 @@ func AutocompleteFormat(o interface{}) func(cmd *cobra.Command, args []string, t
toCompArr := strings.Split(toComplete, ".")
toCompArr[len(toCompArr)-1] = ""
toComplete = strings.Join(toCompArr, ".")
- return prefixSlice(toComplete, suggestions), cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
+ return prefixSlice(toComplete, convertFormatSuggestions(suggestions)), cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
}
- val := getActualStructType(f)
- if val == nil {
- // no struct return nothing to complete
+ // first follow the pointer and create the element when it is nil
+ f = actualReflectValue(f)
+ switch f.Kind() {
+ case reflect.Struct:
+ for j := 0; j < f.NumField(); j++ {
+ field := f.Type().Field(j)
+ // This is a bit weird, but when we have an embedded nil struct,
+ // calling FieldByName on a name which is present on that struct will panic.
+ // Therefore we have to init them (non-nil ptr), see https://github.com/containers/podman/issues/14223
+ if field.Anonymous && f.Field(j).Type().Kind() == reflect.Ptr {
+ f.Field(j).Set(reflect.New(f.Field(j).Type().Elem()))
+ }
+ }
+ // set the next struct field
+ f = f.FieldByName(fields[i])
+ case reflect.Map:
+ rtype := f.Type().Elem()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem()
+ }
+ f = reflect.New(rtype)
+ case reflect.Func:
+ if f.Type().NumOut() != 1 {
+ // unsupported type return nothing
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ f = reflect.New(f.Type().Out(0))
+ default:
+ // unsupported type return nothing
return nil, cobra.ShellCompDirectiveNoFileComp
}
- f = *val
-
- // set the next struct field
- f = f.FieldByName(fields[i])
}
return nil, cobra.ShellCompDirectiveNoFileComp
}
}
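(Illustrative aside: the effect of the new pointer check in isolation. The report struct is hypothetical; the guard mirrors the panic added above so that passing a value instead of a pointer is caught immediately at init time rather than surfacing later.)

package main

import (
	"fmt"
	"reflect"
)

// hypothetical report type standing in for the structs passed to AutocompleteFormat
type report struct {
	ID   string
	Name string
}

// mustBePointer mirrors the new guard: callers must pass a pointer, otherwise
// the completion setup panics when the command is initialized.
func mustBePointer(o interface{}) {
	if reflect.ValueOf(o).Kind() != reflect.Ptr {
		panic("AutocompleteFormat: passed value must be a pointer to a struct")
	}
}

func main() {
	mustBePointer(&report{}) // fine
	fmt.Println("pointer accepted")
	// mustBePointer(report{}) // would panic, catching the bug at development time
}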
-// getActualStructType take the value and check if it is a struct,
+// actualReflectValue takes the value,
// if it is pointer it will dereference it and when it is nil,
-// it will create a new value from it to get the actual struct
-// returns nil when type is not a struct
-func getActualStructType(f reflect.Value) *reflect.Value {
+// it will create a new value from it
+func actualReflectValue(f reflect.Value) reflect.Value {
// follow the pointer first
if f.Kind() == reflect.Ptr {
// if the pointer is nil we create a new value from the elements type
- // this allows us to follow nil pointers and get the actual struct fields
+ // this allows us to follow nil pointers and get the actual type
if f.IsNil() {
f = reflect.New(f.Type().Elem())
}
f = f.Elem()
}
- // we only support structs
- if f.Kind() != reflect.Struct {
- return nil
- }
- return &f
+ return f
}
// getStructFields reads all struct field names and method names and returns them.
-func getStructFields(f reflect.Value, prefix string) []string {
- var suggestions []string
+func getStructFields(f reflect.Value, prefix string) []formatSuggestion {
+ var suggestions []formatSuggestion
if f.IsValid() {
suggestions = append(suggestions, getMethodNames(f, prefix)...)
}
- val := getActualStructType(f)
- if val == nil {
- // no struct return nothing to complete
+ f = actualReflectValue(f)
+ // we only support structs
+ if f.Kind() != reflect.Struct {
return suggestions
}
- f = *val
+ var anonymous []formatSuggestion
// loop over all field names
for j := 0; j < f.NumField(); j++ {
field := f.Type().Field(j)
+ // check if struct field is not exported, templates only use exported fields
+ // PkgPath is always empty for exported fields
+ if field.PkgPath != "" {
+ continue
+ }
fname := field.Name
suffix := "}}"
kind := field.Type.Kind()
@@ -1065,27 +1278,63 @@ func getStructFields(f reflect.Value, prefix string) []string {
kind = field.Type.Elem().Kind()
}
// when we have a nested struct do not append braces instead append a dot
- if kind == reflect.Struct {
+ if kind == reflect.Struct || kind == reflect.Map {
suffix = "."
}
// if field is anonymous add the child fields as well
if field.Anonymous {
- suggestions = append(suggestions, getStructFields(f.Field(j), prefix)...)
- } else if strings.HasPrefix(fname, prefix) {
+ anonymous = append(anonymous, getStructFields(f.Field(j), prefix)...)
+ }
+ if strings.HasPrefix(fname, prefix) {
// add field name with suffix
- suggestions = append(suggestions, fname+suffix)
+ suggestions = append(suggestions, formatSuggestion{fieldname: fname, suffix: suffix})
+ }
+ }
+outer:
+ for _, ano := range anonymous {
+ // we should only add anonymous child fields if they are not already present.
+ for _, sug := range suggestions {
+ if ano.fieldname == sug.fieldname {
+ continue outer
+ }
}
+ suggestions = append(suggestions, ano)
}
return suggestions
}
-func getMethodNames(f reflect.Value, prefix string) []string {
- suggestions := make([]string, 0, f.NumMethod())
+func getMethodNames(f reflect.Value, prefix string) []formatSuggestion {
+ suggestions := make([]formatSuggestion, 0, f.NumMethod())
for j := 0; j < f.NumMethod(); j++ {
- fname := f.Type().Method(j).Name
+ method := f.Type().Method(j)
+ // in a template we can only run functions with one return value
+ if method.Func.Type().NumOut() != 1 {
+ continue
+ }
+ // when we have a nested struct do not append braces instead append a dot
+ kind := method.Func.Type().Out(0).Kind()
+ suffix := "}}"
+ if kind == reflect.Struct || kind == reflect.Map {
+ suffix = "."
+ }
+ // From a template user's POV it is not important whether they use a struct field or method.
+ // They only notice the difference when the function requires arguments.
+ // So let's be nice and let the user know that this method requires arguments via the help text.
+ // Note: since this is actually a method on a type, the first argument (the receiver) is always fixed, so we should skip it.
+ num := method.Func.Type().NumIn() - 1
+ if num > 0 {
+ // everything after the tab is shown by the completion scripts as help text when enabled
+ // overwrite the suffix because the method expects arguments
+ suffix = "\tThis is a function and requires " + strconv.Itoa(num) + " argument"
+ if num > 1 {
+ // add plural s
+ suffix += "s"
+ }
+ }
+ fname := method.Name
if strings.HasPrefix(fname, prefix) {
// add method name with closing braces
- suggestions = append(suggestions, fname+"}}")
+ suggestions = append(suggestions, formatSuggestion{fieldname: fname, suffix: suffix})
}
}
return suggestions
@@ -1368,8 +1617,14 @@ func AutocompleteClone(cmd *cobra.Command, args []string, toComplete string) ([]
}
switch len(args) {
case 0:
+ if cmd.Parent().Name() == "pod" { // needs to be " pod " to exclude 'podman'
+ return getPods(cmd, toComplete, completeDefault)
+ }
return getContainers(cmd, toComplete, completeDefault)
case 2:
+ if cmd.Parent().Name() == "pod" {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
return getImages(cmd, toComplete)
}
return nil, cobra.ShellCompDirectiveNoFileComp
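(Illustrative aside: the parent-name check used by the clone completion above. The command tree is a hypothetical stand-in for `podman pod clone` vs `podman container clone`.)

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// hypothetical command tree mirroring `podman pod clone`
	root := &cobra.Command{Use: "podman"}
	pod := &cobra.Command{Use: "pod"}
	clone := &cobra.Command{Use: "clone", Run: func(cmd *cobra.Command, args []string) {}}
	pod.AddCommand(clone)
	root.AddCommand(pod)

	// the completion function checks the parent's name to decide whether to
	// suggest pods or containers for the first argument
	fmt.Println(clone.Parent().Name() == "pod") // true
}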
diff --git a/cmd/podman/common/completion_test.go b/cmd/podman/common/completion_test.go
index 13f45a662..d8be48ad7 100644
--- a/cmd/podman/common/completion_test.go
+++ b/cmd/podman/common/completion_test.go
@@ -14,11 +14,29 @@ type Car struct {
HP *int
Displacement int
}
- Extras map[string]string
+ Extras map[string]Extra
+ // also ensure it will work with pointers
+ Extras2 map[string]*Extra
+}
+
+type Extra struct {
+ Name1 string
+ Name2 string
}
type Anonymous struct {
Hello string
+ // The name should match the testStruct Name below. This is used to make
+ // sure the logic uses the actual struct fields before the embedded ones.
+ Name struct {
+ Suffix string
+ Prefix string
+ }
+}
+
+// The name should match the testStruct Age name below.
+func (a Anonymous) Age() int {
+ return 0
}
func (c Car) Type() string {
@@ -31,6 +49,20 @@ func (c *Car) Color() string {
return ""
}
+// This is required for reflect testing.
+//nolint:unused
+func (c Car) internal() int {
+ return 0
+}
+
+func (c Car) TwoOut() (string, string) {
+ return "", ""
+}
+
+func (c Car) Struct() Car {
+ return Car{}
+}
+
func TestAutocompleteFormat(t *testing.T) {
testStruct := struct {
Name string
@@ -38,10 +70,10 @@ func TestAutocompleteFormat(t *testing.T) {
Car *Car
Car2 *Car
*Anonymous
+ private int
}{}
testStruct.Car = &Car{}
- testStruct.Car.Extras = map[string]string{"test": "1"}
tests := []struct {
name string
@@ -76,17 +108,17 @@ func TestAutocompleteFormat(t *testing.T) {
{
"invalid completion",
"{{ ..",
- nil,
+ []string{},
},
{
"fist level struct field name",
"{{.",
- []string{"{{.Name}}", "{{.Age}}", "{{.Car.", "{{.Car2.", "{{.Hello}}"},
+ []string{"{{.Name}}", "{{.Age}}", "{{.Car.", "{{.Car2.", "{{.Anonymous.", "{{.Hello}}"},
},
{
"fist level struct field name",
"{{ .",
- []string{"{{ .Name}}", "{{ .Age}}", "{{ .Car.", "{{ .Car2.", "{{ .Hello}}"},
+ []string{"{{ .Name}}", "{{ .Age}}", "{{ .Car.", "{{ .Car2.", "{{ .Anonymous.", "{{ .Hello}}"},
},
{
"fist level struct field name",
@@ -96,7 +128,7 @@ func TestAutocompleteFormat(t *testing.T) {
{
"second level struct field name",
"{{ .Car.",
- []string{"{{ .Car.Color}}", "{{ .Car.Type}}", "{{ .Car.Brand}}", "{{ .Car.Stats.", "{{ .Car.Extras}}"},
+ []string{"{{ .Car.Color}}", "{{ .Car.Struct.", "{{ .Car.Type}}", "{{ .Car.Brand}}", "{{ .Car.Stats.", "{{ .Car.Extras.", "{{ .Car.Extras2."},
},
{
"second level struct field name",
@@ -106,7 +138,7 @@ func TestAutocompleteFormat(t *testing.T) {
{
"second level nil struct field name",
"{{ .Car2.",
- []string{"{{ .Car2.Color}}", "{{ .Car2.Type}}", "{{ .Car2.Brand}}", "{{ .Car2.Stats.", "{{ .Car2.Extras}}"},
+ []string{"{{ .Car2.Color}}", "{{ .Car2.Struct.", "{{ .Car2.Type}}", "{{ .Car2.Brand}}", "{{ .Car2.Stats.", "{{ .Car2.Extras.", "{{ .Car2.Extras2."},
},
{
"three level struct field name",
@@ -126,28 +158,44 @@ func TestAutocompleteFormat(t *testing.T) {
{
"invalid field name",
"{{ .Ca.B",
- nil,
+ []string{},
},
{
"map key names don't work",
"{{ .Car.Extras.",
- nil,
+ []string{},
+ },
+ {
+ "map values work",
+ "{{ .Car.Extras.somekey.",
+ []string{"{{ .Car.Extras.somekey.Name1}}", "{{ .Car.Extras.somekey.Name2}}"},
+ },
+ {
+ "map values work with ptr",
+ "{{ .Car.Extras2.somekey.",
+ []string{"{{ .Car.Extras2.somekey.Name1}}", "{{ .Car.Extras2.somekey.Name2}}"},
},
{
"two variables struct field name",
"{{ .Car.Brand }} {{ .Car.",
- []string{"{{ .Car.Brand }} {{ .Car.Color}}", "{{ .Car.Brand }} {{ .Car.Type}}", "{{ .Car.Brand }} {{ .Car.Brand}}",
- "{{ .Car.Brand }} {{ .Car.Stats.", "{{ .Car.Brand }} {{ .Car.Extras}}"},
+ []string{"{{ .Car.Brand }} {{ .Car.Color}}", "{{ .Car.Brand }} {{ .Car.Struct.", "{{ .Car.Brand }} {{ .Car.Type}}",
+ "{{ .Car.Brand }} {{ .Car.Brand}}", "{{ .Car.Brand }} {{ .Car.Stats.", "{{ .Car.Brand }} {{ .Car.Extras.",
+ "{{ .Car.Brand }} {{ .Car.Extras2."},
},
{
"only dot without variable",
".",
nil,
},
+ {
+ "access embedded nil struct field",
+ "{{.Hello.",
+ []string{},
+ },
}
for _, test := range tests {
- completion, directive := common.AutocompleteFormat(testStruct)(nil, nil, test.toComplete)
+ completion, directive := common.AutocompleteFormat(&testStruct)(nil, nil, test.toComplete)
// directive should always be greater than ShellCompDirectiveNoFileComp
assert.GreaterOrEqual(t, directive, cobra.ShellCompDirectiveNoFileComp, "unexpected ShellCompDirective")
assert.Equal(t, test.expected, completion, test.name)
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index d28becc8a..923d0517f 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -12,7 +12,7 @@ import (
"github.com/spf13/cobra"
)
-const sizeWithUnitFormat = "(format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))"
+const sizeWithUnitFormat = "(format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))"
var containerConfig = registry.PodmanConfig()
@@ -98,7 +98,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
cgroupsFlagName := "cgroups"
createFlags.StringVar(
&cf.CgroupsMode,
- cgroupsFlagName, cgroupConfig(),
+ cgroupsFlagName, cf.CgroupsMode,
`control container cgroup configuration ("enabled"|"disabled"|"no-conmon"|"split")`,
)
_ = cmd.RegisterFlagCompletionFunc(cgroupsFlagName, AutocompleteCgroupMode)
@@ -150,7 +150,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
envFlagName := "env"
createFlags.StringArrayP(
- envFlagName, "e", env(),
+ envFlagName, "e", Env(),
"Set environment variables in container",
)
_ = cmd.RegisterFlagCompletionFunc(envFlagName, completion.AutocompleteNone)
@@ -255,9 +255,8 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
_ = cmd.RegisterFlagCompletionFunc(hostUserFlagName, completion.AutocompleteNone)
imageVolumeFlagName := "image-volume"
- createFlags.StringVar(
- &cf.ImageVolume,
- imageVolumeFlagName, DefaultImageVolume,
+ createFlags.String(
+ imageVolumeFlagName, cf.ImageVolume,
`Tells podman how to handle the builtin image volumes ("bind"|"tmpfs"|"ignore")`,
)
_ = cmd.RegisterFlagCompletionFunc(imageVolumeFlagName, AutocompleteImageVolume)
@@ -299,7 +298,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
logDriverFlagName := "log-driver"
createFlags.StringVar(
&cf.LogDriver,
- logDriverFlagName, LogDriver(),
+ logDriverFlagName, cf.LogDriver,
"Logging driver for the container",
)
_ = cmd.RegisterFlagCompletionFunc(logDriverFlagName, AutocompleteLogDriver)
@@ -390,8 +389,8 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
pullFlagName := "pull"
createFlags.StringVar(
&cf.Pull,
- pullFlagName, policy(),
- `Pull image before creating ("always"|"missing"|"never")`,
+ pullFlagName, cf.Pull,
+ `Pull image policy`,
)
_ = cmd.RegisterFlagCompletionFunc(pullFlagName, AutocompletePullOption)
@@ -407,7 +406,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
createFlags.BoolVar(
&cf.ReadOnlyTmpFS,
- "read-only-tmpfs", true,
+ "read-only-tmpfs", cf.ReadOnlyTmpFS,
"When running containers in read-only mode mount a read-write tmpfs on /run, /tmp and /var/tmp",
)
requiresFlagName := "requires"
@@ -440,7 +439,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
sdnotifyFlagName := "sdnotify"
createFlags.StringVar(
&cf.SdNotifyMode,
- sdnotifyFlagName, define.SdNotifyModeContainer,
+ sdnotifyFlagName, cf.SdNotifyMode,
`control sd-notify behavior ("container"|"conmon"|"ignore")`,
)
_ = cmd.RegisterFlagCompletionFunc(sdnotifyFlagName, AutocompleteSDNotify)
@@ -453,13 +452,6 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
_ = cmd.RegisterFlagCompletionFunc(secretFlagName, AutocompleteSecrets)
- shmSizeFlagName := "shm-size"
- createFlags.String(
- shmSizeFlagName, shmSize(),
- "Size of /dev/shm "+sizeWithUnitFormat,
- )
- _ = cmd.RegisterFlagCompletionFunc(shmSizeFlagName, completion.AutocompleteNone)
-
stopSignalFlagName := "stop-signal"
createFlags.StringVar(
&cf.StopSignal,
@@ -471,7 +463,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
stopTimeoutFlagName := "stop-timeout"
createFlags.UintVar(
&cf.StopTimeout,
- stopTimeoutFlagName, containerConfig.Engine.StopTimeout,
+ stopTimeoutFlagName, cf.StopTimeout,
"Timeout (in seconds) that containers stopped by user command have to exit. If exceeded, the container will be forcibly stopped via SIGKILL.",
)
_ = cmd.RegisterFlagCompletionFunc(stopTimeoutFlagName, completion.AutocompleteNone)
@@ -479,7 +471,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
systemdFlagName := "systemd"
createFlags.StringVar(
&cf.Systemd,
- systemdFlagName, "true",
+ systemdFlagName, cf.Systemd,
`Run container in systemd mode ("true"|"false"|"always")`,
)
_ = cmd.RegisterFlagCompletionFunc(systemdFlagName, AutocompleteSystemdFlag)
@@ -523,7 +515,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
timezoneFlagName := "tz"
createFlags.StringVar(
&cf.Timezone,
- timezoneFlagName, containerConfig.TZ(),
+ timezoneFlagName, cf.Timezone,
"Set timezone in container",
)
_ = cmd.RegisterFlagCompletionFunc(timezoneFlagName, completion.AutocompleteNone) //TODO: add timezone completion
@@ -531,7 +523,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
umaskFlagName := "umask"
createFlags.StringVar(
&cf.Umask,
- umaskFlagName, containerConfig.Umask(),
+ umaskFlagName, cf.Umask,
"Set umask in container",
)
_ = cmd.RegisterFlagCompletionFunc(umaskFlagName, completion.AutocompleteNone)
@@ -539,7 +531,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
ulimitFlagName := "ulimit"
createFlags.StringSliceVar(
&cf.Ulimit,
- ulimitFlagName, ulimits(),
+ ulimitFlagName, cf.Ulimit,
"Ulimit options",
)
_ = cmd.RegisterFlagCompletionFunc(ulimitFlagName, completion.AutocompleteNone)
@@ -552,13 +544,6 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
_ = cmd.RegisterFlagCompletionFunc(userFlagName, AutocompleteUserFlag)
- utsFlagName := "uts"
- createFlags.String(
- utsFlagName, "",
- "UTS namespace to use",
- )
- _ = cmd.RegisterFlagCompletionFunc(utsFlagName, AutocompleteNamespace)
-
mountFlagName := "mount"
createFlags.StringArrayVar(
&cf.Mount,
@@ -578,7 +563,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
seccompPolicyFlagName := "seccomp-policy"
createFlags.StringVar(
&cf.SeccompPolicy,
- seccompPolicyFlagName, "default",
+ seccompPolicyFlagName, cf.SeccompPolicy,
"Policy for selecting a seccomp profile (experimental)",
)
_ = cmd.RegisterFlagCompletionFunc(seccompPolicyFlagName, completion.AutocompleteDefault)
@@ -629,6 +614,13 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
}
if isInfra || (!clone && !isInfra) { // infra container flags, create should also pick these up
+ shmSizeFlagName := "shm-size"
+ createFlags.String(
+ shmSizeFlagName, shmSize(),
+ "Size of /dev/shm "+sizeWithUnitFormat,
+ )
+ _ = cmd.RegisterFlagCompletionFunc(shmSizeFlagName, completion.AutocompleteNone)
+
sysctlFlagName := "sysctl"
createFlags.StringSliceVar(
&cf.Sysctl,
@@ -685,6 +677,14 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
_ = cmd.RegisterFlagCompletionFunc(usernsFlagName, AutocompleteUserNamespace)
+ utsFlagName := "uts"
+ createFlags.StringVar(
+ &cf.UTS,
+ utsFlagName, "",
+ "UTS namespace to use",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(utsFlagName, AutocompleteNamespace)
+
cgroupParentFlagName := "cgroup-parent"
createFlags.StringVar(
&cf.CgroupParent,
@@ -770,7 +770,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
volumeFlagName := "volume"
createFlags.StringArrayVarP(
&cf.Volume,
- volumeFlagName, "v", volumes(),
+ volumeFlagName, "v", cf.Volume,
volumeDesciption,
)
_ = cmd.RegisterFlagCompletionFunc(volumeFlagName, AutocompleteVolumeFlag)
@@ -864,14 +864,6 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
_ = cmd.RegisterFlagCompletionFunc(cpusetMemsFlagName, completion.AutocompleteNone)
- memoryFlagName := "memory"
- createFlags.StringVarP(
- &cf.Memory,
- memoryFlagName, "m", "",
- "Memory limit "+sizeWithUnitFormat,
- )
- _ = cmd.RegisterFlagCompletionFunc(memoryFlagName, completion.AutocompleteNone)
-
memoryReservationFlagName := "memory-reservation"
createFlags.StringVar(
&cf.MemoryReservation,
@@ -891,12 +883,13 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
memorySwappinessFlagName := "memory-swappiness"
createFlags.Int64Var(
&cf.MemorySwappiness,
- memorySwappinessFlagName, -1,
+ memorySwappinessFlagName, cf.MemorySwappiness,
"Tune container memory swappiness (0 to 100, or -1 for system default)",
)
_ = cmd.RegisterFlagCompletionFunc(memorySwappinessFlagName, completion.AutocompleteNone)
}
// anyone can use these
+
cpusFlagName := "cpus"
createFlags.Float64Var(
&cf.CPUS,
@@ -912,4 +905,12 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
"CPUs in which to allow execution (0-3, 0,1)",
)
_ = cmd.RegisterFlagCompletionFunc(cpusetCpusFlagName, completion.AutocompleteNone)
+
+ memoryFlagName := "memory"
+ createFlags.StringVarP(
+ &cf.Memory,
+ memoryFlagName, "m", "",
+ "Memory limit "+sizeWithUnitFormat,
+ )
+ _ = cmd.RegisterFlagCompletionFunc(memoryFlagName, completion.AutocompleteNone)
}
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index c40d1ea51..fb5af8f59 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -1,472 +1,11 @@
package common
import (
- "fmt"
- "net"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-
- "github.com/containers/common/libnetwork/types"
- "github.com/containers/common/pkg/cgroups"
- "github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/pkg/api/handlers"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/containers/podman/v4/pkg/rootless"
- "github.com/containers/podman/v4/pkg/specgen"
- "github.com/docker/docker/api/types/mount"
- "github.com/pkg/errors"
)
-func stringMaptoArray(m map[string]string) []string {
- a := make([]string, 0, len(m))
- for k, v := range m {
- a = append(a, fmt.Sprintf("%s=%s", k, v))
- }
- return a
-}
-
-// ContainerCreateToContainerCLIOpts converts a compat input struct to cliopts so it can be converted to
-// a specgen spec.
-func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.ContainerCreateOptions, []string, error) {
- var (
- capAdd []string
- cappDrop []string
- entrypoint *string
- init bool
- specPorts []types.PortMapping
- )
-
- if cc.HostConfig.Init != nil {
- init = *cc.HostConfig.Init
- }
-
- // Iterate devices and convert back to string
- devices := make([]string, 0, len(cc.HostConfig.Devices))
- for _, dev := range cc.HostConfig.Devices {
- devices = append(devices, fmt.Sprintf("%s:%s:%s", dev.PathOnHost, dev.PathInContainer, dev.CgroupPermissions))
- }
-
- // iterate blkreaddevicebps
- readBps := make([]string, 0, len(cc.HostConfig.BlkioDeviceReadBps))
- for _, dev := range cc.HostConfig.BlkioDeviceReadBps {
- readBps = append(readBps, dev.String())
- }
-
- // iterate blkreaddeviceiops
- readIops := make([]string, 0, len(cc.HostConfig.BlkioDeviceReadIOps))
- for _, dev := range cc.HostConfig.BlkioDeviceReadIOps {
- readIops = append(readIops, dev.String())
- }
-
- // iterate blkwritedevicebps
- writeBps := make([]string, 0, len(cc.HostConfig.BlkioDeviceWriteBps))
- for _, dev := range cc.HostConfig.BlkioDeviceWriteBps {
- writeBps = append(writeBps, dev.String())
- }
-
- // iterate blkwritedeviceiops
- writeIops := make([]string, 0, len(cc.HostConfig.BlkioDeviceWriteIOps))
- for _, dev := range cc.HostConfig.BlkioDeviceWriteIOps {
- writeIops = append(writeIops, dev.String())
- }
-
- // entrypoint
- // can be a string or slice. if it is a slice, we need to
- // marshall it to json; otherwise it should just be the string
- // value
- if len(cc.Config.Entrypoint) > 0 {
- entrypoint = &cc.Config.Entrypoint[0]
- if len(cc.Config.Entrypoint) > 1 {
- b, err := json.Marshal(cc.Config.Entrypoint)
- if err != nil {
- return nil, nil, err
- }
- var jsonString = string(b)
- entrypoint = &jsonString
- }
- }
-
- // expose ports
- expose := make([]string, 0, len(cc.Config.ExposedPorts))
- for p := range cc.Config.ExposedPorts {
- expose = append(expose, fmt.Sprintf("%s/%s", p.Port(), p.Proto()))
- }
-
- // mounts type=tmpfs/bind,source=...,target=...=,opt=val
- volSources := make(map[string]bool)
- volDestinations := make(map[string]bool)
- mounts := make([]string, 0, len(cc.HostConfig.Mounts))
- var builder strings.Builder
- for _, m := range cc.HostConfig.Mounts {
- addField(&builder, "type", string(m.Type))
- addField(&builder, "source", m.Source)
- addField(&builder, "target", m.Target)
-
- // Store source/dest so we don't add duplicates if a volume is
- // also mentioned in cc.Volumes.
- // Which Docker Compose v2.0 does, for unclear reasons...
- volSources[m.Source] = true
- volDestinations[m.Target] = true
-
- if m.ReadOnly {
- addField(&builder, "ro", "true")
- }
- addField(&builder, "consistency", string(m.Consistency))
- // Map any specialized mount options that intersect between *Options and cli options
- switch m.Type {
- case mount.TypeBind:
- if m.BindOptions != nil {
- addField(&builder, "bind-propagation", string(m.BindOptions.Propagation))
- addField(&builder, "bind-nonrecursive", strconv.FormatBool(m.BindOptions.NonRecursive))
- }
- case mount.TypeTmpfs:
- if m.TmpfsOptions != nil {
- addField(&builder, "tmpfs-size", strconv.FormatInt(m.TmpfsOptions.SizeBytes, 10))
- addField(&builder, "tmpfs-mode", strconv.FormatUint(uint64(m.TmpfsOptions.Mode), 8))
- }
- case mount.TypeVolume:
- // All current VolumeOpts are handled above
- // See vendor/github.com/containers/common/pkg/parse/parse.go:ValidateVolumeOpts()
- }
- mounts = append(mounts, builder.String())
- builder.Reset()
- }
-
- // dns
- dns := make([]net.IP, 0, len(cc.HostConfig.DNS))
- for _, d := range cc.HostConfig.DNS {
- dns = append(dns, net.ParseIP(d))
- }
-
- // publish
- for port, pbs := range cc.HostConfig.PortBindings {
- for _, pb := range pbs {
- var hostport int
- var err error
- if pb.HostPort != "" {
- hostport, err = strconv.Atoi(pb.HostPort)
- }
- if err != nil {
- return nil, nil, err
- }
- tmpPort := types.PortMapping{
- HostIP: pb.HostIP,
- ContainerPort: uint16(port.Int()),
- HostPort: uint16(hostport),
- Range: 0,
- Protocol: port.Proto(),
- }
- specPorts = append(specPorts, tmpPort)
- }
- }
-
- // special case for NetworkMode, the podman default is slirp4netns for
- // rootless but for better docker compat we want bridge.
- netmode := string(cc.HostConfig.NetworkMode)
- if netmode == "" || netmode == "default" {
- netmode = "bridge"
- }
- nsmode, networks, netOpts, err := specgen.ParseNetworkFlag([]string{netmode})
- if err != nil {
- return nil, nil, err
- }
-
- // network
- // Note: we cannot emulate compat exactly here. we only allow specifics of networks to be
- // defined when there is only one network.
- netInfo := entities.NetOptions{
- AddHosts: cc.HostConfig.ExtraHosts,
- DNSOptions: cc.HostConfig.DNSOptions,
- DNSSearch: cc.HostConfig.DNSSearch,
- DNSServers: dns,
- Network: nsmode,
- PublishPorts: specPorts,
- NetworkOptions: netOpts,
- NoHosts: rtc.Containers.NoHosts,
- }
-
- // network names
- switch {
- case len(cc.NetworkingConfig.EndpointsConfig) > 0:
- endpointsConfig := cc.NetworkingConfig.EndpointsConfig
- networks := make(map[string]types.PerNetworkOptions, len(endpointsConfig))
- for netName, endpoint := range endpointsConfig {
- netOpts := types.PerNetworkOptions{}
- if endpoint != nil {
- netOpts.Aliases = endpoint.Aliases
-
- // if IP address is provided
- if len(endpoint.IPAddress) > 0 {
- staticIP := net.ParseIP(endpoint.IPAddress)
- if staticIP == nil {
- return nil, nil, errors.Errorf("failed to parse the ip address %q", endpoint.IPAddress)
- }
- netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
- }
-
- if endpoint.IPAMConfig != nil {
- // if IPAMConfig.IPv4Address is provided
- if len(endpoint.IPAMConfig.IPv4Address) > 0 {
- staticIP := net.ParseIP(endpoint.IPAMConfig.IPv4Address)
- if staticIP == nil {
- return nil, nil, errors.Errorf("failed to parse the ipv4 address %q", endpoint.IPAMConfig.IPv4Address)
- }
- netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
- }
- // if IPAMConfig.IPv6Address is provided
- if len(endpoint.IPAMConfig.IPv6Address) > 0 {
- staticIP := net.ParseIP(endpoint.IPAMConfig.IPv6Address)
- if staticIP == nil {
- return nil, nil, errors.Errorf("failed to parse the ipv6 address %q", endpoint.IPAMConfig.IPv6Address)
- }
- netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
- }
- }
- // If MAC address is provided
- if len(endpoint.MacAddress) > 0 {
- staticMac, err := net.ParseMAC(endpoint.MacAddress)
- if err != nil {
- return nil, nil, errors.Errorf("failed to parse the mac address %q", endpoint.MacAddress)
- }
- netOpts.StaticMAC = types.HardwareAddr(staticMac)
- }
- }
-
- networks[netName] = netOpts
- }
-
- netInfo.Networks = networks
- case len(cc.HostConfig.NetworkMode) > 0:
- netInfo.Networks = networks
- }
-
- parsedTmp := make([]string, 0, len(cc.HostConfig.Tmpfs))
- for path, options := range cc.HostConfig.Tmpfs {
- finalString := path
- if options != "" {
- finalString += ":" + options
- }
- parsedTmp = append(parsedTmp, finalString)
- }
-
- // Note: several options here are marked as "don't need". this is based
- // on speculation by Matt and I. We think that these come into play later
- // like with start. We believe this is just a difference in podman/compat
- cliOpts := entities.ContainerCreateOptions{
- // Attach: nil, // don't need?
- Authfile: "",
- CapAdd: append(capAdd, cc.HostConfig.CapAdd...),
- CapDrop: append(cappDrop, cc.HostConfig.CapDrop...),
- CgroupParent: cc.HostConfig.CgroupParent,
- CIDFile: cc.HostConfig.ContainerIDFile,
- CPUPeriod: uint64(cc.HostConfig.CPUPeriod),
- CPUQuota: cc.HostConfig.CPUQuota,
- CPURTPeriod: uint64(cc.HostConfig.CPURealtimePeriod),
- CPURTRuntime: cc.HostConfig.CPURealtimeRuntime,
- CPUShares: uint64(cc.HostConfig.CPUShares),
- // CPUS: 0, // don't need?
- CPUSetCPUs: cc.HostConfig.CpusetCpus,
- CPUSetMems: cc.HostConfig.CpusetMems,
- // Detach: false, // don't need
- // DetachKeys: "", // don't need
- Devices: devices,
- DeviceCgroupRule: nil,
- DeviceReadBPs: readBps,
- DeviceReadIOPs: readIops,
- DeviceWriteBPs: writeBps,
- DeviceWriteIOPs: writeIops,
- Entrypoint: entrypoint,
- Env: cc.Config.Env,
- Expose: expose,
- GroupAdd: cc.HostConfig.GroupAdd,
- Hostname: cc.Config.Hostname,
- ImageVolume: "bind",
- Init: init,
- Interactive: cc.Config.OpenStdin,
- IPC: string(cc.HostConfig.IpcMode),
- Label: stringMaptoArray(cc.Config.Labels),
- LogDriver: cc.HostConfig.LogConfig.Type,
- LogOptions: stringMaptoArray(cc.HostConfig.LogConfig.Config),
- Name: cc.Name,
- OOMScoreAdj: &cc.HostConfig.OomScoreAdj,
- Arch: "",
- OS: "",
- Variant: "",
- PID: string(cc.HostConfig.PidMode),
- PIDsLimit: cc.HostConfig.PidsLimit,
- Privileged: cc.HostConfig.Privileged,
- PublishAll: cc.HostConfig.PublishAllPorts,
- Quiet: false,
- ReadOnly: cc.HostConfig.ReadonlyRootfs,
- ReadOnlyTmpFS: true, // podman default
- Rm: cc.HostConfig.AutoRemove,
- SecurityOpt: cc.HostConfig.SecurityOpt,
- StopSignal: cc.Config.StopSignal,
- StorageOpts: stringMaptoArray(cc.HostConfig.StorageOpt),
- Sysctl: stringMaptoArray(cc.HostConfig.Sysctls),
- Systemd: "true", // podman default
- TmpFS: parsedTmp,
- TTY: cc.Config.Tty,
- UnsetEnv: cc.UnsetEnv,
- UnsetEnvAll: cc.UnsetEnvAll,
- User: cc.Config.User,
- UserNS: string(cc.HostConfig.UsernsMode),
- UTS: string(cc.HostConfig.UTSMode),
- Mount: mounts,
- VolumesFrom: cc.HostConfig.VolumesFrom,
- Workdir: cc.Config.WorkingDir,
- Net: &netInfo,
- HealthInterval: define.DefaultHealthCheckInterval,
- HealthRetries: define.DefaultHealthCheckRetries,
- HealthTimeout: define.DefaultHealthCheckTimeout,
- HealthStartPeriod: define.DefaultHealthCheckStartPeriod,
- }
- if !rootless.IsRootless() {
- var ulimits []string
- if len(cc.HostConfig.Ulimits) > 0 {
- for _, ul := range cc.HostConfig.Ulimits {
- ulimits = append(ulimits, ul.String())
- }
- cliOpts.Ulimit = ulimits
- }
- }
- if cc.HostConfig.Resources.NanoCPUs > 0 {
- if cliOpts.CPUPeriod != 0 || cliOpts.CPUQuota != 0 {
- return nil, nil, errors.Errorf("NanoCpus conflicts with CpuPeriod and CpuQuota")
- }
- cliOpts.CPUPeriod = 100000
- cliOpts.CPUQuota = cc.HostConfig.Resources.NanoCPUs / 10000
- }
-
- // volumes
- for _, vol := range cc.HostConfig.Binds {
- cliOpts.Volume = append(cliOpts.Volume, vol)
- // Extract the destination so we don't add duplicate mounts in
- // the volumes phase.
- splitVol := specgen.SplitVolumeString(vol)
- switch len(splitVol) {
- case 1:
- volDestinations[vol] = true
- default:
- volSources[splitVol[0]] = true
- volDestinations[splitVol[1]] = true
- }
- }
- // Anonymous volumes are added differently from other volumes, in their
- // own special field, for reasons known only to Docker. Still use the
- // format of `-v` so we can just append them in there.
- // Unfortunately, these may be duplicates of existing mounts in Binds.
- // So... We need to catch that.
- // This also handles volumes duplicated between cc.HostConfig.Mounts and
- // cc.Volumes, as seen in compose v2.0.
- for vol := range cc.Volumes {
- if _, ok := volDestinations[filepath.Clean(vol)]; ok {
- continue
- }
- cliOpts.Volume = append(cliOpts.Volume, vol)
- }
- // Make mount points for compat volumes
- for vol := range volSources {
- // This might be a named volume.
- // Assume it is if it's not an absolute path.
- if !filepath.IsAbs(vol) {
- continue
- }
- // If volume already exists, there is nothing to do
- if _, err := os.Stat(vol); err == nil {
- continue
- }
- if err := os.MkdirAll(vol, 0755); err != nil {
- if !os.IsExist(err) {
- return nil, nil, errors.Wrapf(err, "error making volume mountpoint for volume %s", vol)
- }
- }
- }
- if len(cc.HostConfig.BlkioWeightDevice) > 0 {
- devices := make([]string, 0, len(cc.HostConfig.BlkioWeightDevice))
- for _, d := range cc.HostConfig.BlkioWeightDevice {
- devices = append(devices, d.String())
- }
- cliOpts.BlkIOWeightDevice = devices
- }
- if cc.HostConfig.BlkioWeight > 0 {
- cliOpts.BlkIOWeight = strconv.Itoa(int(cc.HostConfig.BlkioWeight))
- }
-
- if cc.HostConfig.Memory > 0 {
- cliOpts.Memory = strconv.Itoa(int(cc.HostConfig.Memory))
- }
-
- if cc.HostConfig.MemoryReservation > 0 {
- cliOpts.MemoryReservation = strconv.Itoa(int(cc.HostConfig.MemoryReservation))
- }
-
- cgroupsv2, err := cgroups.IsCgroup2UnifiedMode()
- if err != nil {
- return nil, nil, err
- }
- if cc.HostConfig.MemorySwap > 0 && (!rootless.IsRootless() || (rootless.IsRootless() && cgroupsv2)) {
- cliOpts.MemorySwap = strconv.Itoa(int(cc.HostConfig.MemorySwap))
- }
-
- if cc.Config.StopTimeout != nil {
- cliOpts.StopTimeout = uint(*cc.Config.StopTimeout)
- }
-
- if cc.HostConfig.ShmSize > 0 {
- cliOpts.ShmSize = strconv.Itoa(int(cc.HostConfig.ShmSize))
- }
-
- if len(cc.HostConfig.RestartPolicy.Name) > 0 {
- policy := cc.HostConfig.RestartPolicy.Name
- // only add restart count on failure
- if cc.HostConfig.RestartPolicy.IsOnFailure() {
- policy += fmt.Sprintf(":%d", cc.HostConfig.RestartPolicy.MaximumRetryCount)
- }
- cliOpts.Restart = policy
- }
-
- if cc.HostConfig.MemorySwappiness != nil && (!rootless.IsRootless() || rootless.IsRootless() && cgroupsv2 && rtc.Engine.CgroupManager == "systemd") {
- cliOpts.MemorySwappiness = *cc.HostConfig.MemorySwappiness
- } else {
- cliOpts.MemorySwappiness = -1
- }
- if cc.HostConfig.OomKillDisable != nil {
- cliOpts.OOMKillDisable = *cc.HostConfig.OomKillDisable
- }
- if cc.Config.Healthcheck != nil {
- finCmd := ""
- for _, str := range cc.Config.Healthcheck.Test {
- finCmd = finCmd + str + " "
- }
- if len(finCmd) > 1 {
- finCmd = finCmd[:len(finCmd)-1]
- }
- cliOpts.HealthCmd = finCmd
- if cc.Config.Healthcheck.Interval > 0 {
- cliOpts.HealthInterval = cc.Config.Healthcheck.Interval.String()
- }
- if cc.Config.Healthcheck.Retries > 0 {
- cliOpts.HealthRetries = uint(cc.Config.Healthcheck.Retries)
- }
- if cc.Config.Healthcheck.StartPeriod > 0 {
- cliOpts.HealthStartPeriod = cc.Config.Healthcheck.StartPeriod.String()
- }
- if cc.Config.Healthcheck.Timeout > 0 {
- cliOpts.HealthTimeout = cc.Config.Healthcheck.Timeout.String()
- }
- }
-
- // specgen assumes the image name is arg[0]
- cmd := []string{cc.Config.Image}
- cmd = append(cmd, cc.Config.Cmd...)
- return &cliOpts, cmd, nil
-}
-
func ulimits() []string {
if !registry.IsRemote() {
return containerConfig.Ulimits()
@@ -488,7 +27,7 @@ func devices() []string {
return nil
}
-func env() []string {
+func Env() []string {
if !registry.IsRemote() {
return containerConfig.Env()
}
@@ -537,16 +76,20 @@ func LogDriver() string {
return ""
}
-// addField is a helper function to populate mount options
-func addField(b *strings.Builder, name string, value string) {
- if value == "" {
- return
- }
-
- if b.Len() > 0 {
- b.WriteRune(',')
- }
- b.WriteString(name)
- b.WriteRune('=')
- b.WriteString(value)
+// DefineCreateDefaults is used to initialize ctr create options before flag initialization.
+func DefineCreateDefaults(opts *entities.ContainerCreateOptions) {
+ opts.LogDriver = LogDriver()
+ opts.CgroupsMode = cgroupConfig()
+ opts.MemorySwappiness = -1
+ opts.ImageVolume = containerConfig.Engine.ImageVolumeMode
+ opts.Pull = policy()
+ opts.ReadOnlyTmpFS = true
+ opts.SdNotifyMode = define.SdNotifyModeContainer
+ opts.StopTimeout = containerConfig.Engine.StopTimeout
+ opts.Systemd = "true"
+ opts.Timezone = containerConfig.TZ()
+ opts.Umask = containerConfig.Umask()
+ opts.Ulimit = ulimits()
+ opts.SeccompPolicy = "default"
+ opts.Volume = volumes()
}
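(Illustrative aside: the pattern DefineCreateDefaults enables, shown with a trimmed-down hypothetical options struct and pflag. Defaults are written to the struct first; the flag definitions then use the current field values as their defaults, which is what the `cf.X` defaults in create.go above rely on.)

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// hypothetical trimmed-down options struct; the real one is entities.ContainerCreateOptions
type createOptions struct {
	Systemd       string
	ReadOnlyTmpFS bool
}

// defineCreateDefaults mirrors DefineCreateDefaults: set defaults on the struct first
func defineCreateDefaults(o *createOptions) {
	o.Systemd = "true"
	o.ReadOnlyTmpFS = true
}

// defineCreateFlags mirrors the new pattern: the current struct value becomes the flag default
func defineCreateFlags(fs *pflag.FlagSet, o *createOptions) {
	fs.StringVar(&o.Systemd, "systemd", o.Systemd, `Run container in systemd mode ("true"|"false"|"always")`)
	fs.BoolVar(&o.ReadOnlyTmpFS, "read-only-tmpfs", o.ReadOnlyTmpFS, "Mount a read-write tmpfs on /run, /tmp and /var/tmp")
}

func main() {
	opts := createOptions{}
	fs := pflag.NewFlagSet("create", pflag.ContinueOnError)
	defineCreateDefaults(&opts)
	defineCreateFlags(fs, &opts)
	_ = fs.Parse([]string{"--systemd=always"})
	fmt.Println(opts.Systemd, opts.ReadOnlyTmpFS) // "always true"
}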
diff --git a/cmd/podman/common/default.go b/cmd/podman/common/default.go
index 7caec50ff..6f78d3d29 100644
--- a/cmd/podman/common/default.go
+++ b/cmd/podman/common/default.go
@@ -5,9 +5,6 @@ import (
)
var (
-
- // DefaultImageVolume default value
- DefaultImageVolume = "bind"
// Pull in configured json library
json = registry.JSONLibrary()
)
diff --git a/cmd/podman/common/diffChanges.go b/cmd/podman/common/diffChanges.go
index 99b5f1dcd..6d4c7154a 100644
--- a/cmd/podman/common/diffChanges.go
+++ b/cmd/podman/common/diffChanges.go
@@ -6,7 +6,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
)
type ChangesReportJSON struct {
@@ -26,7 +25,7 @@ func ChangesToJSON(diffs *entities.DiffReport) error {
case archive.ChangeModify:
body.Changed = append(body.Changed, row.Path)
default:
- return errors.Errorf("output kind %q not recognized", row.Kind)
+ return fmt.Errorf("output kind %q not recognized", row.Kind)
}
}
diff --git a/cmd/podman/common/netflags.go b/cmd/podman/common/netflags.go
index 9dfe81d62..e7aa265d1 100644
--- a/cmd/podman/common/netflags.go
+++ b/cmd/podman/common/netflags.go
@@ -1,6 +1,8 @@
package common
import (
+ "errors"
+ "fmt"
"net"
"github.com/containers/common/libnetwork/types"
@@ -10,7 +12,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/specgenutil"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -125,13 +126,13 @@ func NetFlagsToNetOptions(opts *entities.NetOptions, flags pflag.FlagSet) (*enti
if d == "none" {
opts.UseImageResolvConf = true
if len(servers) > 1 {
- return nil, errors.Errorf("%s is not allowed to be specified with other DNS ip addresses", d)
+ return nil, fmt.Errorf("%s is not allowed to be specified with other DNS ip addresses", d)
}
break
}
dns := net.ParseIP(d)
if dns == nil {
- return nil, errors.Errorf("%s is not an ip address", d)
+ return nil, fmt.Errorf("%s is not an ip address", d)
}
opts.DNSServers = append(opts.DNSServers, dns)
}
@@ -154,7 +155,7 @@ func NetFlagsToNetOptions(opts *entities.NetOptions, flags pflag.FlagSet) (*enti
for _, dom := range dnsSearches {
if dom == "." {
if len(dnsSearches) > 1 {
- return nil, errors.Errorf("cannot pass additional search domains when also specifying '.'")
+ return nil, errors.New("cannot pass additional search domains when also specifying '.'")
}
continue
}
@@ -218,18 +219,18 @@ func NetFlagsToNetOptions(opts *entities.NetOptions, flags pflag.FlagSet) (*enti
if ip != "" {
// if pod create --infra=false
if infra, err := flags.GetBool("infra"); err == nil && !infra {
- return nil, errors.Wrapf(define.ErrInvalidArg, "cannot set --%s without infra container", ipFlagName)
+ return nil, fmt.Errorf("cannot set --%s without infra container: %w", ipFlagName, define.ErrInvalidArg)
}
staticIP := net.ParseIP(ip)
if staticIP == nil {
- return nil, errors.Errorf("%q is not an ip address", ip)
+ return nil, fmt.Errorf("%q is not an ip address", ip)
}
if !opts.Network.IsBridge() && !opts.Network.IsDefault() {
- return nil, errors.Wrapf(define.ErrInvalidArg, "--%s can only be set when the network mode is bridge", ipFlagName)
+ return nil, fmt.Errorf("--%s can only be set when the network mode is bridge: %w", ipFlagName, define.ErrInvalidArg)
}
if len(opts.Networks) != 1 {
- return nil, errors.Wrapf(define.ErrInvalidArg, "--%s can only be set for a single network", ipFlagName)
+ return nil, fmt.Errorf("--%s can only be set for a single network: %w", ipFlagName, define.ErrInvalidArg)
}
for name, netOpts := range opts.Networks {
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
@@ -245,17 +246,17 @@ func NetFlagsToNetOptions(opts *entities.NetOptions, flags pflag.FlagSet) (*enti
if len(m) > 0 {
// if pod create --infra=false
if infra, err := flags.GetBool("infra"); err == nil && !infra {
- return nil, errors.Wrap(define.ErrInvalidArg, "cannot set --mac without infra container")
+ return nil, fmt.Errorf("cannot set --mac without infra container: %w", define.ErrInvalidArg)
}
mac, err := net.ParseMAC(m)
if err != nil {
return nil, err
}
if !opts.Network.IsBridge() && !opts.Network.IsDefault() {
- return nil, errors.Wrap(define.ErrInvalidArg, "--mac-address can only be set when the network mode is bridge")
+ return nil, fmt.Errorf("--mac-address can only be set when the network mode is bridge: %w", define.ErrInvalidArg)
}
if len(opts.Networks) != 1 {
- return nil, errors.Wrap(define.ErrInvalidArg, "--mac-address can only be set for a single network")
+ return nil, fmt.Errorf("--mac-address can only be set for a single network: %w", define.ErrInvalidArg)
}
for name, netOpts := range opts.Networks {
netOpts.StaticMAC = types.HardwareAddr(mac)
@@ -270,10 +271,10 @@ func NetFlagsToNetOptions(opts *entities.NetOptions, flags pflag.FlagSet) (*enti
if len(aliases) > 0 {
// if pod create --infra=false
if infra, err := flags.GetBool("infra"); err == nil && !infra {
- return nil, errors.Wrap(define.ErrInvalidArg, "cannot set --network-alias without infra container")
+ return nil, fmt.Errorf("cannot set --network-alias without infra container: %w", define.ErrInvalidArg)
}
if !opts.Network.IsBridge() && !opts.Network.IsDefault() {
- return nil, errors.Wrap(define.ErrInvalidArg, "--network-alias can only be set when the network mode is bridge")
+ return nil, fmt.Errorf("--network-alias can only be set when the network mode is bridge: %w", define.ErrInvalidArg)
}
for name, netOpts := range opts.Networks {
netOpts.Aliases = aliases
diff --git a/cmd/podman/containers/attach.go b/cmd/podman/containers/attach.go
index 1a05ea5c3..52adb1fcb 100644
--- a/cmd/podman/containers/attach.go
+++ b/cmd/podman/containers/attach.go
@@ -1,13 +1,13 @@
package containers
import (
+ "errors"
"os"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -70,7 +70,7 @@ func init() {
func attach(cmd *cobra.Command, args []string) error {
if len(args) > 1 || (len(args) == 0 && !attachOpts.Latest) {
- return errors.Errorf("attach requires the name or id of one running container or the latest flag")
+ return errors.New("attach requires the name or id of one running container or the latest flag")
}
var name string
diff --git a/cmd/podman/containers/checkpoint.go b/cmd/podman/containers/checkpoint.go
index 40d689c4d..0eb0db394 100644
--- a/cmd/podman/containers/checkpoint.go
+++ b/cmd/podman/containers/checkpoint.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"fmt"
"strings"
"time"
@@ -15,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -31,7 +31,7 @@ var (
Long: checkpointDescription,
RunE: checkpoint,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompleteContainersRunning,
Example: `podman container checkpoint --keep ctrID
@@ -90,7 +90,7 @@ func checkpoint(cmd *cobra.Command, args []string) error {
podmanStart := time.Now()
if cmd.Flags().Changed("compress") {
if checkpointOptions.Export == "" {
- return errors.Errorf("--compress can only be used with --export")
+ return errors.New("--compress can only be used with --export")
}
compress, _ := cmd.Flags().GetString("compress")
switch strings.ToLower(compress) {
@@ -101,7 +101,7 @@ func checkpoint(cmd *cobra.Command, args []string) error {
case "zstd":
checkpointOptions.Compression = archive.Zstd
default:
- return errors.Errorf("Selected compression algorithm (%q) not supported. Please select one from: gzip, none, zstd", compress)
+ return fmt.Errorf("selected compression algorithm (%q) not supported. Please select one from: gzip, none, zstd", compress)
}
} else {
checkpointOptions.Compression = archive.Zstd
@@ -110,13 +110,13 @@ func checkpoint(cmd *cobra.Command, args []string) error {
return errors.New("checkpointing a container requires root")
}
if checkpointOptions.Export == "" && checkpointOptions.IgnoreRootFS {
- return errors.Errorf("--ignore-rootfs can only be used with --export")
+ return errors.New("--ignore-rootfs can only be used with --export")
}
if checkpointOptions.Export == "" && checkpointOptions.IgnoreVolumes {
- return errors.Errorf("--ignore-volumes can only be used with --export")
+ return errors.New("--ignore-volumes can only be used with --export")
}
if checkpointOptions.WithPrevious && checkpointOptions.PreCheckPoint {
- return errors.Errorf("--with-previous can not be used with --pre-checkpoint")
+ return errors.New("--with-previous can not be used with --pre-checkpoint")
}
if (checkpointOptions.WithPrevious || checkpointOptions.PreCheckPoint) && !criu.MemTrack() {
return errors.New("system (architecture/kernel/CRIU) does not support memory tracking")
diff --git a/cmd/podman/containers/cleanup.go b/cmd/podman/containers/cleanup.go
index aa2734607..c9a5cb28b 100644
--- a/cmd/podman/containers/cleanup.go
+++ b/cmd/podman/containers/cleanup.go
@@ -1,6 +1,7 @@
package containers
import (
+ "errors"
"fmt"
"github.com/containers/common/pkg/completion"
@@ -9,7 +10,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -23,11 +23,11 @@ var (
cleanupCommand = &cobra.Command{
Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
Use: "cleanup [options] CONTAINER [CONTAINER...]",
- Short: "Cleanup network and mountpoints of one or more containers",
+ Short: "Clean up network and mountpoints of one or more containers",
Long: cleanupDescription,
RunE: cleanup,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompleteContainersExited,
Example: `podman container cleanup --latest
@@ -65,11 +65,11 @@ func cleanup(cmd *cobra.Command, args []string) error {
if cleanupOptions.Exec != "" {
switch {
case cleanupOptions.All:
- return errors.Errorf("exec and all options conflict")
+ return errors.New("exec and all options conflict")
case len(args) > 1:
- return errors.Errorf("cannot use exec option when more than one container is given")
+ return errors.New("cannot use exec option when more than one container is given")
case cleanupOptions.RemoveImage:
- return errors.Errorf("exec and rmi options conflict")
+ return errors.New("exec and rmi options conflict")
}
}
diff --git a/cmd/podman/containers/clone.go b/cmd/podman/containers/clone.go
index 6912da1fc..9881a791c 100644
--- a/cmd/podman/containers/clone.go
+++ b/cmd/podman/containers/clone.go
@@ -7,7 +7,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -41,6 +40,7 @@ func cloneFlags(cmd *cobra.Command) {
forceFlagName := "force"
flags.BoolVarP(&ctrClone.Force, forceFlagName, "f", false, "force the existing container to be destroyed")
+ common.DefineCreateDefaults(&ctrClone.CreateOpts)
common.DefineCreateFlags(cmd, &ctrClone.CreateOpts, false, true)
}
func init() {
@@ -55,7 +55,7 @@ func init() {
func clone(cmd *cobra.Command, args []string) error {
switch len(args) {
case 0:
- return errors.Wrapf(define.ErrInvalidArg, "must specify at least 1 argument")
+ return fmt.Errorf("must specify at least 1 argument: %w", define.ErrInvalidArg)
case 2:
ctrClone.CreateOpts.Name = args[1]
case 3:
@@ -63,7 +63,7 @@ func clone(cmd *cobra.Command, args []string) error {
ctrClone.Image = args[2]
if !cliVals.RootFS {
rawImageName := args[0]
- name, err := PullImage(ctrClone.Image, ctrClone.CreateOpts)
+ name, err := PullImage(ctrClone.Image, &ctrClone.CreateOpts)
if err != nil {
return err
}
@@ -72,7 +72,7 @@ func clone(cmd *cobra.Command, args []string) error {
}
}
if ctrClone.Force && !ctrClone.Destroy {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set --force without --destroy")
+ return fmt.Errorf("cannot set --force without --destroy: %w", define.ErrInvalidArg)
}
ctrClone.ID = args[0]
diff --git a/cmd/podman/containers/commit.go b/cmd/podman/containers/commit.go
index e0cadd5b0..fb6dccad4 100644
--- a/cmd/podman/containers/commit.go
+++ b/cmd/podman/containers/commit.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -109,7 +108,7 @@ func commit(cmd *cobra.Command, args []string) error {
}
if len(iidFile) > 0 {
if err = ioutil.WriteFile(iidFile, []byte(response.Id), 0644); err != nil {
- return errors.Wrap(err, "failed to write image ID")
+ return fmt.Errorf("failed to write image ID: %w", err)
}
}
fmt.Println(response.Id)
diff --git a/cmd/podman/containers/cp.go b/cmd/podman/containers/cp.go
index eb18dfce4..c38cba66e 100644
--- a/cmd/podman/containers/cp.go
+++ b/cmd/podman/containers/cp.go
@@ -1,6 +1,7 @@
package containers
import (
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -9,6 +10,8 @@ import (
"strconv"
"strings"
+ "errors"
+
buildahCopiah "github.com/containers/buildah/copier"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
@@ -17,7 +20,6 @@ import (
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -55,10 +57,13 @@ var (
func cpFlags(cmd *cobra.Command) {
flags := cmd.Flags()
- flags.BoolVar(&cpOpts.Extract, "extract", false, "Deprecated...")
- flags.BoolVar(&cpOpts.Pause, "pause", true, "Deprecated")
+ flags.BoolVar(&cpOpts.OverwriteDirNonDir, "overwrite", false, "Allow to overwrite directories with non-directories and vice versa")
flags.BoolVarP(&chown, "archive", "a", true, `Chown copied files to the primary uid/gid of the destination container.`)
+
+ // Deprecated flags (both are NOPs): exist for backwards compat
+ flags.BoolVar(&cpOpts.Extract, "extract", false, "Deprecated...")
_ = flags.MarkHidden("extract")
+ flags.BoolVar(&cpOpts.Pause, "pause", true, "Deprecated")
_ = flags.MarkHidden("pause")
}
@@ -99,7 +104,7 @@ func containerMustExist(container string) error {
return err
}
if !exists.Value {
- return errors.Errorf("container %q does not exist", container)
+ return fmt.Errorf("container %q does not exist", container)
}
return nil
}
@@ -128,7 +133,7 @@ func copyContainerToContainer(sourceContainer string, sourcePath string, destCon
sourceContainerInfo, err := registry.ContainerEngine().ContainerStat(registry.GetContext(), sourceContainer, sourcePath)
if err != nil {
- return errors.Wrapf(err, "%q could not be found on container %s", sourcePath, sourceContainer)
+ return fmt.Errorf("%q could not be found on container %s: %w", sourcePath, sourceContainer, err)
}
destContainerBaseName, destContainerInfo, destResolvedToParentDir, err := resolvePathOnDestinationContainer(destContainer, destPath, false)
@@ -167,7 +172,7 @@ func copyContainerToContainer(sourceContainer string, sourcePath string, destCon
return err
}
if err := copyFunc(); err != nil {
- return errors.Wrap(err, "error copying from container")
+ return fmt.Errorf("error copying from container: %w", err)
}
return nil
}
@@ -175,7 +180,7 @@ func copyContainerToContainer(sourceContainer string, sourcePath string, destCon
destContainerCopy := func() error {
defer reader.Close()
- copyOptions := entities.CopyOptions{Chown: chown}
+ copyOptions := entities.CopyOptions{Chown: chown, NoOverwriteDirNonDir: !cpOpts.OverwriteDirNonDir}
if (!sourceContainerInfo.IsDir && !destContainerInfo.IsDir) || destResolvedToParentDir {
// If we're having a file-to-file copy, make sure to
// rename accordingly.
@@ -187,7 +192,7 @@ func copyContainerToContainer(sourceContainer string, sourcePath string, destCon
return err
}
if err := copyFunc(); err != nil {
- return errors.Wrap(err, "error copying to container")
+ return fmt.Errorf("error copying to container: %w", err)
}
return nil
}
@@ -209,7 +214,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
containerInfo, err := registry.ContainerEngine().ContainerStat(registry.GetContext(), container, containerPath)
if err != nil {
- return errors.Wrapf(err, "%q could not be found on container %s", containerPath, container)
+ return fmt.Errorf("%q could not be found on container %s: %w", containerPath, container, err)
}
var hostBaseName string
@@ -217,13 +222,13 @@ func copyFromContainer(container string, containerPath string, hostPath string)
hostInfo, hostInfoErr := copy.ResolveHostPath(hostPath)
if hostInfoErr != nil {
if strings.HasSuffix(hostPath, "/") {
- return errors.Wrapf(hostInfoErr, "%q could not be found on the host", hostPath)
+ return fmt.Errorf("%q could not be found on the host: %w", hostPath, hostInfoErr)
}
// If it doesn't exist, then let's have a look at the parent dir.
parentDir := filepath.Dir(hostPath)
hostInfo, err = copy.ResolveHostPath(parentDir)
if err != nil {
- return errors.Wrapf(hostInfoErr, "%q could not be found on the host", hostPath)
+ return fmt.Errorf("%q could not be found on the host: %w", hostPath, hostInfoErr)
}
// If the specified path does not exist, we need to assume that
// it'll be created while copying. Hence, we use it as the
@@ -238,7 +243,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
if !isStdout {
if err := validateFileInfo(hostInfo); err != nil {
- return errors.Wrap(err, "invalid destination")
+ return fmt.Errorf("invalid destination: %w", err)
}
}
@@ -294,9 +299,11 @@ func copyFromContainer(container string, containerPath string, hostPath string)
}
putOptions := buildahCopiah.PutOptions{
- ChownDirs: &idPair,
- ChownFiles: &idPair,
- IgnoreDevices: true,
+ ChownDirs: &idPair,
+ ChownFiles: &idPair,
+ IgnoreDevices: true,
+ NoOverwriteDirNonDir: !cpOpts.OverwriteDirNonDir,
+ NoOverwriteNonDirDir: !cpOpts.OverwriteDirNonDir,
}
if (!containerInfo.IsDir && !hostInfo.IsDir) || resolvedToHostParentDir {
// If we're having a file-to-file copy, make sure to
@@ -308,7 +315,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
dir = filepath.Dir(dir)
}
if err := buildahCopiah.Put(dir, "", putOptions, reader); err != nil {
- return errors.Wrap(err, "error copying to host")
+ return fmt.Errorf("error copying to host: %w", err)
}
return nil
}
@@ -320,7 +327,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
return err
}
if err := copyFunc(); err != nil {
- return errors.Wrap(err, "error copying from container")
+ return fmt.Errorf("error copying from container: %w", err)
}
return nil
}
@@ -342,7 +349,7 @@ func copyToContainer(container string, containerPath string, hostPath string) er
// Make sure that host path exists.
hostInfo, err := copy.ResolveHostPath(hostPath)
if err != nil {
- return errors.Wrapf(err, "%q could not be found on the host", hostPath)
+ return fmt.Errorf("%q could not be found on the host: %w", hostPath, err)
}
containerBaseName, containerInfo, containerResolvedToParentDir, err := resolvePathOnDestinationContainer(container, containerPath, isStdin)
@@ -417,7 +424,7 @@ func copyToContainer(container string, containerPath string, hostPath string) er
getOptions.Rename = map[string]string{filepath.Base(hostTarget): containerBaseName}
}
if err := buildahCopiah.Get("/", "", getOptions, []string{hostTarget}, writer); err != nil {
- return errors.Wrap(err, "error copying from host")
+ return fmt.Errorf("error copying from host: %w", err)
}
return nil
}
@@ -429,12 +436,12 @@ func copyToContainer(container string, containerPath string, hostPath string) er
target = filepath.Dir(target)
}
- copyFunc, err := registry.ContainerEngine().ContainerCopyFromArchive(registry.GetContext(), container, target, reader, entities.CopyOptions{Chown: chown})
+ copyFunc, err := registry.ContainerEngine().ContainerCopyFromArchive(registry.GetContext(), container, target, reader, entities.CopyOptions{Chown: chown, NoOverwriteDirNonDir: !cpOpts.OverwriteDirNonDir})
if err != nil {
return err
}
if err := copyFunc(); err != nil {
- return errors.Wrap(err, "error copying to container")
+ return fmt.Errorf("error copying to container: %w", err)
}
return nil
}
@@ -449,11 +456,11 @@ func resolvePathOnDestinationContainer(container string, containerPath string, i
containerInfo, err = registry.ContainerEngine().ContainerStat(registry.GetContext(), container, containerPath)
if err == nil {
baseName = filepath.Base(containerInfo.LinkTarget)
- return // nolint: nilerr
+ return //nolint: nilerr
}
if strings.HasSuffix(containerPath, "/") {
- err = errors.Wrapf(err, "%q could not be found on container %s", containerPath, container)
+ err = fmt.Errorf("%q could not be found on container %s: %w", containerPath, container, err)
return
}
if isStdin {
@@ -474,13 +481,13 @@ func resolvePathOnDestinationContainer(container string, containerPath string, i
parentDir, err := containerParentDir(container, path)
if err != nil {
- err = errors.Wrapf(err, "could not determine parent dir of %q on container %s", path, container)
+ err = fmt.Errorf("could not determine parent dir of %q on container %s: %w", path, container, err)
return
}
containerInfo, err = registry.ContainerEngine().ContainerStat(registry.GetContext(), container, parentDir)
if err != nil {
- err = errors.Wrapf(err, "%q could not be found on container %s", containerPath, container)
+ err = fmt.Errorf("%q could not be found on container %s: %w", containerPath, container, err)
return
}
@@ -500,7 +507,7 @@ func containerParentDir(container string, containerPath string) (string, error)
return "", err
}
if len(inspectData) != 1 {
- return "", errors.Errorf("inspecting container %q: expected 1 data item but got %d", container, len(inspectData))
+ return "", fmt.Errorf("inspecting container %q: expected 1 data item but got %d", container, len(inspectData))
}
workDir := filepath.Join("/", inspectData[0].Config.WorkingDir)
workDir = filepath.Join(workDir, containerPath)
@@ -513,5 +520,5 @@ func validateFileInfo(info *copy.FileInfo) error {
if info.Mode.IsDir() || info.Mode.IsRegular() {
return nil
}
- return errors.Errorf("%q must be a directory or a regular file", info.LinkTarget)
+ return fmt.Errorf("%q must be a directory or a regular file", info.LinkTarget)
}
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index 29e138e30..7d0f4d9ae 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"fmt"
"os"
"strconv"
@@ -9,6 +10,7 @@ import (
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v4/cmd/podman/common"
@@ -20,7 +22,6 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/mattn/go-isatty"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -70,6 +71,7 @@ func createFlags(cmd *cobra.Command) {
)
flags.SetInterspersed(false)
+ common.DefineCreateDefaults(&cliVals)
common.DefineCreateFlags(cmd, &cliVals, false, false)
common.DefineNetFlags(cmd)
@@ -101,28 +103,37 @@ func init() {
createFlags(containerCreateCommand)
}
-func create(cmd *cobra.Command, args []string) error {
- var (
- err error
- )
+func commonFlags(cmd *cobra.Command) error {
+ var err error
flags := cmd.Flags()
cliVals.Net, err = common.NetFlagsToNetOptions(nil, *flags)
if err != nil {
return err
}
+ if cmd.Flags().Changed("image-volume") {
+ cliVals.ImageVolume = cmd.Flag("image-volume").Value.String()
+ }
+ return nil
+}
+
+func create(cmd *cobra.Command, args []string) error {
+ if err := commonFlags(cmd); err != nil {
+ return err
+ }
+
// Check if initctr is used with --pod and the value is correct
if initctr := InitContainerType; cmd.Flags().Changed("init-ctr") {
if !cmd.Flags().Changed("pod") {
return errors.New("must specify pod value with init-ctr")
}
- if !util.StringInSlice(initctr, []string{define.AlwaysInitContainer, define.OneShotInitContainer}) {
- return errors.Errorf("init-ctr value must be '%s' or '%s'", define.AlwaysInitContainer, define.OneShotInitContainer)
+ if !cutil.StringInSlice(initctr, []string{define.AlwaysInitContainer, define.OneShotInitContainer}) {
+ return fmt.Errorf("init-ctr value must be '%s' or '%s'", define.AlwaysInitContainer, define.OneShotInitContainer)
}
cliVals.InitContainerType = initctr
}
- cliVals, err = CreateInit(cmd, cliVals, false)
+ cliVals, err := CreateInit(cmd, cliVals, false)
if err != nil {
return err
}
@@ -130,7 +141,7 @@ func create(cmd *cobra.Command, args []string) error {
rawImageName := ""
if !cliVals.RootFS {
rawImageName = args[0]
- name, err := PullImage(args[0], cliVals)
+ name, err := PullImage(args[0], &cliVals)
if err != nil {
return err
}
@@ -206,17 +217,13 @@ func CreateInit(c *cobra.Command, vals entities.ContainerCreateOptions, isInfra
}
if !isInfra {
- if c.Flag("shm-size").Changed {
- vals.ShmSize = c.Flag("shm-size").Value.String()
- }
if c.Flag("cpu-period").Changed && c.Flag("cpus").Changed {
- return vals, errors.Errorf("--cpu-period and --cpus cannot be set together")
+ return vals, errors.New("--cpu-period and --cpus cannot be set together")
}
if c.Flag("cpu-quota").Changed && c.Flag("cpus").Changed {
- return vals, errors.Errorf("--cpu-quota and --cpus cannot be set together")
+ return vals, errors.New("--cpu-quota and --cpus cannot be set together")
}
vals.IPC = c.Flag("ipc").Value.String()
- vals.UTS = c.Flag("uts").Value.String()
vals.PID = c.Flag("pid").Value.String()
vals.CgroupNS = c.Flag("cgroupns").Value.String()
@@ -260,27 +267,30 @@ func CreateInit(c *cobra.Command, vals entities.ContainerCreateOptions, isInfra
if c.Flags().Changed("env") {
env, err := c.Flags().GetStringArray("env")
if err != nil {
- return vals, errors.Wrapf(err, "retrieve env flag")
+ return vals, fmt.Errorf("retrieve env flag: %w", err)
}
vals.Env = env
}
if c.Flag("cgroups").Changed && vals.CgroupsMode == "split" && registry.IsRemote() {
- return vals, errors.Errorf("the option --cgroups=%q is not supported in remote mode", vals.CgroupsMode)
+ return vals, fmt.Errorf("the option --cgroups=%q is not supported in remote mode", vals.CgroupsMode)
}
if c.Flag("pod").Changed && !strings.HasPrefix(c.Flag("pod").Value.String(), "new:") && c.Flag("userns").Changed {
- return vals, errors.Errorf("--userns and --pod cannot be set together")
+ return vals, errors.New("--userns and --pod cannot be set together")
}
}
+ if c.Flag("shm-size").Changed {
+ vals.ShmSize = c.Flag("shm-size").Value.String()
+ }
if (c.Flag("dns").Changed || c.Flag("dns-opt").Changed || c.Flag("dns-search").Changed) && vals.Net != nil && (vals.Net.Network.NSMode == specgen.NoNetwork || vals.Net.Network.IsContainer()) {
- return vals, errors.Errorf("conflicting options: dns and the network mode: " + string(vals.Net.Network.NSMode))
+ return vals, fmt.Errorf("conflicting options: dns and the network mode: " + string(vals.Net.Network.NSMode))
}
noHosts, err := c.Flags().GetBool("no-hosts")
if err != nil {
return vals, err
}
if noHosts && c.Flag("add-host").Changed {
- return vals, errors.Errorf("--no-hosts and --add-host cannot be set together")
+ return vals, errors.New("--no-hosts and --add-host cannot be set together")
}
if !isInfra && c.Flag("entrypoint").Changed {
@@ -294,7 +304,8 @@ func CreateInit(c *cobra.Command, vals entities.ContainerCreateOptions, isInfra
return vals, nil
}
-func PullImage(imageName string, cliVals entities.ContainerCreateOptions) (string, error) {
+// Pulls image if any also parses and populates OS, Arch and Variant in specified container create options
+func PullImage(imageName string, cliVals *entities.ContainerCreateOptions) (string, error) {
pullPolicy, err := config.ParsePullPolicy(cliVals.Pull)
if err != nil {
return "", err
@@ -303,7 +314,7 @@ func PullImage(imageName string, cliVals entities.ContainerCreateOptions) (strin
if cliVals.Platform != "" || cliVals.Arch != "" || cliVals.OS != "" {
if cliVals.Platform != "" {
if cliVals.Arch != "" || cliVals.OS != "" {
- return "", errors.Errorf("--platform option can not be specified with --arch or --os")
+ return "", errors.New("--platform option can not be specified with --arch or --os")
}
split := strings.SplitN(cliVals.Platform, "/", 2)
cliVals.OS = split[0]
@@ -351,7 +362,7 @@ func createPodIfNecessary(cmd *cobra.Command, s *specgen.SpecGenerator, netOpts
}
podName := strings.Replace(s.Pod, "new:", "", 1)
if len(podName) < 1 {
- return errors.Errorf("new pod name must be at least one character")
+ return errors.New("new pod name must be at least one character")
}
var err error
diff --git a/cmd/podman/containers/diff.go b/cmd/podman/containers/diff.go
index e1a8ea729..ce501f890 100644
--- a/cmd/podman/containers/diff.go
+++ b/cmd/podman/containers/diff.go
@@ -1,13 +1,14 @@
package containers
import (
+ "errors"
+
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/diff"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -32,13 +33,9 @@ func init() {
Parent: containerCmd,
})
- diffOpts = &entities.DiffOptions{}
+ diffOpts = new(entities.DiffOptions)
flags := diffCmd.Flags()
- // FIXME: Why does this exists? It is not used anywhere.
- flags.BoolVar(&diffOpts.Archive, "archive", true, "Save the diff as a tar archive")
- _ = flags.MarkHidden("archive")
-
formatFlagName := "format"
flags.StringVar(&diffOpts.Format, formatFlagName, "", "Change the output format (json)")
_ = diffCmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(nil))
diff --git a/cmd/podman/containers/exec.go b/cmd/podman/containers/exec.go
index f5b93a96a..303795489 100644
--- a/cmd/podman/containers/exec.go
+++ b/cmd/podman/containers/exec.go
@@ -2,6 +2,7 @@ package containers
import (
"bufio"
+ "errors"
"fmt"
"os"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
envLib "github.com/containers/podman/v4/pkg/env"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -126,14 +126,14 @@ func exec(_ *cobra.Command, args []string) error {
cliEnv, err := envLib.ParseSlice(envInput)
if err != nil {
- return errors.Wrap(err, "error parsing environment variables")
+ return fmt.Errorf("error parsing environment variables: %w", err)
}
execOpts.Envs = envLib.Join(execOpts.Envs, cliEnv)
for fd := 3; fd < int(3+execOpts.PreserveFDs); fd++ {
if !rootless.IsFdInherited(fd) {
- return errors.Errorf("file descriptor %d is not available - the preserve-fds option requires that file descriptors must be passed", fd)
+ return fmt.Errorf("file descriptor %d is not available - the preserve-fds option requires that file descriptors must be passed", fd)
}
}
diff --git a/cmd/podman/containers/export.go b/cmd/podman/containers/export.go
index 681df93e0..d78bfe960 100644
--- a/cmd/podman/containers/export.go
+++ b/cmd/podman/containers/export.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"os"
"github.com/containers/common/pkg/completion"
@@ -9,7 +10,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/parse"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/term"
)
@@ -70,7 +70,7 @@ func export(cmd *cobra.Command, args []string) error {
if len(exportOpts.Output) == 0 {
file := os.Stdout
if term.IsTerminal(int(file.Fd())) {
- return errors.Errorf("refusing to export to terminal. Use -o flag or redirect")
+ return errors.New("refusing to export to terminal. Use -o flag or redirect")
}
exportOpts.Output = "/dev/stdout"
} else if err := parse.ValidateFileName(exportOpts.Output); err != nil {
diff --git a/cmd/podman/containers/init.go b/cmd/podman/containers/init.go
index 7336a2332..649cdf1c9 100644
--- a/cmd/podman/containers/init.go
+++ b/cmd/podman/containers/init.go
@@ -21,7 +21,7 @@ var (
Long: initDescription,
RunE: initContainer,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompleteContainersCreated,
Example: `podman init --latest
diff --git a/cmd/podman/containers/kill.go b/cmd/podman/containers/kill.go
index e994fbf2c..5a5379389 100644
--- a/cmd/podman/containers/kill.go
+++ b/cmd/podman/containers/kill.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"fmt"
"io/ioutil"
"strings"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/signal"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -25,7 +25,7 @@ var (
Long: killDescription,
RunE: kill,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, true)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "cidfile")
},
ValidArgsFunction: common.AutocompleteContainersRunning,
Example: `podman kill mywebserver
@@ -35,7 +35,7 @@ var (
containerKillCommand = &cobra.Command{
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, true)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "cidfile")
},
Use: killCommand.Use,
Short: killCommand.Short,
@@ -97,7 +97,7 @@ func kill(_ *cobra.Command, args []string) error {
for _, cidFile := range cidFiles {
content, err := ioutil.ReadFile(cidFile)
if err != nil {
- return errors.Wrap(err, "error reading CIDFile")
+ return fmt.Errorf("error reading CIDFile: %w", err)
}
id := strings.Split(string(content), "\n")[0]
args = append(args, id)
diff --git a/cmd/podman/containers/logs.go b/cmd/podman/containers/logs.go
index 374bf6b1c..9a9749210 100644
--- a/cmd/podman/containers/logs.go
+++ b/cmd/podman/containers/logs.go
@@ -1,6 +1,8 @@
package containers
import (
+ "errors"
+ "fmt"
"os"
"github.com/containers/common/pkg/completion"
@@ -9,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -125,7 +126,7 @@ func logs(_ *cobra.Command, args []string) error {
// parse time, error out if something is wrong
since, err := util.ParseInputTime(logsOptions.SinceRaw, true)
if err != nil {
- return errors.Wrapf(err, "error parsing --since %q", logsOptions.SinceRaw)
+ return fmt.Errorf("error parsing --since %q: %w", logsOptions.SinceRaw, err)
}
logsOptions.Since = since
}
@@ -133,7 +134,7 @@ func logs(_ *cobra.Command, args []string) error {
// parse time, error out if something is wrong
until, err := util.ParseInputTime(logsOptions.UntilRaw, false)
if err != nil {
- return errors.Wrapf(err, "error parsing --until %q", logsOptions.UntilRaw)
+ return fmt.Errorf("error parsing --until %q: %w", logsOptions.UntilRaw, err)
}
logsOptions.Until = until
}
diff --git a/cmd/podman/containers/mount.go b/cmd/podman/containers/mount.go
index 18177e3ce..a1f0b0cf3 100644
--- a/cmd/podman/containers/mount.go
+++ b/cmd/podman/containers/mount.go
@@ -1,6 +1,7 @@
package containers
import (
+ "errors"
"fmt"
"os"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -33,7 +33,7 @@ var (
Long: mountDescription,
RunE: mount,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, true, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, true, "")
},
ValidArgsFunction: common.AutocompleteContainers,
}
@@ -83,7 +83,7 @@ func init() {
func mount(cmd *cobra.Command, args []string) error {
if len(args) > 0 && mountOpts.Latest {
- return errors.Errorf("--latest and containers cannot be used together")
+ return errors.New("--latest and containers cannot be used together")
}
reports, err := registry.ContainerEngine().ContainerMount(registry.GetContext(), args, mountOpts)
if err != nil {
@@ -108,7 +108,7 @@ func mount(cmd *cobra.Command, args []string) error {
case mountOpts.Format == "":
break // print defaults
default:
- return errors.Errorf("unknown --format argument: %q", mountOpts.Format)
+ return fmt.Errorf("unknown --format argument: %q", mountOpts.Format)
}
mrs := make([]mountReporter, 0, len(reports))
diff --git a/cmd/podman/containers/pause.go b/cmd/podman/containers/pause.go
index 9dbe97d11..3c26fd5c8 100644
--- a/cmd/podman/containers/pause.go
+++ b/cmd/podman/containers/pause.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"fmt"
"github.com/containers/common/pkg/cgroups"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -73,7 +73,7 @@ func pause(cmd *cobra.Command, args []string) error {
}
if len(args) < 1 && !pauseOpts.All {
- return errors.Errorf("you must provide at least one container name or id")
+ return errors.New("you must provide at least one container name or id")
}
responses, err := registry.ContainerEngine().ContainerPause(context.Background(), args, pauseOpts)
if err != nil {
diff --git a/cmd/podman/containers/port.go b/cmd/podman/containers/port.go
index 22d1d16d3..74bfdf5c6 100644
--- a/cmd/podman/containers/port.go
+++ b/cmd/podman/containers/port.go
@@ -1,6 +1,7 @@
package containers
import (
+ "errors"
"fmt"
"strconv"
"strings"
@@ -9,13 +10,12 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
var (
- portDescription = `List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT
+ portDescription = `List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT
`
portCommand = &cobra.Command{
Use: "port [options] CONTAINER [PORT]",
@@ -23,7 +23,7 @@ var (
Long: portDescription,
RunE: port,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, true, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, true, "")
},
ValidArgsFunction: common.AutocompleteContainerOneArg,
Example: `podman port --all
@@ -37,7 +37,7 @@ var (
Long: portDescription,
RunE: portCommand.RunE,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, true, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, true, "")
},
ValidArgsFunction: portCommand.ValidArgsFunction,
Example: `podman container port --all
@@ -77,14 +77,14 @@ func port(_ *cobra.Command, args []string) error {
)
if len(args) == 0 && !portOpts.Latest && !portOpts.All {
- return errors.Errorf("you must supply a running container name or id")
+ return errors.New("you must supply a running container name or id")
}
if !portOpts.Latest && len(args) >= 1 {
container = args[0]
}
port := ""
if len(args) > 2 {
- return errors.Errorf("`port` accepts at most 2 arguments")
+ return errors.New("`port` accepts at most 2 arguments")
}
if len(args) > 1 && !portOpts.Latest {
port = args[1]
@@ -95,7 +95,7 @@ func port(_ *cobra.Command, args []string) error {
if len(port) > 0 {
fields := strings.Split(port, "/")
if len(fields) > 2 || len(fields) < 1 {
- return errors.Errorf("port formats are port/protocol. '%s' is invalid", port)
+ return fmt.Errorf("port formats are port/protocol. '%s' is invalid", port)
}
if len(fields) == 1 {
fields = append(fields, "tcp")
@@ -149,7 +149,7 @@ func port(_ *cobra.Command, args []string) error {
}
}
if !found && port != "" {
- return errors.Errorf("failed to find published port %q", port)
+ return fmt.Errorf("failed to find published port %q", port)
}
}
return nil
diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go
index a011a8ae6..12b7f5dae 100644
--- a/cmd/podman/containers/ps.go
+++ b/cmd/podman/containers/ps.go
@@ -1,6 +1,7 @@
package containers
import (
+ "errors"
"fmt"
"os"
"strings"
@@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -108,18 +108,18 @@ func listFlagSet(cmd *cobra.Command) {
func checkFlags(c *cobra.Command) error {
// latest, and last are mutually exclusive.
if listOpts.Last >= 0 && listOpts.Latest {
- return errors.Errorf("last and latest are mutually exclusive")
+ return errors.New("last and latest are mutually exclusive")
}
// Quiet conflicts with size and namespace and is overridden by a Go
// template.
if listOpts.Quiet {
if listOpts.Size || listOpts.Namespace {
- return errors.Errorf("quiet conflicts with size and namespace")
+ return errors.New("quiet conflicts with size and namespace")
}
}
// Size and namespace conflict with each other
if listOpts.Size && listOpts.Namespace {
- return errors.Errorf("size and namespace options conflict")
+ return errors.New("size and namespace options conflict")
}
if listOpts.Watch > 0 && listOpts.Latest {
@@ -191,7 +191,7 @@ func ps(cmd *cobra.Command, _ []string) error {
for _, f := range filters {
split := strings.SplitN(f, "=", 2)
if len(split) == 1 {
- return errors.Errorf("invalid filter %q", f)
+ return fmt.Errorf("invalid filter %q", f)
}
listOpts.Filters[split[0]] = append(listOpts.Filters[split[0]], split[1])
}
diff --git a/cmd/podman/containers/rename.go b/cmd/podman/containers/rename.go
index 1c0b28ce3..d78492ae4 100644
--- a/cmd/podman/containers/rename.go
+++ b/cmd/podman/containers/rename.go
@@ -1,10 +1,11 @@
package containers
import (
+ "errors"
+
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -44,7 +45,7 @@ func init() {
func rename(cmd *cobra.Command, args []string) error {
if len(args) > 2 {
- return errors.Errorf("must provide at least two arguments to rename")
+ return errors.New("must provide at least two arguments to rename")
}
renameOpts := entities.ContainerRenameOptions{
NewName: args[1],
diff --git a/cmd/podman/containers/restart.go b/cmd/podman/containers/restart.go
index 69d8d71ea..9d704d671 100644
--- a/cmd/podman/containers/restart.go
+++ b/cmd/podman/containers/restart.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -26,7 +25,7 @@ var (
Long: restartDescription,
RunE: restart,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompleteContainers,
Example: `podman restart ctrID
@@ -85,10 +84,10 @@ func restart(cmd *cobra.Command, args []string) error {
errs utils.OutputErrors
)
if len(args) < 1 && !restartOptions.Latest && !restartOptions.All {
- return errors.Wrapf(define.ErrInvalidArg, "you must provide at least one container name or ID")
+ return fmt.Errorf("you must provide at least one container name or ID: %w", define.ErrInvalidArg)
}
if len(args) > 0 && restartOptions.Latest {
- return errors.Wrapf(define.ErrInvalidArg, "--latest and containers cannot be used together")
+ return fmt.Errorf("--latest and containers cannot be used together: %w", define.ErrInvalidArg)
}
if cmd.Flag("time").Changed {
diff --git a/cmd/podman/containers/restore.go b/cmd/podman/containers/restore.go
index eeda5a05f..1e4745354 100644
--- a/cmd/podman/containers/restore.go
+++ b/cmd/podman/containers/restore.go
@@ -28,7 +28,7 @@ var (
Long: restoreDescription,
RunE: restore,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, true, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, true, "")
},
ValidArgsFunction: common.AutocompleteContainersAndImages,
Example: `podman container restore ctrID
diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go
index 420e3c38d..9fa688d23 100644
--- a/cmd/podman/containers/rm.go
+++ b/cmd/podman/containers/rm.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"fmt"
"io/ioutil"
"strings"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -28,7 +28,7 @@ var (
Long: rmDescription,
RunE: rm,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, true)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "cidfile")
},
ValidArgsFunction: common.AutocompleteContainers,
Example: `podman rm imageID
@@ -104,7 +104,7 @@ func rm(cmd *cobra.Command, args []string) error {
for _, cidFile := range cidFiles {
content, err := ioutil.ReadFile(cidFile)
if err != nil {
- return errors.Wrap(err, "error reading CIDFile")
+ return fmt.Errorf("error reading CIDFile: %w", err)
}
id := strings.Split(string(content), "\n")[0]
args = append(args, id)
@@ -123,9 +123,7 @@ func rm(cmd *cobra.Command, args []string) error {
// removeContainers will set the exit code according to the `podman-rm` man
// page.
func removeContainers(namesOrIDs []string, rmOptions entities.RmOptions, setExit bool) error {
- var (
- errs utils.OutputErrors
- )
+ var errs utils.OutputErrors
responses, err := registry.ContainerEngine().ContainerRm(context.Background(), namesOrIDs, rmOptions)
if err != nil {
if setExit {
@@ -135,8 +133,7 @@ func removeContainers(namesOrIDs []string, rmOptions entities.RmOptions, setExit
}
for _, r := range responses {
if r.Err != nil {
- // TODO this will not work with the remote client
- if errors.Cause(err) == define.ErrWillDeadlock {
+ if errors.Is(r.Err, define.ErrWillDeadlock) {
logrus.Errorf("Potential deadlock detected - please run 'podman system renumber' to resolve")
}
if setExit {
@@ -155,15 +152,9 @@ func setExitCode(err error) {
if registry.GetExitCode() == 1 {
return
}
- cause := errors.Cause(err)
- switch {
- case cause == define.ErrNoSuchCtr:
+ if errors.Is(err, define.ErrNoSuchCtr) || strings.Contains(err.Error(), define.ErrNoSuchCtr.Error()) {
registry.SetExitCode(1)
- case strings.Contains(cause.Error(), define.ErrNoSuchCtr.Error()):
- registry.SetExitCode(1)
- case cause == define.ErrCtrStateInvalid:
- registry.SetExitCode(2)
- case strings.Contains(cause.Error(), define.ErrCtrStateInvalid.Error()):
+ } else if errors.Is(err, define.ErrCtrStateInvalid) || strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
registry.SetExitCode(2)
}
}
diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go
index 951981293..ef13ea95e 100644
--- a/cmd/podman/containers/run.go
+++ b/cmd/podman/containers/run.go
@@ -15,7 +15,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/specgenutil"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/term"
@@ -61,6 +60,7 @@ func runFlags(cmd *cobra.Command) {
flags := cmd.Flags()
flags.SetInterspersed(false)
+ common.DefineCreateDefaults(&cliVals)
common.DefineCreateFlags(cmd, &cliVals, false, false)
common.DefineNetFlags(cmd)
@@ -109,7 +109,9 @@ func init() {
}
func run(cmd *cobra.Command, args []string) error {
- var err error
+ if err := commonFlags(cmd); err != nil {
+ return err
+ }
// TODO: Breaking change should be made fatal in next major Release
if cliVals.TTY && cliVals.Interactive && !term.IsTerminal(int(os.Stdin.Fd())) {
@@ -122,20 +124,16 @@ func run(cmd *cobra.Command, args []string) error {
}
}
- flags := cmd.Flags()
- cliVals.Net, err = common.NetFlagsToNetOptions(nil, *flags)
- if err != nil {
- return err
- }
runOpts.CIDFile = cliVals.CIDFile
runOpts.Rm = cliVals.Rm
- if cliVals, err = CreateInit(cmd, cliVals, false); err != nil {
+ cliVals, err := CreateInit(cmd, cliVals, false)
+ if err != nil {
return err
}
for fd := 3; fd < int(3+runOpts.PreserveFDs); fd++ {
if !rootless.IsFdInherited(fd) {
- return errors.Errorf("file descriptor %d is not available - the preserve-fds option requires that file descriptors must be passed", fd)
+ return fmt.Errorf("file descriptor %d is not available - the preserve-fds option requires that file descriptors must be passed", fd)
}
}
@@ -143,7 +141,7 @@ func run(cmd *cobra.Command, args []string) error {
rawImageName := ""
if !cliVals.RootFS {
rawImageName = args[0]
- name, err := PullImage(args[0], cliVals)
+ name, err := PullImage(args[0], &cliVals)
if err != nil {
return err
}
@@ -166,7 +164,7 @@ func run(cmd *cobra.Command, args []string) error {
// If attach is set, clear stdin/stdout/stderr and only attach requested
if cmd.Flag("attach").Changed {
if passthrough {
- return errors.Wrapf(define.ErrInvalidArg, "cannot specify --attach with --log-driver=passthrough")
+ return fmt.Errorf("cannot specify --attach with --log-driver=passthrough: %w", define.ErrInvalidArg)
}
runOpts.OutputStream = nil
runOpts.ErrorStream = nil
@@ -183,7 +181,7 @@ func run(cmd *cobra.Command, args []string) error {
case "stdin":
runOpts.InputStream = os.Stdin
default:
- return errors.Wrapf(define.ErrInvalidArg, "invalid stream %q for --attach - must be one of stdin, stdout, or stderr", stream)
+ return fmt.Errorf("invalid stream %q for --attach - must be one of stdin, stdout, or stderr: %w", stream, define.ErrInvalidArg)
}
}
}
@@ -194,6 +192,9 @@ func run(cmd *cobra.Command, args []string) error {
return err
}
s.RawImageName = rawImageName
+ s.ImageOS = cliVals.OS
+ s.ImageArch = cliVals.Arch
+ s.ImageVariant = cliVals.Variant
s.Passwd = &runOpts.Passwd
runOpts.Spec = s
diff --git a/cmd/podman/containers/start.go b/cmd/podman/containers/start.go
index b70e975b7..cd4fa17b8 100644
--- a/cmd/podman/containers/start.go
+++ b/cmd/podman/containers/start.go
@@ -1,6 +1,7 @@
package containers
import (
+ "errors"
"fmt"
"os"
"strings"
@@ -11,7 +12,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -88,19 +88,19 @@ func validateStart(cmd *cobra.Command, args []string) error {
return errors.New("start requires at least one argument")
}
if startOptions.All && startOptions.Latest {
- return errors.Errorf("--all and --latest cannot be used together")
+ return errors.New("--all and --latest cannot be used together")
}
if len(args) > 0 && startOptions.Latest {
- return errors.Errorf("--latest and containers cannot be used together")
+ return errors.New("--latest and containers cannot be used together")
}
if len(args) > 1 && startOptions.Attach {
- return errors.Errorf("you cannot start and attach multiple containers at once")
+ return errors.New("you cannot start and attach multiple containers at once")
}
if (len(args) > 0 || startOptions.Latest) && startOptions.All {
- return errors.Errorf("either start all containers or the container(s) provided in the arguments")
+ return errors.New("either start all containers or the container(s) provided in the arguments")
}
if startOptions.Attach && startOptions.All {
- return errors.Errorf("you cannot start and attach all containers at once")
+ return errors.New("you cannot start and attach all containers at once")
}
return nil
}
@@ -114,7 +114,7 @@ func start(cmd *cobra.Command, args []string) error {
startOptions.SigProxy = sigProxy
if sigProxy && !startOptions.Attach {
- return errors.Wrapf(define.ErrInvalidArg, "you cannot use sig-proxy without --attach")
+ return fmt.Errorf("you cannot use sig-proxy without --attach: %w", define.ErrInvalidArg)
}
if startOptions.Attach {
startOptions.Stdin = os.Stdin
@@ -127,7 +127,7 @@ func start(cmd *cobra.Command, args []string) error {
for _, f := range filters {
split := strings.SplitN(f, "=", 2)
if len(split) == 1 {
- return errors.Errorf("invalid filter %q", f)
+ return fmt.Errorf("invalid filter %q", f)
}
startOptions.Filters[split[0]] = append(startOptions.Filters[split[0]], split[1])
}
diff --git a/cmd/podman/containers/stats.go b/cmd/podman/containers/stats.go
index 500671d31..0dd8ce80a 100644
--- a/cmd/podman/containers/stats.go
+++ b/cmd/podman/containers/stats.go
@@ -1,6 +1,7 @@
package containers
import (
+ "errors"
"fmt"
"os"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/utils"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -105,7 +105,7 @@ func checkStatOptions(cmd *cobra.Command, args []string) error {
opts++
}
if opts > 1 {
- return errors.Errorf("--all, --latest and containers cannot be used together")
+ return errors.New("--all, --latest and containers cannot be used together")
}
return nil
}
@@ -214,10 +214,6 @@ func (s *containerStats) BlockIO() string {
}
func (s *containerStats) PIDS() string {
- if s.PIDs == 0 {
- // If things go bazinga, return a safe value
- return "--"
- }
return fmt.Sprintf("%d", s.PIDs)
}
@@ -231,7 +227,7 @@ func (s *containerStats) MemUsageBytes() string {
func floatToPercentString(f float64) string {
strippedFloat, err := utils.RemoveScientificNotationFromFloat(f)
- if err != nil || strippedFloat == 0 {
+ if err != nil {
// If things go bazinga, return a safe value
return "--"
}
@@ -239,25 +235,19 @@ func floatToPercentString(f float64) string {
}
func combineHumanValues(a, b uint64) string {
- if a == 0 && b == 0 {
- return "-- / --"
- }
return fmt.Sprintf("%s / %s", units.HumanSize(float64(a)), units.HumanSize(float64(b)))
}
func combineBytesValues(a, b uint64) string {
- if a == 0 && b == 0 {
- return "-- / --"
- }
return fmt.Sprintf("%s / %s", units.BytesSize(float64(a)), units.BytesSize(float64(b)))
}
func outputJSON(stats []containerStats) error {
type jstat struct {
- Id string `json:"id"` // nolint
+ Id string `json:"id"` //nolint:revive,stylecheck
Name string `json:"name"`
CPUTime string `json:"cpu_time"`
- CpuPercent string `json:"cpu_percent"` // nolint
+ CpuPercent string `json:"cpu_percent"` //nolint:revive,stylecheck
AverageCPU string `json:"avg_cpu"`
MemUsage string `json:"mem_usage"`
MemPerc string `json:"mem_percent"`
diff --git a/cmd/podman/containers/stop.go b/cmd/podman/containers/stop.go
index af2250abb..2ddd169a1 100644
--- a/cmd/podman/containers/stop.go
+++ b/cmd/podman/containers/stop.go
@@ -12,7 +12,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -26,7 +25,7 @@ var (
Long: stopDescription,
RunE: stop,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, true)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "cidfile")
},
ValidArgsFunction: common.AutocompleteContainersRunning,
Example: `podman stop ctrID
@@ -40,7 +39,7 @@ var (
Long: stopCommand.Long,
RunE: stopCommand.RunE,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, true)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "cidfile")
},
ValidArgsFunction: stopCommand.ValidArgsFunction,
Example: `podman container stop ctrID
@@ -102,7 +101,7 @@ func stop(cmd *cobra.Command, args []string) error {
for _, cidFile := range cidFiles {
content, err := ioutil.ReadFile(cidFile)
if err != nil {
- return errors.Wrap(err, "error reading CIDFile")
+ return fmt.Errorf("error reading CIDFile: %w", err)
}
id := strings.Split(string(content), "\n")[0]
args = append(args, id)
diff --git a/cmd/podman/containers/top.go b/cmd/podman/containers/top.go
index 389034e37..9340acd9a 100644
--- a/cmd/podman/containers/top.go
+++ b/cmd/podman/containers/top.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"fmt"
"os"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -88,7 +88,7 @@ func top(cmd *cobra.Command, args []string) error {
}
if len(args) < 1 && !topOptions.Latest {
- return errors.Errorf("you must provide the name or id of a running container")
+ return errors.New("you must provide the name or id of a running container")
}
if topOptions.Latest {
diff --git a/cmd/podman/containers/unmount.go b/cmd/podman/containers/unmount.go
index 26b8cfcc5..6869de2e2 100644
--- a/cmd/podman/containers/unmount.go
+++ b/cmd/podman/containers/unmount.go
@@ -27,7 +27,7 @@ var (
Long: description,
RunE: unmount,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompleteContainers,
Example: `podman unmount ctrID
@@ -43,7 +43,7 @@ var (
Long: unmountCommand.Long,
RunE: unmountCommand.RunE,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompleteContainers,
Example: `podman container unmount ctrID
diff --git a/cmd/podman/containers/unpause.go b/cmd/podman/containers/unpause.go
index eaf50f2c7..a5375e737 100644
--- a/cmd/podman/containers/unpause.go
+++ b/cmd/podman/containers/unpause.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"fmt"
"github.com/containers/common/pkg/cgroups"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -70,7 +70,7 @@ func unpause(cmd *cobra.Command, args []string) error {
}
}
if len(args) < 1 && !unPauseOptions.All {
- return errors.Errorf("you must provide at least one container name or id")
+ return errors.New("you must provide at least one container name or id")
}
responses, err := registry.ContainerEngine().ContainerUnpause(context.Background(), args, unPauseOptions)
if err != nil {
diff --git a/cmd/podman/containers/wait.go b/cmd/podman/containers/wait.go
index 720a696ce..5b8480781 100644
--- a/cmd/podman/containers/wait.go
+++ b/cmd/podman/containers/wait.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"fmt"
"time"
@@ -12,7 +13,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -86,7 +86,7 @@ func wait(cmd *cobra.Command, args []string) error {
}
if !waitOptions.Latest && len(args) == 0 {
- return errors.Errorf("%q requires a name, id, or the \"--latest\" flag", cmd.CommandPath())
+ return fmt.Errorf("%q requires a name, id, or the \"--latest\" flag", cmd.CommandPath())
}
if waitOptions.Latest && len(args) > 0 {
return errors.New("--latest and containers cannot be used together")
diff --git a/cmd/podman/diff.go b/cmd/podman/diff.go
index 7b78c8312..ec98fb5b5 100644
--- a/cmd/podman/diff.go
+++ b/cmd/podman/diff.go
@@ -35,9 +35,6 @@ func init() {
Command: diffCmd,
})
flags := diffCmd.Flags()
- // FIXME: Why does this exists? It is not used anywhere.
- flags.BoolVar(&diffOpts.Archive, "archive", true, "Save the diff as a tar archive")
- _ = flags.MarkHidden("archive")
formatFlagName := "format"
flags.StringVar(&diffOpts.Format, formatFlagName, "", "Change the output format (json)")
diff --git a/cmd/podman/diff/diff.go b/cmd/podman/diff/diff.go
index a26502de9..06df767d0 100644
--- a/cmd/podman/diff/diff.go
+++ b/cmd/podman/diff/diff.go
@@ -2,6 +2,7 @@ package diff
import (
"encoding/json"
+ "errors"
"fmt"
"os"
@@ -9,11 +10,10 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
-func Diff(cmd *cobra.Command, args []string, options entities.DiffOptions) error {
+func Diff(_ *cobra.Command, args []string, options entities.DiffOptions) error {
results, err := registry.ContainerEngine().Diff(registry.GetContext(), args, options)
if err != nil {
return err
@@ -46,7 +46,7 @@ func changesToJSON(diffs *entities.DiffReport) error {
case archive.ChangeModify:
body.Changed = append(body.Changed, row.Path)
default:
- return errors.Errorf("output kind %q not recognized", row.Kind)
+ return fmt.Errorf("output kind %q not recognized", row.Kind)
}
}
@@ -63,7 +63,7 @@ func changesToTable(diffs *entities.DiffReport) error {
return nil
}
-// IDOrLatestArgs used to validate a nameOrId was provided or the "--latest" flag
+// ValidateContainerDiffArgs used to validate a nameOrId was provided or the "--latest" flag
func ValidateContainerDiffArgs(cmd *cobra.Command, args []string) error {
given, _ := cmd.Flags().GetBool("latest")
if len(args) > 0 && !given {
@@ -73,7 +73,7 @@ func ValidateContainerDiffArgs(cmd *cobra.Command, args []string) error {
return errors.New("--latest and containers cannot be used together")
}
if len(args) == 0 && !given {
- return errors.Errorf("%q requires a name, id, or the \"--latest\" flag", cmd.CommandPath())
+ return fmt.Errorf("%q requires a name, id, or the \"--latest\" flag", cmd.CommandPath())
}
return nil
}
diff --git a/cmd/podman/early_init_linux.go b/cmd/podman/early_init_linux.go
index dfd9d1e30..e26c4264e 100644
--- a/cmd/podman/early_init_linux.go
+++ b/cmd/podman/early_init_linux.go
@@ -6,7 +6,6 @@ import (
"syscall"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)
func setRLimits() error {
@@ -15,11 +14,11 @@ func setRLimits() error {
rlimits.Max = define.RLimitDefaultValue
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
- return errors.Wrapf(err, "error getting rlimits")
+ return fmt.Errorf("error getting rlimits: %w", err)
}
rlimits.Cur = rlimits.Max
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
- return errors.Wrapf(err, "error setting new rlimits")
+ return fmt.Errorf("error setting new rlimits: %w", err)
}
}
return nil
diff --git a/cmd/podman/generate/kube.go b/cmd/podman/generate/kube.go
index c4c92799c..7bfc3dcf7 100644
--- a/cmd/podman/generate/kube.go
+++ b/cmd/podman/generate/kube.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -68,10 +67,10 @@ func kube(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("filename") {
if _, err := os.Stat(kubeFile); err == nil {
- return errors.Errorf("cannot write to %q; file exists", kubeFile)
+ return fmt.Errorf("cannot write to %q; file exists", kubeFile)
}
if err := ioutil.WriteFile(kubeFile, content, 0644); err != nil {
- return errors.Wrapf(err, "cannot write to %q", kubeFile)
+ return fmt.Errorf("cannot write to %q: %w", kubeFile, err)
}
return nil
}
diff --git a/cmd/podman/generate/systemd.go b/cmd/podman/generate/systemd.go
index 0dab6299d..1ece64a30 100644
--- a/cmd/podman/generate/systemd.go
+++ b/cmd/podman/generate/systemd.go
@@ -2,6 +2,7 @@ package pods
import (
"encoding/json"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
systemDefine "github.com/containers/podman/v4/pkg/systemd/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -156,7 +156,7 @@ func systemd(cmd *cobra.Command, args []string) error {
if files {
cwd, err := os.Getwd()
if err != nil {
- return errors.Wrap(err, "error getting current working directory")
+ return fmt.Errorf("error getting current working directory: %w", err)
}
for name, content := range reports.Units {
path := filepath.Join(cwd, fmt.Sprintf("%s.service", name))
@@ -189,7 +189,7 @@ func systemd(cmd *cobra.Command, args []string) error {
case format == "":
return printDefault(reports.Units)
default:
- return errors.Errorf("unknown --format argument: %s", format)
+ return fmt.Errorf("unknown --format argument: %s", format)
}
}
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index 3ea60e18a..9f1b86eb4 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -1,6 +1,7 @@
package images
import (
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -23,7 +24,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -191,15 +191,15 @@ func buildFlags(cmd *cobra.Command) {
_ = flags.MarkHidden("compress")
_ = flags.MarkHidden("volume")
_ = flags.MarkHidden("output")
+ _ = flags.MarkHidden("logsplit")
}
}
// build executes the build command.
func build(cmd *cobra.Command, args []string) error {
if (cmd.Flags().Changed("squash") && cmd.Flags().Changed("layers")) ||
- (cmd.Flags().Changed("squash-all") && cmd.Flags().Changed("layers")) ||
(cmd.Flags().Changed("squash-all") && cmd.Flags().Changed("squash")) {
- return errors.New("cannot specify --squash, --squash-all and --layers options together")
+ return errors.New("cannot specify --squash with --layers and --squash-all with --squash")
}
if cmd.Flag("output").Changed && registry.IsRemote() {
@@ -222,7 +222,7 @@ func build(cmd *cobra.Command, args []string) error {
// The context directory could be a URL. Try to handle that.
tempDir, subDir, err := buildahDefine.TempDirForURL("", "buildah", args[0])
if err != nil {
- return errors.Wrapf(err, "error prepping temporary context directory")
+ return fmt.Errorf("error prepping temporary context directory: %w", err)
}
if tempDir != "" {
// We had to download it to a temporary directory.
@@ -237,7 +237,7 @@ func build(cmd *cobra.Command, args []string) error {
// Nope, it was local. Use it as is.
absDir, err := filepath.Abs(args[0])
if err != nil {
- return errors.Wrapf(err, "error determining path to directory %q", args[0])
+ return fmt.Errorf("error determining path to directory %q: %w", args[0], err)
}
contextDir = absDir
}
@@ -253,7 +253,7 @@ func build(cmd *cobra.Command, args []string) error {
}
absFile, err := filepath.Abs(containerFiles[i])
if err != nil {
- return errors.Wrapf(err, "error determining path to file %q", containerFiles[i])
+ return fmt.Errorf("error determining path to file %q: %w", containerFiles[i], err)
}
contextDir = filepath.Dir(absFile)
containerFiles[i] = absFile
@@ -262,10 +262,10 @@ func build(cmd *cobra.Command, args []string) error {
}
if contextDir == "" {
- return errors.Errorf("no context directory and no Containerfile specified")
+ return errors.New("no context directory and no Containerfile specified")
}
if !utils.IsDir(contextDir) {
- return errors.Errorf("context must be a directory: %q", contextDir)
+ return fmt.Errorf("context must be a directory: %q", contextDir)
}
if len(containerFiles) == 0 {
if utils.FileExists(filepath.Join(contextDir, "Containerfile")) {
@@ -296,14 +296,15 @@ func build(cmd *cobra.Command, args []string) error {
if registry.IsRemote() {
// errors from server does not contain ExitCode
// so parse exit code from error message
- remoteExitCode, parseErr := utils.ExitCodeFromBuildError(fmt.Sprint(errors.Cause(err)))
+ remoteExitCode, parseErr := utils.ExitCodeFromBuildError(err.Error())
if parseErr == nil {
exitCode = remoteExitCode
}
}
- if ee, ok := (errors.Cause(err)).(*exec.ExitError); ok {
- exitCode = ee.ExitCode()
+ exitError := &exec.ExitError{}
+ if errors.As(err, &exitError) {
+ exitCode = exitError.ExitCode()
}
registry.SetExitCode(exitCode)
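
Note: the hunk above is the core of the pkg/errors removal in build.go — exit-code extraction switches from a type assertion on errors.Cause(err) to errors.As, which walks every layer added with %w. A minimal, self-contained sketch of that pattern (runBuild and the "false" command are made up for illustration, not podman code):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// runBuild stands in for any helper whose error may wrap *exec.ExitError.
func runBuild() error {
	if err := exec.Command("false").Run(); err != nil {
		return fmt.Errorf("build step failed: %w", err) // %w keeps the chain intact
	}
	return nil
}

func main() {
	exitCode := 1 // arbitrary default for the sketch
	if err := runBuild(); err != nil {
		exitError := &exec.ExitError{}
		// errors.As unwraps through every %w layer, unlike the old assertion on
		// errors.Cause(err), which only matched when the exit error was the root cause.
		if errors.As(err, &exitError) {
			exitCode = exitError.ExitCode()
		}
		fmt.Println("exit code:", exitCode)
	}
}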
@@ -356,7 +357,7 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
}
if pullFlagsCount > 1 {
- return nil, errors.Errorf("can only set one of 'pull' or 'pull-always' or 'pull-never'")
+ return nil, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'")
}
// Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always
@@ -418,7 +419,13 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
// Squash-all invoked, squash both new and old layers into one.
if c.Flags().Changed("squash-all") {
flags.Squash = true
- flags.Layers = false
+ if !c.Flags().Changed("layers") {
+ // Buildah supports using layers and --squash together
+ // after https://github.com/containers/buildah/pull/3674
+ // so podman must honor if user wants to still use layers
+ // with --squash-all.
+ flags.Layers = false
+ }
}
var stdin io.Reader
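
Note: this hunk and the removal in the next hunk belong together — the duplicated squash/layers translation later in buildFlagsWrapperToOptions is dropped, and --squash-all now only disables layer caching when the user did not pass --layers themselves, since Buildah can squash on top of cached layers after containers/buildah#3674 (cited in the added comment). A reduced sketch of just that branch, with plain booleans standing in for the cobra flag state:

package main

import "fmt"

// translateSquashAll models the --squash-all branch above: squash-all always
// implies squashing, but layer caching is only turned off when the user did
// not set --layers explicitly.
func translateSquashAll(layers, layersChanged bool) (squash, useLayers bool) {
	squash = true
	useLayers = layers
	if !layersChanged {
		useLayers = false
	}
	return squash, useLayers
}

func main() {
	s, l := translateSquashAll(true, true) // roughly: --squash-all --layers
	fmt.Println(s, l)                      // true true: squash, keep layer caching
	s, l = translateSquashAll(true, false) // roughly: --squash-all alone
	fmt.Println(s, l)                      // true false: squash, no layer caching
}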
@@ -442,22 +449,6 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
return nil, err
}
- // `buildah bud --layers=false` acts like `docker build --squash` does.
- // That is all of the new layers created during the build process are
- // condensed into one, any layers present prior to this build are retained
- // without condensing. `buildah bud --squash` squashes both new and old
- // layers down into one. Translate Podman commands into Buildah.
- // Squash invoked, retain old layers, squash new layers into one.
- if c.Flags().Changed("squash") && flags.Squash {
- flags.Squash = false
- flags.Layers = false
- }
- // Squash-all invoked, squash both new and old layers into one.
- if c.Flags().Changed("squash-all") {
- flags.Squash = true
- flags.Layers = false
- }
-
compression := buildahDefine.Gzip
if flags.DisableCompression {
compression = buildahDefine.Uncompressed
@@ -487,7 +478,7 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
case strings.HasPrefix(flags.Format, buildahDefine.DOCKER):
format = buildahDefine.Dockerv2ImageManifest
default:
- return nil, errors.Errorf("unrecognized image type %q", flags.Format)
+ return nil, fmt.Errorf("unrecognized image type %q", flags.Format)
}
runtimeFlags := []string{}
@@ -510,12 +501,29 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
decConfig, err := getDecryptConfig(flags.DecryptionKeys)
if err != nil {
- return nil, errors.Wrapf(err, "unable to obtain decrypt config")
+ return nil, fmt.Errorf("unable to obtain decrypt config: %w", err)
+ }
+
+ additionalBuildContext := make(map[string]*buildahDefine.AdditionalBuildContext)
+ if c.Flag("build-context").Changed {
+ for _, contextString := range flags.BuildContext {
+ av := strings.SplitN(contextString, "=", 2)
+ if len(av) > 1 {
+ parseAdditionalBuildContext, err := parse.GetAdditionalBuildContext(av[1])
+ if err != nil {
+ return nil, fmt.Errorf("while parsing additional build context: %w", err)
+ }
+ additionalBuildContext[av[0]] = &parseAdditionalBuildContext
+ } else {
+ return nil, fmt.Errorf("while parsing additional build context: %q, accepts value in the form of key=value", av)
+ }
+ }
}
opts := buildahDefine.BuildOptions{
AddCapabilities: flags.CapAdd,
AdditionalTags: tags,
+ AdditionalBuildContexts: additionalBuildContext,
AllPlatforms: flags.AllPlatforms,
Annotations: flags.Annotation,
Args: args,
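
Note: the new --build-context handling splits each argument once on "=" and hands the value to Buildah's parse.GetAdditionalBuildContext; anything that is not in key=value form is rejected. A standalone sketch of just the splitting step — the context names and values below are made-up examples, and the real code passes av[1] on to Buildah instead of printing it:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical --build-context arguments; the last one is rejected
	// because it is not in key=value form.
	args := []string{"base=docker-image://alpine", "src=../other-dir", "broken"}
	for _, contextString := range args {
		av := strings.SplitN(contextString, "=", 2)
		if len(av) < 2 {
			fmt.Printf("rejected %q: accepts value in the form of key=value\n", contextString)
			continue
		}
		fmt.Printf("additional context %q -> %q\n", av[0], av[1])
	}
}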
@@ -525,6 +533,7 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
Compression: compression,
ConfigureNetwork: networkPolicy,
ContextDirectory: contextDir,
+ CPPFlags: flags.CPPFlags,
DefaultMountsFilePath: containerConfig.Containers.DefaultMountsFile,
Devices: flags.Devices,
DropCapabilities: flags.CapDrop,
@@ -539,6 +548,8 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
Labels: flags.Label,
Layers: flags.Layers,
LogRusage: flags.LogRusage,
+ LogFile: flags.Logfile,
+ LogSplitByPlatform: flags.LogSplitByPlatform,
Manifest: flags.Manifest,
MaxPullPushRetries: 3,
NamespaceOptions: nsValues,
@@ -570,7 +581,7 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
if flags.IgnoreFile != "" {
excludes, err := parseDockerignore(flags.IgnoreFile)
if err != nil {
- return nil, errors.Wrapf(err, "unable to obtain decrypt config")
+ return nil, fmt.Errorf("unable to obtain decrypt config: %w", err)
}
opts.Excludes = excludes
}
@@ -589,7 +600,7 @@ func getDecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error)
// decryption
dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
if err != nil {
- return nil, errors.Wrapf(err, "invalid decryption keys")
+ return nil, fmt.Errorf("invalid decryption keys: %w", err)
}
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
decConfig = cc.DecryptConfig
diff --git a/cmd/podman/images/diff.go b/cmd/podman/images/diff.go
index 13a8f1d9d..a017d569d 100644
--- a/cmd/podman/images/diff.go
+++ b/cmd/podman/images/diff.go
@@ -34,9 +34,7 @@ func init() {
}
func diffFlags(flags *pflag.FlagSet) {
- diffOpts = &entities.DiffOptions{}
- flags.BoolVar(&diffOpts.Archive, "archive", true, "Save the diff as a tar archive")
- _ = flags.MarkDeprecated("archive", "Provided for backwards compatibility, has no impact on output.")
+ diffOpts = new(entities.DiffOptions)
formatFlagName := "format"
flags.StringVar(&diffOpts.Format, formatFlagName, "", "Change the output format (json)")
diff --git a/cmd/podman/images/history.go b/cmd/podman/images/history.go
index e190941e7..8f910f82d 100644
--- a/cmd/podman/images/history.go
+++ b/cmd/podman/images/history.go
@@ -13,7 +13,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -132,7 +131,7 @@ func history(cmd *cobra.Command, args []string) error {
})
if err := rpt.Execute(hdrs); err != nil {
- return errors.Wrapf(err, "failed to write report column headers")
+ return fmt.Errorf("failed to write report column headers: %w", err)
}
}
return rpt.Execute(hr)
diff --git a/cmd/podman/images/import.go b/cmd/podman/images/import.go
index 1910fef6d..8343a0bda 100644
--- a/cmd/podman/images/import.go
+++ b/cmd/podman/images/import.go
@@ -2,6 +2,7 @@ package images
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -102,7 +102,7 @@ func importCon(cmd *cobra.Command, args []string) error {
)
switch len(args) {
case 0:
- return errors.Errorf("need to give the path to the tarball, or must specify a tarball of '-' for stdin")
+ return errors.New("need to give the path to the tarball, or must specify a tarball of '-' for stdin")
case 1:
source = args[0]
case 2:
@@ -112,20 +112,20 @@ func importCon(cmd *cobra.Command, args []string) error {
// instead of the localhost ones
reference = args[1]
default:
- return errors.Errorf("too many arguments. Usage TARBALL [REFERENCE]")
+ return errors.New("too many arguments. Usage TARBALL [REFERENCE]")
}
if source == "-" {
outFile, err := ioutil.TempFile("", "podman")
if err != nil {
- return errors.Errorf("creating file %v", err)
+ return fmt.Errorf("creating file %v", err)
}
defer os.Remove(outFile.Name())
defer outFile.Close()
_, err = io.Copy(outFile, os.Stdin)
if err != nil {
- return errors.Errorf("copying file %v", err)
+ return fmt.Errorf("copying file %v", err)
}
source = outFile.Name()
}
diff --git a/cmd/podman/images/list.go b/cmd/podman/images/list.go
index 81011f9b1..94d8412e5 100644
--- a/cmd/podman/images/list.go
+++ b/cmd/podman/images/list.go
@@ -1,6 +1,7 @@
package images
import (
+ "errors"
"fmt"
"os"
"sort"
@@ -15,7 +16,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -225,7 +225,7 @@ func sortImages(imageS []*entities.ImageSummary) ([]imageReporter, error) {
h.ImageSummary = *e
h.Repository, h.Tag, err = tokenRepoTag(tag)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing repository tag: %q", tag)
+ return nil, fmt.Errorf("error parsing repository tag: %q: %w", tag, err)
}
if h.Tag == "<none>" {
untagged = append(untagged, h)
diff --git a/cmd/podman/images/load.go b/cmd/podman/images/load.go
index dbb7c32fa..367b628c7 100644
--- a/cmd/podman/images/load.go
+++ b/cmd/podman/images/load.go
@@ -2,6 +2,7 @@ package images
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/term"
)
@@ -91,18 +91,18 @@ func load(cmd *cobra.Command, args []string) error {
}
} else {
if term.IsTerminal(int(os.Stdin.Fd())) {
- return errors.Errorf("cannot read from terminal, use command-line redirection or the --input flag")
+ return errors.New("cannot read from terminal, use command-line redirection or the --input flag")
}
outFile, err := ioutil.TempFile(util.Tmpdir(), "podman")
if err != nil {
- return errors.Errorf("creating file %v", err)
+ return fmt.Errorf("creating file %v", err)
}
defer os.Remove(outFile.Name())
defer outFile.Close()
_, err = io.Copy(outFile, os.Stdin)
if err != nil {
- return errors.Errorf("copying file %v", err)
+ return fmt.Errorf("copying file %v", err)
}
loadOpts.Input = outFile.Name()
}
@@ -110,6 +110,6 @@ func load(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- fmt.Println("Loaded image(s): " + strings.Join(response.Names, ","))
+ fmt.Println("Loaded image: " + strings.Join(response.Names, "\nLoaded image: "))
return nil
}
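
Note: the load.go change replaces the single comma-joined "Loaded image(s):" line with one "Loaded image:" line per name, which it gets simply by joining the names with a newline-plus-prefix separator. A tiny sketch with made-up image names:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical names returned by a load operation.
	names := []string{"docker.io/library/alpine:latest", "docker.io/library/busybox:latest"}
	// The leading prefix plus a "\nLoaded image: " separator yields one line per image.
	fmt.Println("Loaded image: " + strings.Join(names, "\nLoaded image: "))
}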
diff --git a/cmd/podman/images/mount.go b/cmd/podman/images/mount.go
index d5ab3d274..cd54e24ae 100644
--- a/cmd/podman/images/mount.go
+++ b/cmd/podman/images/mount.go
@@ -1,15 +1,14 @@
package images
import (
+ "errors"
"fmt"
"os"
"github.com/containers/common/pkg/report"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
- "github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -71,16 +70,12 @@ func mount(cmd *cobra.Command, args []string) error {
return err
}
- if len(args) > 0 || mountOpts.All {
- var errs utils.OutputErrors
- for _, r := range reports {
- if r.Err == nil {
- fmt.Println(r.Path)
- continue
- }
- errs = append(errs, r.Err)
+ if len(args) == 1 && mountOpts.Format == "" && !mountOpts.All {
+ if len(reports) != 1 {
+ return fmt.Errorf("internal error: expected 1 report but got %d", len(reports))
}
- return errs.PrintErrors()
+ fmt.Println(reports[0].Path)
+ return nil
}
switch {
@@ -89,7 +84,7 @@ func mount(cmd *cobra.Command, args []string) error {
case mountOpts.Format == "":
break // see default format below
default:
- return errors.Errorf("unknown --format argument: %q", mountOpts.Format)
+ return fmt.Errorf("unknown --format argument: %q", mountOpts.Format)
}
mrs := make([]mountReporter, 0, len(reports))
diff --git a/cmd/podman/images/pull.go b/cmd/podman/images/pull.go
index a7da5518a..6e3ec1517 100644
--- a/cmd/podman/images/pull.go
+++ b/cmd/podman/images/pull.go
@@ -1,6 +1,7 @@
package images
import (
+ "errors"
"fmt"
"os"
"strings"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -138,7 +138,7 @@ func imagePull(cmd *cobra.Command, args []string) error {
}
if platform != "" {
if pullOptions.Arch != "" || pullOptions.OS != "" {
- return errors.Errorf("--platform option can not be specified with --arch or --os")
+ return errors.New("--platform option can not be specified with --arch or --os")
}
split := strings.SplitN(platform, "/", 2)
pullOptions.OS = split[0]
diff --git a/cmd/podman/images/push.go b/cmd/podman/images/push.go
index a59bdd93c..1b3419014 100644
--- a/cmd/podman/images/push.go
+++ b/cmd/podman/images/push.go
@@ -117,7 +117,6 @@ func pushFlags(cmd *cobra.Command) {
_ = flags.MarkHidden("compress")
_ = flags.MarkHidden("digestfile")
_ = flags.MarkHidden("quiet")
- _ = flags.MarkHidden("remove-signatures")
_ = flags.MarkHidden("sign-by")
}
if !registry.IsRemote() {
diff --git a/cmd/podman/images/rm.go b/cmd/podman/images/rm.go
index 13dab62d4..18b22e51d 100644
--- a/cmd/podman/images/rm.go
+++ b/cmd/podman/images/rm.go
@@ -1,13 +1,13 @@
package images
import (
+ "errors"
"fmt"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -62,10 +62,10 @@ func imageRemoveFlagSet(flags *pflag.FlagSet) {
func rm(cmd *cobra.Command, args []string) error {
if len(args) < 1 && !imageOpts.All {
- return errors.Errorf("image name or ID must be specified")
+ return errors.New("image name or ID must be specified")
}
if len(args) > 0 && imageOpts.All {
- return errors.Errorf("when using the --all switch, you may not pass any images names or IDs")
+ return errors.New("when using the --all switch, you may not pass any images names or IDs")
}
// Note: certain image-removal errors are non-fatal. Hence, the report
diff --git a/cmd/podman/images/save.go b/cmd/podman/images/save.go
index 3394c2e99..43366e1b3 100644
--- a/cmd/podman/images/save.go
+++ b/cmd/podman/images/save.go
@@ -2,17 +2,18 @@ package images
import (
"context"
+ "errors"
+ "fmt"
"os"
"strings"
"github.com/containers/common/pkg/completion"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/parse"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/term"
)
@@ -31,14 +32,14 @@ var (
RunE: save,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
- return errors.Errorf("need at least 1 argument")
+ return errors.New("need at least 1 argument")
}
format, err := cmd.Flags().GetString("format")
if err != nil {
return err
}
if !util.StringInSlice(format, common.ValidSaveFormats) {
- return errors.Errorf("format value must be one of %s", strings.Join(common.ValidSaveFormats, " "))
+ return fmt.Errorf("format value must be one of %s", strings.Join(common.ValidSaveFormats, " "))
}
return nil
},
@@ -103,13 +104,13 @@ func save(cmd *cobra.Command, args []string) (finalErr error) {
succeeded = false
)
if cmd.Flag("compress").Changed && (saveOpts.Format != define.OCIManifestDir && saveOpts.Format != define.V2s2ManifestDir) {
- return errors.Errorf("--compress can only be set when --format is either 'oci-dir' or 'docker-dir'")
+ return errors.New("--compress can only be set when --format is either 'oci-dir' or 'docker-dir'")
}
if len(saveOpts.Output) == 0 {
saveOpts.Quiet = true
fi := os.Stdout
if term.IsTerminal(int(fi.Fd())) {
- return errors.Errorf("refusing to save to terminal. Use -o flag or redirect")
+ return errors.New("refusing to save to terminal. Use -o flag or redirect")
}
pipePath, cleanup, err := setupPipe()
if err != nil {
diff --git a/cmd/podman/images/scp.go b/cmd/podman/images/scp.go
index 3dbc9c331..a7aa43e61 100644
--- a/cmd/podman/images/scp.go
+++ b/cmd/podman/images/scp.go
@@ -1,28 +1,12 @@
package images
import (
- "context"
- "fmt"
- "io/ioutil"
- urlP "net/url"
"os"
- "os/exec"
- "os/user"
- "strconv"
"strings"
- "github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
- "github.com/containers/podman/v4/cmd/podman/system/connection"
- "github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/containers/podman/v4/utils"
- scpD "github.com/dtylman/scp"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
- "golang.org/x/crypto/ssh"
)
var (
@@ -32,7 +16,6 @@ var (
Annotations: map[string]string{
registry.UnshareNSRequired: "",
registry.ParentNSRequired: "",
- registry.EngineMode: registry.ABIMode,
},
Long: saveScpDescription,
Short: "securely copy images",
@@ -46,9 +29,6 @@ var (
var (
parentFlags []string
quiet bool
- source entities.ImageScpOptions
- dest entities.ImageScpOptions
- sshInfo entities.ImageScpConnections
)
func init() {
@@ -66,7 +46,6 @@ func scpFlags(cmd *cobra.Command) {
func scp(cmd *cobra.Command, args []string) (finalErr error) {
var (
- // TODO add tag support for images
err error
)
for i, val := range os.Args {
@@ -81,288 +60,17 @@ func scp(cmd *cobra.Command, args []string) (finalErr error) {
}
parentFlags = append(parentFlags, val)
}
- podman, err := os.Executable()
- if err != nil {
- return err
- }
- f, err := ioutil.TempFile("", "podman") // open temp file for load/save output
- if err != nil {
- return err
- }
- confR, err := config.NewConfig("") // create a hand made config for the remote engine since we might use remote and native at once
- if err != nil {
- return errors.Wrapf(err, "could not make config")
- }
-
- abiEng, err := registry.NewImageEngine(cmd, args) // abi native engine
- if err != nil {
- return err
- }
-
- cfg, err := config.ReadCustomConfig() // get ready to set ssh destination if necessary
- if err != nil {
- return err
- }
- locations := []*entities.ImageScpOptions{}
- cliConnections := []string{}
- var flipConnections bool
- for _, arg := range args {
- loc, connect, err := parseImageSCPArg(arg)
- if err != nil {
- return err
- }
- locations = append(locations, loc)
- cliConnections = append(cliConnections, connect...)
- }
- source = *locations[0]
- switch {
- case len(locations) > 1:
- if flipConnections, err = validateSCPArgs(locations); err != nil {
- return err
- }
- if flipConnections { // the order of cliConnections matters, we need to flip both arrays since the args are parsed separately sometimes.
- cliConnections[0], cliConnections[1] = cliConnections[1], cliConnections[0]
- locations[0], locations[1] = locations[1], locations[0]
- }
- dest = *locations[1]
- case len(locations) == 1:
- switch {
- case len(locations[0].Image) == 0:
- return errors.Wrapf(define.ErrInvalidArg, "no source image specified")
- case len(locations[0].Image) > 0 && !locations[0].Remote && len(locations[0].User) == 0: // if we have podman image scp $IMAGE
- return errors.Wrapf(define.ErrInvalidArg, "must specify a destination")
- }
- }
-
- source.Quiet = quiet
- source.File = f.Name() // after parsing the arguments, set the file for the save/load
- dest.File = source.File
- if err = os.Remove(source.File); err != nil { // remove the file and simply use its name so podman creates the file upon save. avoids umask errors
- return err
- }
-
- allLocal := true // if we are all localhost, do not validate connections but if we are using one localhost and one non we need to use sshd
- for _, val := range cliConnections {
- if !strings.Contains(val, "@localhost::") {
- allLocal = false
- break
- }
- }
- if allLocal {
- cliConnections = []string{}
- }
-
- var serv map[string]config.Destination
- serv, err = GetServiceInformation(cliConnections, cfg)
- if err != nil {
- return err
- }
-
- // TODO: Add podman remote support
- confR.Engine = config.EngineConfig{Remote: true, CgroupManager: "cgroupfs", ServiceDestinations: serv} // pass the service dest (either remote or something else) to engine
- saveCmd, loadCmd := createCommands(podman)
- switch {
- case source.Remote: // if we want to load FROM the remote, dest can either be local or remote in this case
- err = saveToRemote(source.Image, source.File, "", sshInfo.URI[0], sshInfo.Identities[0])
- if err != nil {
- return err
- }
- if dest.Remote { // we want to load remote -> remote, both source and dest are remote
- rep, err := loadToRemote(dest.File, "", sshInfo.URI[1], sshInfo.Identities[1])
- if err != nil {
- return err
- }
- fmt.Println(rep)
- break
- }
- err = execPodman(podman, loadCmd)
- if err != nil {
- return err
- }
- case dest.Remote: // remote host load, implies source is local
- err = execPodman(podman, saveCmd)
- if err != nil {
- return err
- }
- rep, err := loadToRemote(source.File, "", sshInfo.URI[0], sshInfo.Identities[0])
- if err != nil {
- return err
- }
- fmt.Println(rep)
- if err = os.Remove(source.File); err != nil {
- return err
- }
- // TODO: Add podman remote support
- default: // else native load, both source and dest are local and transferring between users
- if source.User == "" { // source user has to be set, destination does not
- source.User = os.Getenv("USER")
- if source.User == "" {
- u, err := user.Current()
- if err != nil {
- return errors.Wrapf(err, "could not obtain user, make sure the environmental variable $USER is set")
- }
- source.User = u.Username
- }
- }
- err := abiEng.Transfer(context.Background(), source, dest, parentFlags)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// loadToRemote takes image and remote connection information. it connects to the specified client
-// and copies the saved image dir over to the remote host and then loads it onto the machine
-// returns a string containing output or an error
-func loadToRemote(localFile string, tag string, url *urlP.URL, iden string) (string, error) {
- dial, remoteFile, err := createConnection(url, iden)
- if err != nil {
- return "", err
- }
- defer dial.Close()
-
- n, err := scpD.CopyTo(dial, localFile, remoteFile)
- if err != nil {
- errOut := strconv.Itoa(int(n)) + " Bytes copied before error"
- return " ", errors.Wrapf(err, errOut)
- }
- var run string
- if tag != "" {
- return "", errors.Wrapf(define.ErrInvalidArg, "Renaming of an image is currently not supported")
- }
- podman := os.Args[0]
- run = podman + " image load --input=" + remoteFile + ";rm " + remoteFile // run ssh image load of the file copied via scp
- out, err := connection.ExecRemoteCommand(dial, run)
- if err != nil {
- return "", err
+ src := args[0]
+ dst := ""
+ if len(args) > 1 {
+ dst = args[1]
}
- return strings.TrimSuffix(string(out), "\n"), nil
-}
-
-// saveToRemote takes image information and remote connection information. it connects to the specified client
-// and saves the specified image on the remote machine and then copies it to the specified local location
-// returns an error if one occurs.
-func saveToRemote(image, localFile string, tag string, uri *urlP.URL, iden string) error {
- dial, remoteFile, err := createConnection(uri, iden)
+ err = registry.ImageEngine().Scp(registry.Context(), src, dst, parentFlags, quiet)
if err != nil {
return err
}
- defer dial.Close()
- if tag != "" {
- return errors.Wrapf(define.ErrInvalidArg, "Renaming of an image is currently not supported")
- }
- podman := os.Args[0]
- run := podman + " image save " + image + " --format=oci-archive --output=" + remoteFile // run ssh image load of the file copied via scp. Files are reverse in this case...
- _, err = connection.ExecRemoteCommand(dial, run)
- if err != nil {
- return err
- }
- n, err := scpD.CopyFrom(dial, remoteFile, localFile)
- if _, conErr := connection.ExecRemoteCommand(dial, "rm "+remoteFile); conErr != nil {
- logrus.Errorf("Removing file on endpoint: %v", conErr)
- }
- if err != nil {
- errOut := strconv.Itoa(int(n)) + " Bytes copied before error"
- return errors.Wrapf(err, errOut)
- }
return nil
}
-
-// makeRemoteFile creates the necessary remote file on the host to
-// save or load the image to. returns a string with the file name or an error
-func makeRemoteFile(dial *ssh.Client) (string, error) {
- run := "mktemp"
- remoteFile, err := connection.ExecRemoteCommand(dial, run)
- if err != nil {
- return "", err
- }
- return strings.TrimSuffix(string(remoteFile), "\n"), nil
-}
-
-// createConnections takes a boolean determining which ssh client to dial
-// and returns the dials client, its newly opened remote file, and an error if applicable.
-func createConnection(url *urlP.URL, iden string) (*ssh.Client, string, error) {
- cfg, err := connection.ValidateAndConfigure(url, iden)
- if err != nil {
- return nil, "", err
- }
- dialAdd, err := ssh.Dial("tcp", url.Host, cfg) // dial the client
- if err != nil {
- return nil, "", errors.Wrapf(err, "failed to connect")
- }
- file, err := makeRemoteFile(dialAdd)
- if err != nil {
- return nil, "", err
- }
-
- return dialAdd, file, nil
-}
-
-// GetSerivceInformation takes the parsed list of hosts to connect to and validates the information
-func GetServiceInformation(cliConnections []string, cfg *config.Config) (map[string]config.Destination, error) {
- var serv map[string]config.Destination
- var url string
- var iden string
- for i, val := range cliConnections {
- splitEnv := strings.SplitN(val, "::", 2)
- sshInfo.Connections = append(sshInfo.Connections, splitEnv[0])
- if len(splitEnv[1]) != 0 {
- err := validateImageName(splitEnv[1])
- if err != nil {
- return nil, err
- }
- source.Image = splitEnv[1]
- //TODO: actually use the new name given by the user
- }
- conn, found := cfg.Engine.ServiceDestinations[sshInfo.Connections[i]]
- if found {
- url = conn.URI
- iden = conn.Identity
- } else { // no match, warn user and do a manual connection.
- url = "ssh://" + sshInfo.Connections[i]
- iden = ""
- logrus.Warnf("Unknown connection name given. Please use system connection add to specify the default remote socket location")
- }
- urlT, err := urlP.Parse(url) // create an actual url to pass to exec command
- if err != nil {
- return nil, err
- }
- if urlT.User.Username() == "" {
- if urlT.User, err = connection.GetUserInfo(urlT); err != nil {
- return nil, err
- }
- }
- sshInfo.URI = append(sshInfo.URI, urlT)
- sshInfo.Identities = append(sshInfo.Identities, iden)
- }
- return serv, nil
-}
-
-// execPodman executes the podman save/load command given the podman binary
-func execPodman(podman string, command []string) error {
- cmd := exec.Command(podman)
- utils.CreateSCPCommand(cmd, command[1:])
- logrus.Debugf("Executing podman command: %q", cmd)
- return cmd.Run()
-}
-
-// createCommands forms the podman save and load commands used by SCP
-func createCommands(podman string) ([]string, []string) {
- var parentString string
- quiet := ""
- if source.Quiet {
- quiet = "-q "
- }
- if len(parentFlags) > 0 {
- parentString = strings.Join(parentFlags, " ") + " " // if there are parent args, an extra space needs to be added
- } else {
- parentString = strings.Join(parentFlags, " ")
- }
- loadCmd := strings.Split(fmt.Sprintf("%s %sload %s--input %s", podman, parentString, quiet, dest.File), " ")
- saveCmd := strings.Split(fmt.Sprintf("%s %vsave %s--output %s %s", podman, parentString, quiet, source.File, source.Image), " ")
- return saveCmd, loadCmd
-}
diff --git a/cmd/podman/images/scp_test.go b/cmd/podman/images/scp_test.go
deleted file mode 100644
index 315fda2ab..000000000
--- a/cmd/podman/images/scp_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package images
-
-import (
- "testing"
-
- "github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/stretchr/testify/assert"
-)
-
-func TestParseSCPArgs(t *testing.T) {
- args := []string{"alpine", "root@localhost::"}
- var source *entities.ImageScpOptions
- var dest *entities.ImageScpOptions
- var err error
- source, _, err = parseImageSCPArg(args[0])
- assert.Nil(t, err)
- assert.Equal(t, source.Image, "alpine")
-
- dest, _, err = parseImageSCPArg(args[1])
- assert.Nil(t, err)
- assert.Equal(t, dest.Image, "")
- assert.Equal(t, dest.User, "root")
-
- args = []string{"root@localhost::alpine"}
- source, _, err = parseImageSCPArg(args[0])
- assert.Nil(t, err)
- assert.Equal(t, source.User, "root")
- assert.Equal(t, source.Image, "alpine")
-
- args = []string{"charliedoern@192.168.68.126::alpine", "foobar@192.168.68.126::"}
- source, _, err = parseImageSCPArg(args[0])
- assert.Nil(t, err)
- assert.True(t, source.Remote)
- assert.Equal(t, source.Image, "alpine")
-
- dest, _, err = parseImageSCPArg(args[1])
- assert.Nil(t, err)
- assert.True(t, dest.Remote)
- assert.Equal(t, dest.Image, "")
-
- args = []string{"charliedoern@192.168.68.126::alpine"}
- source, _, err = parseImageSCPArg(args[0])
- assert.Nil(t, err)
- assert.True(t, source.Remote)
- assert.Equal(t, source.Image, "alpine")
-}
diff --git a/cmd/podman/images/scp_utils.go b/cmd/podman/images/scp_utils.go
deleted file mode 100644
index a85687a42..000000000
--- a/cmd/podman/images/scp_utils.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package images
-
-import (
- "strings"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
-)
-
-// parseImageSCPArg returns the valid connection, and source/destination data based off of the information provided by the user
-// arg is a string containing one of the cli arguments returned is a filled out source/destination options structs as well as a connections array and an error if applicable
-func parseImageSCPArg(arg string) (*entities.ImageScpOptions, []string, error) {
- location := entities.ImageScpOptions{}
- var err error
- cliConnections := []string{}
-
- switch {
- case strings.Contains(arg, "@localhost::"): // image transfer between users
- location.User = strings.Split(arg, "@")[0]
- location, err = validateImagePortion(location, arg)
- if err != nil {
- return nil, nil, err
- }
- cliConnections = append(cliConnections, arg)
- case strings.Contains(arg, "::"):
- location, err = validateImagePortion(location, arg)
- if err != nil {
- return nil, nil, err
- }
- location.Remote = true
- cliConnections = append(cliConnections, arg)
- default:
- location.Image = arg
- }
- return &location, cliConnections, nil
-}
-
-// validateImagePortion is a helper function to validate the image name in an SCP argument
-func validateImagePortion(location entities.ImageScpOptions, arg string) (entities.ImageScpOptions, error) {
- if remoteArgLength(arg, 1) > 0 {
- err := validateImageName(strings.Split(arg, "::")[1])
- if err != nil {
- return location, err
- }
- location.Image = strings.Split(arg, "::")[1] // this will get checked/set again once we validate connections
- }
- return location, nil
-}
-
-// validateSCPArgs takes the array of source and destination options and checks for common errors
-func validateSCPArgs(locations []*entities.ImageScpOptions) (bool, error) {
- if len(locations) > 2 {
- return false, errors.Wrapf(define.ErrInvalidArg, "cannot specify more than two arguments")
- }
- switch {
- case len(locations[0].Image) > 0 && len(locations[1].Image) > 0:
- return false, errors.Wrapf(define.ErrInvalidArg, "cannot specify an image rename")
- case len(locations[0].Image) == 0 && len(locations[1].Image) == 0:
- return false, errors.Wrapf(define.ErrInvalidArg, "a source image must be specified")
- case len(locations[0].Image) == 0 && len(locations[1].Image) != 0:
- if locations[0].Remote && locations[1].Remote {
- return true, nil // we need to flip the cliConnections array so the save/load connections are in the right place
- }
- }
- return false, nil
-}
-
-// validateImageName makes sure that the image given is valid and no injections are occurring
-// we simply use this for error checking, bot setting the image
-func validateImageName(input string) error {
- // ParseNormalizedNamed transforms a shortname image into its
- // full name reference so busybox => docker.io/library/busybox
- // we want to keep our shortnames, so only return an error if
- // we cannot parse what the user has given us
- _, err := reference.ParseNormalizedNamed(input)
- return err
-}
-
-// remoteArgLength is a helper function to simplify the extracting of host argument data
-// returns an int which contains the length of a specified index in a host::image string
-func remoteArgLength(input string, side int) int {
- if strings.Contains(input, "::") {
- return len((strings.Split(input, "::"))[side])
- }
- return -1
-}
diff --git a/cmd/podman/images/search.go b/cmd/podman/images/search.go
index 335ea2b5a..eb876d3d4 100644
--- a/cmd/podman/images/search.go
+++ b/cmd/podman/images/search.go
@@ -1,6 +1,7 @@
package images
import (
+ "errors"
"fmt"
"os"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -83,8 +83,7 @@ func searchFlags(cmd *cobra.Command) {
filterFlagName := "filter"
flags.StringSliceVarP(&searchOptions.Filters, filterFlagName, "f", []string{}, "Filter output based on conditions provided (default [])")
- // TODO add custom filter function
- _ = cmd.RegisterFlagCompletionFunc(filterFlagName, completion.AutocompleteNone)
+ _ = cmd.RegisterFlagCompletionFunc(filterFlagName, common.AutocompleteImageSearchFilters)
formatFlagName := "format"
flags.StringVar(&searchOptions.Format, formatFlagName, "", "Change the output format to JSON or a Go template")
@@ -112,11 +111,11 @@ func imageSearch(cmd *cobra.Command, args []string) error {
case 1:
searchTerm = args[0]
default:
- return errors.Errorf("search requires exactly one argument")
+ return errors.New("search requires exactly one argument")
}
if searchOptions.ListTags && len(searchOptions.Filters) != 0 {
- return errors.Errorf("filters are not applicable to list tags result")
+ return errors.New("filters are not applicable to list tags result")
}
// TLS verification in c/image is controlled via a `types.OptionalBool`
@@ -156,7 +155,7 @@ func imageSearch(cmd *cobra.Command, args []string) error {
switch {
case searchOptions.ListTags:
if len(searchOptions.Filters) != 0 {
- return errors.Errorf("filters are not applicable to list tags result")
+ return errors.New("filters are not applicable to list tags result")
}
if isJSON {
listTagsEntries := buildListTagsJSON(searchReport)
@@ -182,7 +181,7 @@ func imageSearch(cmd *cobra.Command, args []string) error {
if rpt.RenderHeaders {
hdrs := report.Headers(entities.ImageSearchReport{}, nil)
if err := rpt.Execute(hdrs); err != nil {
- return errors.Wrapf(err, "failed to write report column headers")
+ return fmt.Errorf("failed to write report column headers: %w", err)
}
}
return rpt.Execute(searchReport)
diff --git a/cmd/podman/images/sign.go b/cmd/podman/images/sign.go
index e4e201894..beea6d2b8 100644
--- a/cmd/podman/images/sign.go
+++ b/cmd/podman/images/sign.go
@@ -1,6 +1,7 @@
package images
import (
+ "errors"
"os"
"github.com/containers/common/pkg/auth"
@@ -8,7 +9,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -57,7 +57,7 @@ func init() {
func sign(cmd *cobra.Command, args []string) error {
if signOptions.SignBy == "" {
- return errors.Errorf("please provide an identity")
+ return errors.New("please provide an identity")
}
var sigStoreDir string
diff --git a/cmd/podman/images/trust_set.go b/cmd/podman/images/trust_set.go
index fff035d12..832e9f724 100644
--- a/cmd/podman/images/trust_set.go
+++ b/cmd/podman/images/trust_set.go
@@ -1,15 +1,15 @@
package images
import (
+ "fmt"
"net/url"
"regexp"
"github.com/containers/common/pkg/completion"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -61,7 +61,7 @@ func setTrust(cmd *cobra.Command, args []string) error {
}
if !util.StringInSlice(setOptions.Type, validTrustTypes) {
- return errors.Errorf("invalid choice: %s (choose from 'accept', 'reject', 'signedBy')", setOptions.Type)
+ return fmt.Errorf("invalid choice: %s (choose from 'accept', 'reject', 'signedBy')", setOptions.Type)
}
return registry.ImageEngine().SetTrust(registry.Context(), args, setOptions)
}
@@ -71,17 +71,17 @@ func isValidImageURI(imguri string) (bool, error) {
uri := "http://" + imguri
u, err := url.Parse(uri)
if err != nil {
- return false, errors.Wrapf(err, "invalid image uri: %s", imguri)
+ return false, fmt.Errorf("invalid image uri: %s: %w", imguri, err)
}
reg := regexp.MustCompile(`^[a-zA-Z0-9-_\.]+\/?:?[0-9]*[a-z0-9-\/:]*$`)
ret := reg.FindAllString(u.Host, -1)
if len(ret) == 0 {
- return false, errors.Wrapf(err, "invalid image uri: %s", imguri)
+ return false, fmt.Errorf("invalid image uri: %s: %w", imguri, err)
}
reg = regexp.MustCompile(`^[a-z0-9-:\./]*$`)
ret = reg.FindAllString(u.Fragment, -1)
if len(ret) == 0 {
- return false, errors.Wrapf(err, "invalid image uri: %s", imguri)
+ return false, fmt.Errorf("invalid image uri: %s: %w", imguri, err)
}
return true, nil
}
diff --git a/cmd/podman/images/unmount.go b/cmd/podman/images/unmount.go
index 3ada09937..2a3df7cbd 100644
--- a/cmd/podman/images/unmount.go
+++ b/cmd/podman/images/unmount.go
@@ -1,13 +1,13 @@
package images
import (
+ "errors"
"fmt"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
diff --git a/cmd/podman/images/utils_linux.go b/cmd/podman/images/utils_linux.go
index f7c159415..5923716ec 100644
--- a/cmd/podman/images/utils_linux.go
+++ b/cmd/podman/images/utils_linux.go
@@ -1,12 +1,12 @@
package images
import (
+ "fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -26,7 +26,7 @@ func setupPipe() (string, func() <-chan error, error) {
if e := os.RemoveAll(pipeDir); e != nil {
logrus.Errorf("Removing named pipe: %q", e)
}
- return "", nil, errors.Wrapf(err, "error creating named pipe")
+ return "", nil, fmt.Errorf("error creating named pipe: %w", err)
}
go func() {
fpipe, err := os.Open(pipePath)
diff --git a/cmd/podman/inspect/inspect.go b/cmd/podman/inspect/inspect.go
index f6e3fca06..edddf026e 100644
--- a/cmd/podman/inspect/inspect.go
+++ b/cmd/podman/inspect/inspect.go
@@ -3,6 +3,7 @@ package inspect
import (
"context"
"encoding/json" // due to a bug in json-iterator it cannot be used here
+ "errors"
"fmt"
"os"
"regexp"
@@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -41,7 +41,7 @@ func AddInspectFlagSet(cmd *cobra.Command) *entities.InspectOptions {
return &opts
}
-// Inspect inspects the specified container/image names or IDs.
+// Inspect inspects the specified container/image/pod/volume names or IDs.
func Inspect(namesOrIDs []string, options entities.InspectOptions) error {
inspector, err := newInspector(options)
if err != nil {
@@ -64,19 +64,19 @@ func newInspector(options entities.InspectOptions) (*inspector, error) {
case common.ImageType, common.ContainerType, common.AllType, common.PodType, common.NetworkType, common.VolumeType:
// Valid types.
default:
- return nil, errors.Errorf("invalid type %q: must be %q, %q, %q, %q, %q, or %q", options.Type,
+ return nil, fmt.Errorf("invalid type %q: must be %q, %q, %q, %q, %q, or %q", options.Type,
common.ImageType, common.ContainerType, common.PodType, common.NetworkType, common.VolumeType, common.AllType)
}
if options.Type == common.ImageType {
if options.Latest {
- return nil, errors.Errorf("latest is not supported for type %q", common.ImageType)
+ return nil, fmt.Errorf("latest is not supported for type %q", common.ImageType)
}
if options.Size {
- return nil, errors.Errorf("size is not supported for type %q", common.ImageType)
+ return nil, fmt.Errorf("size is not supported for type %q", common.ImageType)
}
}
if options.Type == common.PodType && options.Size {
- return nil, errors.Errorf("size is not supported for type %q", common.PodType)
+ return nil, fmt.Errorf("size is not supported for type %q", common.PodType)
}
podOpts := entities.PodInspectOptions{
Latest: options.Latest,
@@ -93,7 +93,7 @@ func newInspector(options entities.InspectOptions) (*inspector, error) {
// inspect inspects the specified container/image names or IDs.
func (i *inspector) inspect(namesOrIDs []string) error {
// data - dumping place for inspection results.
- var data []interface{} // nolint
+ var data []interface{}
var errs []error
ctx := context.Background()
@@ -145,8 +145,7 @@ func (i *inspector) inspect(namesOrIDs []string) error {
i.podOptions.NameOrID = pod
podData, err := i.containerEngine.PodInspect(ctx, i.podOptions)
if err != nil {
- cause := errors.Cause(err)
- if !strings.Contains(cause.Error(), define.ErrNoSuchPod.Error()) {
+ if !strings.Contains(err.Error(), define.ErrNoSuchPod.Error()) {
errs = []error{err}
} else {
return err
@@ -159,8 +158,7 @@ func (i *inspector) inspect(namesOrIDs []string) error {
if i.podOptions.Latest { // latest means there are no names in the namesOrID array
podData, err := i.containerEngine.PodInspect(ctx, i.podOptions)
if err != nil {
- cause := errors.Cause(err)
- if !strings.Contains(cause.Error(), define.ErrNoSuchPod.Error()) {
+ if !strings.Contains(err.Error(), define.ErrNoSuchPod.Error()) {
errs = []error{err}
} else {
return err
@@ -189,7 +187,7 @@ func (i *inspector) inspect(namesOrIDs []string) error {
data = append(data, volumeData[i])
}
default:
- return errors.Errorf("invalid type %q: must be %q, %q, %q, %q, %q, or %q", i.options.Type,
+ return fmt.Errorf("invalid type %q: must be %q, %q, %q, %q, %q, or %q", i.options.Type,
common.ImageType, common.ContainerType, common.PodType, common.NetworkType, common.VolumeType, common.AllType)
}
// Always print an empty array
@@ -218,7 +216,7 @@ func (i *inspector) inspect(namesOrIDs []string) error {
fmt.Fprintf(os.Stderr, "error inspecting object: %v\n", err)
}
}
- return errors.Errorf("inspecting object: %v", errs[0])
+ return fmt.Errorf("inspecting object: %w", errs[0])
}
return nil
}
@@ -249,7 +247,7 @@ func printTmpl(typ, row string, data []interface{}) error {
}
func (i *inspector) inspectAll(ctx context.Context, namesOrIDs []string) ([]interface{}, []error, error) {
- var data []interface{} // nolint
+ var data []interface{}
allErrs := []error{}
for _, name := range namesOrIDs {
ctrData, errs, err := i.containerEngine.ContainerInspect(ctx, []string{name}, i.options)
@@ -287,8 +285,7 @@ func (i *inspector) inspectAll(ctx context.Context, namesOrIDs []string) ([]inte
i.podOptions.NameOrID = name
podData, err := i.containerEngine.PodInspect(ctx, i.podOptions)
if err != nil {
- cause := errors.Cause(err)
- if !strings.Contains(cause.Error(), define.ErrNoSuchPod.Error()) {
+ if !strings.Contains(err.Error(), define.ErrNoSuchPod.Error()) {
return nil, nil, err
}
} else {
@@ -296,7 +293,7 @@ func (i *inspector) inspectAll(ctx context.Context, namesOrIDs []string) ([]inte
continue
}
if len(errs) > 0 {
- allErrs = append(allErrs, errors.Errorf("no such object: %q", name))
+ allErrs = append(allErrs, fmt.Errorf("no such object: %q", name))
continue
}
}
diff --git a/cmd/podman/machine/info.go b/cmd/podman/machine/info.go
new file mode 100644
index 000000000..9932027d8
--- /dev/null
+++ b/cmd/podman/machine/info.go
@@ -0,0 +1,182 @@
+//go:build amd64 || arm64
+// +build amd64 arm64
+
+package machine
+
+import (
+ "fmt"
+ "html/template"
+ "os"
+ "runtime"
+
+ "github.com/containers/common/pkg/completion"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/report"
+ "github.com/containers/podman/v4/cmd/podman/common"
+ "github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/cmd/podman/validate"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/pkg/machine"
+ "github.com/ghodss/yaml"
+ "github.com/spf13/cobra"
+)
+
+var infoDescription = `Display information pertaining to the machine host.`
+
+var (
+ infoCmd = &cobra.Command{
+ Use: "info [options]",
+ Short: "Display machine host info",
+ Long: infoDescription,
+ PersistentPreRunE: rootlessOnly,
+ RunE: info,
+ Args: validate.NoArgs,
+ ValidArgsFunction: completion.AutocompleteNone,
+ Example: `podman machine info`,
+ }
+)
+
+var (
+ inFormat string
+)
+
+// Info contains info on the machine host and version info
+type Info struct {
+ Host *HostInfo `json:"Host"`
+ Version define.Version `json:"Version"`
+}
+
+// HostInfo contains info on the machine host
+type HostInfo struct {
+ Arch string `json:"Arch"`
+ CurrentMachine string `json:"CurrentMachine"`
+ DefaultMachine string `json:"DefaultMachine"`
+ EventsDir string `json:"EventsDir"`
+ MachineConfigDir string `json:"MachineConfigDir"`
+ MachineImageDir string `json:"MachineImageDir"`
+ MachineState string `json:"MachineState"`
+ NumberOfMachines int `json:"NumberOfMachines"`
+ OS string `json:"OS"`
+ VMType string `json:"VMType"`
+}
+
+func init() {
+ registry.Commands = append(registry.Commands, registry.CliCommand{
+ Command: infoCmd,
+ Parent: machineCmd,
+ })
+
+ flags := infoCmd.Flags()
+ formatFlagName := "format"
+ flags.StringVarP(&inFormat, formatFlagName, "f", "", "Change the output format to JSON or a Go template")
+ _ = infoCmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(&define.Info{}))
+}
+
+func info(cmd *cobra.Command, args []string) error {
+ info := Info{}
+ version, err := define.GetVersion()
+ if err != nil {
+ return fmt.Errorf("error getting version info %w", err)
+ }
+ info.Version = version
+
+ host, err := hostInfo()
+ if err != nil {
+ return err
+ }
+ info.Host = host
+
+ switch {
+ case report.IsJSON(inFormat):
+ b, err := json.MarshalIndent(info, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(b))
+ case cmd.Flags().Changed("format"):
+ tmpl := template.New(cmd.Name()).Funcs(template.FuncMap(report.DefaultFuncs))
+ inFormat = report.NormalizeFormat(inFormat)
+ tmpl, err := tmpl.Parse(inFormat)
+ if err != nil {
+ return err
+ }
+ return tmpl.Execute(os.Stdout, info)
+ default:
+ b, err := yaml.Marshal(info)
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(b))
+ }
+
+ return nil
+}
+
+func hostInfo() (*HostInfo, error) {
+ host := HostInfo{}
+
+ host.Arch = runtime.GOARCH
+ host.OS = runtime.GOOS
+
+ provider := GetSystemDefaultProvider()
+ var listOpts machine.ListOptions
+ listResponse, err := provider.List(listOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get machines %w", err)
+ }
+
+ host.NumberOfMachines = len(listResponse)
+
+ cfg, err := config.ReadCustomConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ // Default state of machine is stopped
+ host.MachineState = "Stopped"
+ for _, vm := range listResponse {
+ // Set default machine if found
+ if vm.Name == cfg.Engine.ActiveService {
+ host.DefaultMachine = vm.Name
+ }
+ // If machine is running or starting, it is automatically the current machine
+ if vm.Running {
+ host.CurrentMachine = vm.Name
+ host.MachineState = "Running"
+ } else if vm.Starting {
+ host.CurrentMachine = vm.Name
+ host.MachineState = "Starting"
+ }
+ }
+ // If no machines are starting or running, set current machine to default machine
+ // If no default machines are found, do not report a default machine or a state
+ if host.CurrentMachine == "" {
+ if host.DefaultMachine == "" {
+ host.MachineState = ""
+ } else {
+ host.CurrentMachine = host.DefaultMachine
+ }
+ }
+
+ host.VMType = provider.VMType()
+
+ dataDir, err := machine.GetDataDir(host.VMType)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get machine image dir")
+ }
+ host.MachineImageDir = dataDir
+
+ confDir, err := machine.GetConfDir(host.VMType)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get machine config dir %w", err)
+ }
+ host.MachineConfigDir = confDir
+
+ eventsDir, err := eventSockDir()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get events dir: %w", err)
+ }
+ host.EventsDir = eventsDir
+
+ return &host, nil
+}
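
Note: the new info.go prints YAML by default, honors Go templates, and emits JSON when --format json is given; because Info and HostInfo carry explicit JSON tags, that output can be unmarshalled back into matching structs. A hedged sketch of consuming it from another Go program — the mirror structs are trimmed to a few fields, and it assumes a podman binary with this command is on PATH:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// Trimmed mirrors of the structs defined in info.go above.
type hostInfo struct {
	Arch         string `json:"Arch"`
	OS           string `json:"OS"`
	MachineState string `json:"MachineState"`
	VMType       string `json:"VMType"`
}

type machineInfo struct {
	Host hostInfo `json:"Host"`
}

func main() {
	out, err := exec.Command("podman", "machine", "info", "--format", "json").Output()
	if err != nil {
		fmt.Println("podman machine info failed:", err)
		return
	}
	var info machineInfo
	if err := json.Unmarshal(out, &info); err != nil {
		fmt.Println("unexpected output:", err)
		return
	}
	fmt.Printf("%s/%s machine, state %q, backed by %s\n",
		info.Host.OS, info.Host.Arch, info.Host.MachineState, info.Host.VMType)
}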
diff --git a/cmd/podman/machine/init.go b/cmd/podman/machine/init.go
index 6c31f3531..def3334e8 100644
--- a/cmd/podman/machine/init.go
+++ b/cmd/podman/machine/init.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -20,21 +19,20 @@ var (
Use: "init [options] [NAME]",
Short: "Initialize a virtual machine",
Long: "initialize a virtual machine ",
+ PersistentPreRunE: rootlessOnly,
RunE: initMachine,
Args: cobra.MaximumNArgs(1),
Example: `podman machine init myvm`,
ValidArgsFunction: completion.AutocompleteNone,
}
-)
-var (
initOpts = machine.InitOptions{}
defaultMachineName = machine.DefaultMachineName
now bool
)
// maxMachineNameSize is set to thirty to limit huge machine names primarily
-// because macos has a much smaller file size limit.
+// because macOS has a much smaller file size limit.
const maxMachineNameSize = 30
func init() {
@@ -111,7 +109,6 @@ func init() {
flags.BoolVar(&initOpts.Rootful, rootfulFlagName, false, "Whether this machine should prefer rootful container execution")
}
-// TODO should we allow for a users to append to the qemu cmdline?
func initMachine(cmd *cobra.Command, args []string) error {
var (
err error
@@ -122,12 +119,12 @@ func initMachine(cmd *cobra.Command, args []string) error {
initOpts.Name = defaultMachineName
if len(args) > 0 {
if len(args[0]) > maxMachineNameSize {
- return errors.New("machine name must be 30 characters or less")
+ return fmt.Errorf("machine name %q must be %d characters or less", args[0], maxMachineNameSize)
}
initOpts.Name = args[0]
}
if _, err := provider.LoadVMByName(initOpts.Name); err == nil {
- return errors.Wrap(machine.ErrVMAlreadyExists, initOpts.Name)
+ return fmt.Errorf("%s: %w", initOpts.Name, machine.ErrVMAlreadyExists)
}
for idx, vol := range initOpts.Volumes {
initOpts.Volumes[idx] = os.ExpandEnv(vol)
@@ -150,17 +147,12 @@ func initMachine(cmd *cobra.Command, args []string) error {
fmt.Println("Machine init complete")
if now {
- err = vm.Start(initOpts.Name, machine.StartOptions{})
- if err == nil {
- fmt.Printf("Machine %q started successfully\n", initOpts.Name)
- newMachineEvent(events.Start, events.Event{Name: initOpts.Name})
- }
- } else {
- extra := ""
- if initOpts.Name != defaultMachineName {
- extra = " " + initOpts.Name
- }
- fmt.Printf("To start your machine run:\n\n\tpodman machine start%s\n\n", extra)
+ return start(cmd, args)
+ }
+ extra := ""
+ if initOpts.Name != defaultMachineName {
+ extra = " " + initOpts.Name
}
+ fmt.Printf("To start your machine run:\n\n\tpodman machine start%s\n\n", extra)
return err
}
diff --git a/cmd/podman/machine/inspect.go b/cmd/podman/machine/inspect.go
index 4600a2b6d..d69c382f2 100644
--- a/cmd/podman/machine/inspect.go
+++ b/cmd/podman/machine/inspect.go
@@ -20,6 +20,7 @@ var (
Use: "inspect [options] [MACHINE...]",
Short: "Inspect an existing machine",
Long: "Provide details on a managed virtual machine",
+ PersistentPreRunE: rootlessOnly,
RunE: inspect,
Example: `podman machine inspect myvm`,
ValidArgsFunction: autocompleteMachine,
diff --git a/cmd/podman/machine/list.go b/cmd/podman/machine/list.go
index 5254d50cf..dd4a86697 100644
--- a/cmd/podman/machine/list.go
+++ b/cmd/podman/machine/list.go
@@ -4,6 +4,7 @@
package machine
import (
+ "fmt"
"os"
"sort"
"strconv"
@@ -15,9 +16,9 @@ import (
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
+ "github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/machine"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -27,6 +28,7 @@ var (
Aliases: []string{"ls"},
Short: "List machines",
Long: "List managed virtual machines.",
+ PersistentPreRunE: rootlessOnly,
RunE: list,
Args: validate.NoArgs,
ValidArgsFunction: completion.AutocompleteNone,
@@ -43,22 +45,6 @@ type listFlagType struct {
quiet bool
}
-type ListReporter struct {
- Name string
- Default bool
- Created string
- Running bool
- LastUp string
- Stream string
- VMType string
- CPUs uint64
- Memory string
- DiskSize string
- Port int
- RemoteUsername string
- IdentityPath string
-}
-
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Command: lsCmd,
@@ -68,7 +54,7 @@ func init() {
flags := lsCmd.Flags()
formatFlagName := "format"
flags.StringVar(&listFlag.format, formatFlagName, "{{.Name}}\t{{.VMType}}\t{{.Created}}\t{{.LastUp}}\t{{.CPUs}}\t{{.Memory}}\t{{.DiskSize}}\n", "Format volume output using JSON or a Go template")
- _ = lsCmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(&ListReporter{}))
+ _ = lsCmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(&entities.ListReporter{}))
flags.BoolVar(&listFlag.noHeading, "noheading", false, "Do not print headers")
flags.BoolVarP(&listFlag.quiet, "quiet", "q", false, "Show only machine names")
}
@@ -87,7 +73,7 @@ func list(cmd *cobra.Command, args []string) error {
provider := GetSystemDefaultProvider()
listResponse, err = provider.List(opts)
if err != nil {
- return errors.Wrap(err, "error listing vms")
+ return fmt.Errorf("listing vms: %w", err)
}
// Sort by last run
@@ -121,8 +107,8 @@ func list(cmd *cobra.Command, args []string) error {
return outputTemplate(cmd, machineReporter)
}
-func outputTemplate(cmd *cobra.Command, responses []*ListReporter) error {
- headers := report.Headers(ListReporter{}, map[string]string{
+func outputTemplate(cmd *cobra.Command, responses []*entities.ListReporter) error {
+ headers := report.Headers(entities.ListReporter{}, map[string]string{
"LastUp": "LAST UP",
"VmType": "VM TYPE",
"CPUs": "CPUS",
@@ -137,7 +123,7 @@ func outputTemplate(cmd *cobra.Command, responses []*ListReporter) error {
switch {
case cmd.Flags().Changed("format"):
row = cmd.Flag("format").Value.String()
- listFlag.noHeading = !report.HasTable(row)
+ printHeader = report.HasTable(row)
row = report.NormalizeFormat(row)
default:
row = cmd.Flag("format").Value.String()
@@ -156,7 +142,7 @@ func outputTemplate(cmd *cobra.Command, responses []*ListReporter) error {
defer w.Flush()
if printHeader {
if err := tmpl.Execute(w, headers); err != nil {
- return errors.Wrapf(err, "failed to write report column headers")
+ return fmt.Errorf("failed to write report column headers: %w", err)
}
}
return tmpl.Execute(w, responses)
@@ -181,15 +167,15 @@ func streamName(imageStream string) string {
return imageStream
}
-func toMachineFormat(vms []*machine.ListResponse) ([]*ListReporter, error) {
+func toMachineFormat(vms []*machine.ListResponse) ([]*entities.ListReporter, error) {
cfg, err := config.ReadCustomConfig()
if err != nil {
return nil, err
}
- machineResponses := make([]*ListReporter, 0, len(vms))
+ machineResponses := make([]*entities.ListReporter, 0, len(vms))
for _, vm := range vms {
- response := new(ListReporter)
+ response := new(entities.ListReporter)
response.Default = vm.Name == cfg.Engine.ActiveService
response.Name = vm.Name
response.Running = vm.Running
@@ -209,25 +195,29 @@ func toMachineFormat(vms []*machine.ListResponse) ([]*ListReporter, error) {
return machineResponses, nil
}
-func toHumanFormat(vms []*machine.ListResponse) ([]*ListReporter, error) {
+func toHumanFormat(vms []*machine.ListResponse) ([]*entities.ListReporter, error) {
cfg, err := config.ReadCustomConfig()
if err != nil {
return nil, err
}
- humanResponses := make([]*ListReporter, 0, len(vms))
+ humanResponses := make([]*entities.ListReporter, 0, len(vms))
for _, vm := range vms {
- response := new(ListReporter)
+ response := new(entities.ListReporter)
if vm.Name == cfg.Engine.ActiveService {
response.Name = vm.Name + "*"
response.Default = true
} else {
response.Name = vm.Name
}
- if vm.Running {
+ switch {
+ case vm.Running:
response.LastUp = "Currently running"
response.Running = true
- } else {
+ case vm.Starting:
+ response.LastUp = "Currently starting"
+ response.Starting = true
+ default:
response.LastUp = units.HumanDuration(time.Since(vm.LastUp)) + " ago"
}
response.Created = units.HumanDuration(time.Since(vm.CreatedAt)) + " ago"
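Note on the error-handling change above (it recurs throughout this patch): wrapping via github.com/pkg/errors is replaced with fmt.Errorf and the %w verb. A minimal sketch of the pattern, using a hypothetical listVMs helper and sentinel in place of provider.List and the libpod/define errors, showing that %w keeps the wrapped error reachable via errors.Is:

package main

import (
	"errors"
	"fmt"
)

// errNoSuchVM is a stand-in sentinel; podman uses errors defined elsewhere.
var errNoSuchVM = errors.New("no such VM")

// listVMs is a hypothetical helper wrapping a lower-level failure with %w,
// mirroring the fmt.Errorf("listing vms: %w", err) call in list().
func listVMs() error {
	return fmt.Errorf("listing vms: %w", errNoSuchVM)
}

func main() {
	err := listVMs()
	// errors.Is walks the %w chain, which errors.Cause (pkg/errors) could not
	// do for stdlib-wrapped errors.
	fmt.Println(errors.Is(err, errNoSuchVM)) // true
	fmt.Println(err)                         // listing vms: no such VM
}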
diff --git a/cmd/podman/machine/machine.go b/cmd/podman/machine/machine.go
index 5a8a06b9d..0618337cc 100644
--- a/cmd/podman/machine/machine.go
+++ b/cmd/podman/machine/machine.go
@@ -101,12 +101,6 @@ func resolveEventSock() ([]string, error) {
return []string{sock}, nil
}
- xdg, err := util.GetRuntimeDir()
- if err != nil {
- logrus.Warnf("Failed to get runtime dir, machine events will not be published: %s", err)
- return nil, nil
- }
-
re := regexp.MustCompile(`machine_events.*\.sock`)
sockPaths := make([]string, 0)
fn := func(path string, info os.DirEntry, err error) error {
@@ -125,8 +119,12 @@ func resolveEventSock() ([]string, error) {
sockPaths = append(sockPaths, path)
return nil
}
+ sockDir, err := eventSockDir()
+ if err != nil {
+ logrus.Warnf("Failed to get runtime dir, machine events will not be published: %s", err)
+ }
- if err := filepath.WalkDir(filepath.Join(xdg, "podman"), fn); err != nil {
+ if err := filepath.WalkDir(sockDir, fn); err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, nil
}
@@ -135,6 +133,14 @@ func resolveEventSock() ([]string, error) {
return sockPaths, nil
}
+func eventSockDir() (string, error) {
+ xdg, err := util.GetRuntimeDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(xdg, "podman"), nil
+}
+
func newMachineEvent(status events.Status, event events.Event) {
openEventSock.Do(initMachineEvents)
diff --git a/cmd/podman/machine/machine_unix.go b/cmd/podman/machine/machine_unix.go
index b56d081ec..a2d9b9d8e 100644
--- a/cmd/podman/machine/machine_unix.go
+++ b/cmd/podman/machine/machine_unix.go
@@ -4,9 +4,20 @@
package machine
import (
+ "fmt"
"os"
+
+ "github.com/containers/podman/v4/pkg/rootless"
+ "github.com/spf13/cobra"
)
func isUnixSocket(file os.DirEntry) bool {
return file.Type()&os.ModeSocket != 0
}
+
+func rootlessOnly(cmd *cobra.Command, args []string) error {
+ if !rootless.IsRootless() {
+ return fmt.Errorf("cannot run command %q as root", cmd.CommandPath())
+ }
+ return nil
+}
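The rootlessOnly helper added here is attached as PersistentPreRunE to the machine rm/set/ssh/start/stop commands further down. A minimal sketch of how such a guard plugs into cobra; the euid check is a simplified stand-in for rootless.IsRootless:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// rootlessOnly refuses to run when invoked as root, mirroring the guard added
// for the podman machine subcommands. The real code uses rootless.IsRootless.
func rootlessOnly(cmd *cobra.Command, _ []string) error {
	if os.Geteuid() == 0 { // simplified stand-in for !rootless.IsRootless()
		return fmt.Errorf("cannot run command %q as root", cmd.CommandPath())
	}
	return nil
}

func main() {
	cmd := &cobra.Command{
		Use:               "demo",
		PersistentPreRunE: rootlessOnly, // runs before RunE and before child commands
		RunE: func(*cobra.Command, []string) error {
			fmt.Println("running rootless")
			return nil
		},
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}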
diff --git a/cmd/podman/machine/machine_windows.go b/cmd/podman/machine/machine_windows.go
index ffd5d8827..bf1240868 100644
--- a/cmd/podman/machine/machine_windows.go
+++ b/cmd/podman/machine/machine_windows.go
@@ -3,9 +3,18 @@ package machine
import (
"os"
"strings"
+
+ "github.com/spf13/cobra"
)
func isUnixSocket(file os.DirEntry) bool {
// Assume a socket on Windows, since sock mode is not supported yet https://github.com/golang/go/issues/33357
return !file.Type().IsDir() && strings.HasSuffix(file.Name(), ".sock")
}
+
+func rootlessOnly(cmd *cobra.Command, args []string) error {
+	// Rootless is not relevant on Windows. In the future, rootless.IsRootless
+	// could be switched to return true on Windows, and the other code paths migrated accordingly.
+
+ return nil
+}
diff --git a/cmd/podman/machine/rm.go b/cmd/podman/machine/rm.go
index a6e66265c..362c9a7d3 100644
--- a/cmd/podman/machine/rm.go
+++ b/cmd/podman/machine/rm.go
@@ -20,6 +20,7 @@ var (
Use: "rm [options] [MACHINE]",
Short: "Remove an existing machine",
Long: "Remove a managed virtual machine ",
+ PersistentPreRunE: rootlessOnly,
RunE: rm,
Args: cobra.MaximumNArgs(1),
Example: `podman machine rm myvm`,
diff --git a/cmd/podman/machine/set.go b/cmd/podman/machine/set.go
index 5777882da..1b9e1b2bd 100644
--- a/cmd/podman/machine/set.go
+++ b/cmd/podman/machine/set.go
@@ -18,6 +18,7 @@ var (
Use: "set [options] [NAME]",
Short: "Sets a virtual machine setting",
Long: "Sets an updatable virtual machine setting",
+ PersistentPreRunE: rootlessOnly,
RunE: setMachine,
Args: cobra.MaximumNArgs(1),
Example: `podman machine set --rootful=false`,
diff --git a/cmd/podman/machine/ssh.go b/cmd/podman/machine/ssh.go
index 4a86da67a..cb2f62f51 100644
--- a/cmd/podman/machine/ssh.go
+++ b/cmd/podman/machine/ssh.go
@@ -4,22 +4,24 @@
package machine
import (
+ "fmt"
"net/url"
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/machine"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
sshCmd = &cobra.Command{
- Use: "ssh [options] [NAME] [COMMAND [ARG ...]]",
- Short: "SSH into an existing machine",
- Long: "SSH into a managed virtual machine ",
- RunE: ssh,
+ Use: "ssh [options] [NAME] [COMMAND [ARG ...]]",
+ Short: "SSH into an existing machine",
+ Long: "SSH into a managed virtual machine ",
+ PersistentPreRunE: rootlessOnly,
+ RunE: ssh,
Example: `podman machine ssh myvm
podman machine ssh myvm echo hello`,
ValidArgsFunction: autocompleteMachineSSH,
@@ -87,9 +89,10 @@ func ssh(cmd *cobra.Command, args []string) error {
vm, err = provider.LoadVMByName(vmName)
if err != nil {
- return errors.Wrapf(err, "vm %s not found", vmName)
+ return fmt.Errorf("vm %s not found: %w", vmName, err)
}
- return vm.SSH(vmName, sshOpts)
+ err = vm.SSH(vmName, sshOpts)
+ return utils.HandleOSExecError(err)
}
func remoteConnectionUsername() (string, error) {
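vm.SSH's error is now routed through utils.HandleOSExecError rather than returned directly, so the exit status of the remote command can be preserved. The helper's body is not part of this patch; a plausible sketch, assuming it unwraps *exec.ExitError and exits with that status:

package main

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
)

// handleOSExecError is a sketch of what a helper like utils.HandleOSExecError
// might do: if the wrapped error carries an exit status, exit with that status
// instead of printing a generic error. This is an assumption, not the podman code.
func handleOSExecError(err error) error {
	if err == nil {
		return nil
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		// propagate the remote command's exit code, e.g. from `podman machine ssh vm false`
		os.Exit(exitErr.ExitCode())
	}
	return err
}

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run()
	if err := handleOSExecError(err); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(125)
	}
}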
diff --git a/cmd/podman/machine/start.go b/cmd/podman/machine/start.go
index c9b99e63b..15a75522a 100644
--- a/cmd/podman/machine/start.go
+++ b/cmd/podman/machine/start.go
@@ -9,7 +9,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -18,6 +17,7 @@ var (
Use: "start [MACHINE]",
Short: "Start an existing machine",
Long: "Start a managed virtual machine ",
+ PersistentPreRunE: rootlessOnly,
RunE: start,
Args: cobra.MaximumNArgs(1),
Example: `podman machine start myvm`,
@@ -54,9 +54,9 @@ func start(_ *cobra.Command, args []string) error {
}
if active {
if vmName == activeName {
- return errors.Wrapf(machine.ErrVMAlreadyRunning, "cannot start VM %s", vmName)
+ return fmt.Errorf("cannot start VM %s: %w", vmName, machine.ErrVMAlreadyRunning)
}
- return errors.Wrapf(machine.ErrMultipleActiveVM, "cannot start VM %s. VM %s is currently running", vmName, activeName)
+ return fmt.Errorf("cannot start VM %s. VM %s is currently running or starting: %w", vmName, activeName, machine.ErrMultipleActiveVM)
}
fmt.Printf("Starting machine %q\n", vmName)
if err := vm.Start(vmName, machine.StartOptions{}); err != nil {
diff --git a/cmd/podman/machine/stop.go b/cmd/podman/machine/stop.go
index 993662792..ce87a44c4 100644
--- a/cmd/podman/machine/stop.go
+++ b/cmd/podman/machine/stop.go
@@ -17,6 +17,7 @@ var (
Use: "stop [MACHINE]",
Short: "Stop an existing machine",
Long: "Stop a managed virtual machine ",
+ PersistentPreRunE: rootlessOnly,
RunE: stop,
Args: cobra.MaximumNArgs(1),
Example: `podman machine stop myvm`,
diff --git a/cmd/podman/manifest/push.go b/cmd/podman/manifest/push.go
index b96a65c4a..9479e79a3 100644
--- a/cmd/podman/manifest/push.go
+++ b/cmd/podman/manifest/push.go
@@ -1,6 +1,7 @@
package manifest
import (
+ "fmt"
"io/ioutil"
"github.com/containers/common/pkg/auth"
@@ -11,7 +12,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -88,10 +88,10 @@ func push(cmd *cobra.Command, args []string) error {
listImageSpec := args[0]
destSpec := args[1]
if listImageSpec == "" {
- return errors.Errorf(`invalid image name "%s"`, listImageSpec)
+ return fmt.Errorf(`invalid image name "%s"`, listImageSpec)
}
if destSpec == "" {
- return errors.Errorf(`invalid destination "%s"`, destSpec)
+ return fmt.Errorf(`invalid destination "%s"`, destSpec)
}
if manifestPushOpts.CredentialsCLI != "" {
diff --git a/cmd/podman/manifest/remove.go b/cmd/podman/manifest/remove.go
index c32ffad78..4aa3b66b7 100644
--- a/cmd/podman/manifest/remove.go
+++ b/cmd/podman/manifest/remove.go
@@ -5,7 +5,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -31,7 +30,7 @@ func init() {
func remove(cmd *cobra.Command, args []string) error {
updatedListID, err := registry.ImageEngine().ManifestRemoveDigest(registry.Context(), args[0], args[1])
if err != nil {
- return errors.Wrapf(err, "error removing from manifest list %s", args[0])
+ return fmt.Errorf("removing from manifest list %s: %w", args[0], err)
}
fmt.Println(updatedListID)
return nil
diff --git a/cmd/podman/networks/create.go b/cmd/podman/networks/create.go
index 84c58d4dc..2cf7023f3 100644
--- a/cmd/podman/networks/create.go
+++ b/cmd/podman/networks/create.go
@@ -1,6 +1,7 @@
package network
import (
+ "errors"
"fmt"
"net"
@@ -11,7 +12,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/parse"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -97,11 +97,11 @@ func networkCreate(cmd *cobra.Command, args []string) error {
var err error
networkCreateOptions.Labels, err = parse.GetAllLabels([]string{}, labels)
if err != nil {
- return errors.Wrap(err, "failed to parse labels")
+ return fmt.Errorf("failed to parse labels: %w", err)
}
networkCreateOptions.Options, err = parse.GetAllLabels([]string{}, opts)
if err != nil {
- return errors.Wrapf(err, "unable to parse options")
+ return fmt.Errorf("unable to parse options: %w", err)
}
network := types.Network{
@@ -181,11 +181,11 @@ func parseRange(iprange string) (*types.LeaseRange, error) {
startIP, err := util.FirstIPInSubnet(subnet)
if err != nil {
- return nil, errors.Wrap(err, "failed to get first ip in range")
+ return nil, fmt.Errorf("failed to get first ip in range: %w", err)
}
lastIP, err := util.LastIPInSubnet(subnet)
if err != nil {
- return nil, errors.Wrap(err, "failed to get last ip in range")
+ return nil, fmt.Errorf("failed to get last ip in range: %w", err)
}
return &types.LeaseRange{
StartIP: startIP,
diff --git a/cmd/podman/networks/reload.go b/cmd/podman/networks/reload.go
index 7b6323187..66248e9fb 100644
--- a/cmd/podman/networks/reload.go
+++ b/cmd/podman/networks/reload.go
@@ -21,7 +21,7 @@ var (
Long: networkReloadDescription,
RunE: networkReload,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompleteContainers,
Example: `podman network reload --latest
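CheckAllLatestAndCIDFile and CheckAllLatestAndPodIDFile are folded into a single CheckAllLatestAndIDFile here and in the pod commands below; the last argument names the id-file flag ("" when the command has none, "pod-id-file" for pod rm/start/stop). A rough sketch of what such a consolidated validator could look like, assuming it only checks flag/argument combinations; the real implementation in cmd/podman/validate may differ:

package validate

import (
	"errors"
	"fmt"

	"github.com/spf13/cobra"
)

// checkAllLatestAndIDFile is a sketch: it rejects mixing positional names with
// --all, --latest, or the command's id-file flag (named by idFileFlag, "" if absent).
func checkAllLatestAndIDFile(cmd *cobra.Command, args []string, ignoreArgLen bool, idFileFlag string) error {
	usedFlags := 0
	if cmd.Flags().Changed("all") {
		usedFlags++
	}
	if cmd.Flags().Changed("latest") {
		usedFlags++
	}
	if idFileFlag != "" && cmd.Flags().Changed(idFileFlag) {
		usedFlags++
	}
	if usedFlags > 1 {
		return errors.New("--all, --latest and id-file options cannot be used together")
	}
	if !ignoreArgLen && len(args) > 0 && usedFlags > 0 {
		return fmt.Errorf("no arguments are needed with --all, --latest or --%s", idFileFlag)
	}
	if !ignoreArgLen && len(args) == 0 && usedFlags == 0 {
		return errors.New("you must provide at least one name or id")
	}
	return nil
}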
diff --git a/cmd/podman/networks/rm.go b/cmd/podman/networks/rm.go
index f71f59eea..c2d3f655f 100644
--- a/cmd/podman/networks/rm.go
+++ b/cmd/podman/networks/rm.go
@@ -1,6 +1,7 @@
package network
import (
+ "errors"
"fmt"
"strings"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -78,15 +78,9 @@ func networkRm(cmd *cobra.Command, args []string) error {
}
func setExitCode(err error) {
- cause := errors.Cause(err)
- switch {
- case cause == define.ErrNoSuchNetwork:
+ if errors.Is(err, define.ErrNoSuchNetwork) || strings.Contains(err.Error(), define.ErrNoSuchNetwork.Error()) {
registry.SetExitCode(1)
- case strings.Contains(cause.Error(), define.ErrNoSuchNetwork.Error()):
- registry.SetExitCode(1)
- case cause == define.ErrNetworkInUse:
- registry.SetExitCode(2)
- case strings.Contains(cause.Error(), define.ErrNetworkInUse.Error()):
+ } else if errors.Is(err, define.ErrNetworkInUse) || strings.Contains(err.Error(), define.ErrNetworkInUse.Error()) {
registry.SetExitCode(2)
}
}
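setExitCode collapses the old errors.Cause switch into two errors.Is checks, keeping the strings.Contains fallback for errors that reach the client as plain text (for example from the remote API) and so lose their concrete type. A compact sketch of the same pattern with stand-in sentinels:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var (
	errNoSuchNetwork = errors.New("network not found")
	errNetworkInUse  = errors.New("network is being used")
)

// exitCodeFor mirrors setExitCode: match by wrapped type when possible, by
// message text when the error was flattened to a string.
func exitCodeFor(err error) int {
	switch {
	case errors.Is(err, errNoSuchNetwork) || strings.Contains(err.Error(), errNoSuchNetwork.Error()):
		return 1
	case errors.Is(err, errNetworkInUse) || strings.Contains(err.Error(), errNetworkInUse.Error()):
		return 2
	default:
		return 125
	}
}

func main() {
	fmt.Println(exitCodeFor(fmt.Errorf("removing network: %w", errNoSuchNetwork))) // 1
	fmt.Println(exitCodeFor(errors.New("network is being used by a container")))   // 2
}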
diff --git a/cmd/podman/parse/filters.go b/cmd/podman/parse/filters.go
index 8a10f2a97..e4ab942af 100644
--- a/cmd/podman/parse/filters.go
+++ b/cmd/podman/parse/filters.go
@@ -1,10 +1,9 @@
package parse
import (
+ "fmt"
"net/url"
"strings"
-
- "github.com/pkg/errors"
)
func FilterArgumentsIntoFilters(filters []string) (url.Values, error) {
@@ -12,7 +11,7 @@ func FilterArgumentsIntoFilters(filters []string) (url.Values, error) {
for _, f := range filters {
t := strings.SplitN(f, "=", 2)
if len(t) < 2 {
- return parsedFilters, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
+ return parsedFilters, fmt.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
}
parsedFilters.Add(t[0], t[1])
}
diff --git a/cmd/podman/parse/net.go b/cmd/podman/parse/net.go
index 870690db3..9228c7127 100644
--- a/cmd/podman/parse/net.go
+++ b/cmd/podman/parse/net.go
@@ -1,4 +1,3 @@
-// nolint
// most of these validate and parse functions have been taken from projectatomic/docker
// and modified for cri-o
package parse
@@ -11,29 +10,13 @@ import (
"os"
"regexp"
"strings"
-
- "github.com/pkg/errors"
)
const (
- Protocol_TCP Protocol = 0
- Protocol_UDP Protocol = 1
+ LabelType string = "label"
+ ENVType string = "env"
)
-type Protocol int32
-
-// PortMapping specifies the port mapping configurations of a sandbox.
-type PortMapping struct {
- // Protocol of the port mapping.
- Protocol Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=runtime.Protocol" json:"protocol,omitempty"`
- // Port number within the container. Default: 0 (not specified).
- ContainerPort int32 `protobuf:"varint,2,opt,name=container_port,json=containerPort,proto3" json:"container_port,omitempty"`
- // Port number on the host. Default: 0 (not specified).
- HostPort int32 `protobuf:"varint,3,opt,name=host_port,json=hostPort,proto3" json:"host_port,omitempty"`
- // Host IP.
- HostIp string `protobuf:"bytes,4,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"`
-}
-
// Note: for flags that are in the form <number><unit>, use the RAMInBytes function
// from the units package in docker/go-units/size.go
@@ -46,7 +29,7 @@ var (
// validateExtraHost validates that the specified string is a valid extrahost and returns it.
// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6).
// for add-host flag
-func ValidateExtraHost(val string) (string, error) { // nolint
+func ValidateExtraHost(val string) (string, error) {
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
arr := strings.SplitN(val, ":", 2)
if len(arr) != 2 || len(arr[0]) == 0 {
@@ -89,16 +72,14 @@ func GetAllLabels(labelFile, inputLabels []string) (map[string]string, error) {
// There's an argument that we SHOULD be doing that parsing for
// all environment variables, even those sourced from files, but
// that would require a substantial rework.
- if err := parseEnvFile(labels, file); err != nil {
- // FIXME: parseEnvFile is using parseEnv, so we need to add extra
- // logic for labels.
+ if err := parseEnvOrLabelFile(labels, file, LabelType); err != nil {
return nil, err
}
}
for _, label := range inputLabels {
split := strings.SplitN(label, "=", 2)
if split[0] == "" {
- return nil, errors.Errorf("invalid label format: %q", label)
+ return nil, fmt.Errorf("invalid label format: %q", label)
}
value := ""
if len(split) > 1 {
@@ -109,18 +90,18 @@ func GetAllLabels(labelFile, inputLabels []string) (map[string]string, error) {
return labels, nil
}
-func parseEnv(env map[string]string, line string) error {
+func parseEnvOrLabel(env map[string]string, line, configType string) error {
data := strings.SplitN(line, "=", 2)
// catch invalid variables such as "=" or "=A"
if data[0] == "" {
- return errors.Errorf("invalid environment variable: %q", line)
+ return fmt.Errorf("invalid environment variable: %q", line)
}
// trim the front of a variable, but nothing else
name := strings.TrimLeft(data[0], whiteSpaces)
if strings.ContainsAny(name, whiteSpaces) {
- return errors.Errorf("name %q has white spaces, poorly formatted name", name)
+ return fmt.Errorf("name %q has white spaces, poorly formatted name", name)
}
if len(data) > 1 {
@@ -137,7 +118,7 @@ func parseEnv(env map[string]string, line string) error {
env[part[0]] = part[1]
}
}
- } else {
+ } else if configType == ENVType {
// if only a pass-through variable is given, clean it up.
if val, ok := os.LookupEnv(name); ok {
env[name] = val
@@ -147,8 +128,9 @@ func parseEnv(env map[string]string, line string) error {
return nil
}
-// parseEnvFile reads a file with environment variables enumerated by lines
-func parseEnvFile(env map[string]string, filename string) error {
+// parseEnvOrLabelFile reads a file with environment variables enumerated by lines
+// configType should be set to either "label" or "env" based on what type is being parsed
+func parseEnvOrLabelFile(envOrLabel map[string]string, filename, configType string) error {
fh, err := os.Open(filename)
if err != nil {
return err
@@ -161,7 +143,7 @@ func parseEnvFile(env map[string]string, filename string) error {
line := strings.TrimLeft(scanner.Text(), whiteSpaces)
// line is not empty, and not starting with '#'
if len(line) > 0 && !strings.HasPrefix(line, "#") {
- if err := parseEnv(env, line); err != nil {
+ if err := parseEnvOrLabel(envOrLabel, line, configType); err != nil {
return err
}
}
@@ -173,7 +155,7 @@ func parseEnvFile(env map[string]string, filename string) error {
// as it is currently not supported
func ValidateFileName(filename string) error {
if strings.Contains(filename, ":") {
- return errors.Errorf("invalid filename (should not contain ':') %q", filename)
+ return fmt.Errorf("invalid filename (should not contain ':') %q", filename)
}
return nil
}
@@ -182,10 +164,10 @@ func ValidateFileName(filename string) error {
func ValidURL(urlStr string) error {
url, err := url.ParseRequestURI(urlStr)
if err != nil {
- return errors.Wrapf(err, "invalid url %q", urlStr)
+ return fmt.Errorf("invalid url %q: %w", urlStr, err)
}
if url.Scheme == "" {
- return errors.Errorf("invalid url %q: missing scheme", urlStr)
+ return fmt.Errorf("invalid url %q: missing scheme", urlStr)
}
return nil
}
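parseEnv/parseEnvFile become parseEnvOrLabel/parseEnvOrLabelFile with a configType discriminator: a bare NAME falls back to the caller's environment only for env entries, so label files no longer pick up values from os.Environ. A trimmed-down sketch of that behavioural difference:

package main

import (
	"fmt"
	"os"
	"strings"
)

const (
	labelType = "label"
	envType   = "env"
)

// parseEnvOrLabel is a simplified version of the logic in cmd/podman/parse:
// "NAME=value" is always stored, but a bare "NAME" is resolved from the host
// environment only when parsing env entries, never for labels.
func parseEnvOrLabel(dst map[string]string, line, configType string) error {
	data := strings.SplitN(line, "=", 2)
	if data[0] == "" {
		return fmt.Errorf("invalid entry: %q", line)
	}
	if len(data) > 1 {
		dst[data[0]] = data[1]
	} else if configType == envType {
		if val, ok := os.LookupEnv(data[0]); ok {
			dst[data[0]] = val
		}
	}
	return nil
}

func main() {
	os.Setenv("HOME_DIR", "/home/demo")
	env, labels := map[string]string{}, map[string]string{}
	_ = parseEnvOrLabel(env, "HOME_DIR", envType)      // pulled from the host environment
	_ = parseEnvOrLabel(labels, "HOME_DIR", labelType) // ignored: labels never read os.Environ
	fmt.Println(env, labels)
}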
diff --git a/cmd/podman/parse/net_test.go b/cmd/podman/parse/net_test.go
index 51c8509df..a11edc2ca 100644
--- a/cmd/podman/parse/net_test.go
+++ b/cmd/podman/parse/net_test.go
@@ -1,4 +1,3 @@
-// nolint
// most of these validate and parse functions have been taken from projectatomic/docker
// and modified for cri-o
package parse
@@ -23,7 +22,6 @@ func createTmpFile(content []byte) (string, error) {
if _, err := tmpfile.Write(content); err != nil {
return "", err
-
}
if err := tmpfile.Close(); err != nil {
return "", err
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index f5b121009..8fd12baaf 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -1,6 +1,7 @@
package pods
import (
+ "errors"
"fmt"
"net"
"os"
@@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -188,14 +188,14 @@ func kube(cmd *cobra.Command, args []string) error {
for _, annotation := range annotations {
splitN := strings.SplitN(annotation, "=", 2)
if len(splitN) > 2 {
- return errors.Errorf("annotation %q must include an '=' sign", annotation)
+ return fmt.Errorf("annotation %q must include an '=' sign", annotation)
}
if kubeOptions.Annotations == nil {
kubeOptions.Annotations = make(map[string]string)
}
annotation := splitN[1]
if len(annotation) > define.MaxKubeAnnotation {
- return errors.Errorf("annotation exceeds maximum size, %d, of kubernetes annotation: %s", define.MaxKubeAnnotation, annotation)
+ return fmt.Errorf("annotation exceeds maximum size, %d, of kubernetes annotation: %s", define.MaxKubeAnnotation, annotation)
}
kubeOptions.Annotations[splitN[0]] = annotation
}
@@ -235,7 +235,7 @@ func teardown(yamlfile string) error {
defer f.Close()
reports, err := registry.ContainerEngine().PlayKubeDown(registry.GetContext(), f, *options)
if err != nil {
- return errors.Wrap(err, yamlfile)
+ return fmt.Errorf("%v: %w", yamlfile, err)
}
// Output stopped pods
@@ -273,7 +273,7 @@ func playkube(yamlfile string) error {
defer f.Close()
report, err := registry.ContainerEngine().PlayKube(registry.GetContext(), f, kubeOptions.PlayKubeOptions)
if err != nil {
- return errors.Wrap(err, yamlfile)
+ return fmt.Errorf("%s: %w", yamlfile, err)
}
// Print volumes report
for i, volume := range report.Volumes {
@@ -320,7 +320,7 @@ func playkube(yamlfile string) error {
}
if ctrsFailed > 0 {
- return errors.Errorf("failed to start %d containers", ctrsFailed)
+ return fmt.Errorf("failed to start %d containers", ctrsFailed)
}
return nil
diff --git a/cmd/podman/pods/clone.go b/cmd/podman/pods/clone.go
new file mode 100644
index 000000000..9558c6aed
--- /dev/null
+++ b/cmd/podman/pods/clone.go
@@ -0,0 +1,92 @@
+package pods
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/common/pkg/completion"
+ "github.com/containers/podman/v4/cmd/podman/common"
+ "github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/pkg/domain/entities"
+ "github.com/spf13/cobra"
+)
+
+var (
+ podCloneDescription = `Create an exact copy of a pod and the containers within it`
+
+ podCloneCommand = &cobra.Command{
+ Use: "clone [options] POD NAME",
+ Short: "Clone an existing pod",
+ Long: podCloneDescription,
+ RunE: clone,
+ Args: cobra.RangeArgs(1, 2),
+ ValidArgsFunction: common.AutocompleteClone,
+ Example: `podman pod clone pod_name new_name`,
+ }
+)
+
+var (
+ podClone entities.PodCloneOptions
+)
+
+func cloneFlags(cmd *cobra.Command) {
+ flags := cmd.Flags()
+
+ destroyFlagName := "destroy"
+ flags.BoolVar(&podClone.Destroy, destroyFlagName, false, "destroy the original pod")
+
+ startFlagName := "start"
+ flags.BoolVar(&podClone.Start, startFlagName, false, "start the new pod")
+
+ nameFlagName := "name"
+ flags.StringVarP(&podClone.CreateOpts.Name, nameFlagName, "n", "", "name the new pod")
+ _ = podCloneCommand.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone)
+
+ common.DefineCreateDefaults(&podClone.InfraOptions)
+ common.DefineCreateFlags(cmd, &podClone.InfraOptions, true, false)
+
+ podClone.InfraOptions.MemorySwappiness = -1 // this is not implemented for pods yet, need to set -1 default manually
+
+	// We need to fill an empty container create option for each container to get sane defaults.
+	// For now these cannot be used: the flag names conflict too much, which makes sense
+	// since this is a pod command, not a container command.
+	// TODO: add support for container-specific arguments/flags
+ common.DefineCreateDefaults(&podClone.PerContainerOptions)
+}
+func init() {
+ registry.Commands = append(registry.Commands, registry.CliCommand{
+ Command: podCloneCommand,
+ Parent: podCmd,
+ })
+
+ cloneFlags(podCloneCommand)
+}
+
+func clone(cmd *cobra.Command, args []string) error {
+ switch len(args) {
+ case 0:
+ return fmt.Errorf("must specify at least 1 argument: %w", define.ErrInvalidArg)
+ case 2:
+ podClone.CreateOpts.Name = args[1]
+ }
+
+ podClone.ID = args[0]
+
+ if cmd.Flag("shm-size").Changed {
+ podClone.InfraOptions.ShmSize = cmd.Flag("shm-size").Value.String()
+ }
+
+ podClone.PerContainerOptions.IsClone = true
+ rep, err := registry.ContainerEngine().PodClone(context.Background(), podClone)
+ if err != nil {
+ if rep != nil {
+ fmt.Printf("pod %s created but error after creation\n", rep.Id)
+ }
+ return err
+ }
+
+ fmt.Println(rep.Id)
+
+ return nil
+}
diff --git a/cmd/podman/pods/create.go b/cmd/podman/pods/create.go
index 62f820790..aea8a7229 100644
--- a/cmd/podman/pods/create.go
+++ b/cmd/podman/pods/create.go
@@ -2,6 +2,7 @@ package pods
import (
"context"
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/containers"
"github.com/containers/podman/v4/cmd/podman/parse"
"github.com/containers/podman/v4/cmd/podman/registry"
- "github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
@@ -24,7 +24,6 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/docker/docker/pkg/parsers"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -36,12 +35,14 @@ var (
You can then start it at any time with the podman pod start <pod_id> command. The pod will be created with the initial state 'created'.`
createCommand = &cobra.Command{
- Use: "create [options]",
- Args: validate.NoArgs,
+ Use: "create [options] [NAME]",
+ Args: cobra.MaximumNArgs(1),
Short: "Create a new empty pod",
Long: podCreateDescription,
RunE: create,
ValidArgsFunction: completion.AutocompleteNone,
+ Example: `podman pod create
+ podman pod create --label foo=bar mypod`,
}
)
@@ -63,6 +64,7 @@ func init() {
})
flags := createCommand.Flags()
flags.SetInterspersed(false)
+ common.DefineCreateDefaults(&infraOptions)
common.DefineCreateFlags(createCommand, &infraOptions, true, false)
common.DefineNetFlags(createCommand)
@@ -115,11 +117,17 @@ func create(cmd *cobra.Command, args []string) error {
rawImageName string
podName string
)
+ if len(args) > 0 {
+ if len(createOptions.Name) > 0 {
+ return fmt.Errorf("cannot specify --name and NAME at the same time")
+ }
+ createOptions.Name = args[0]
+ }
labelFile = infraOptions.LabelFile
labels = infraOptions.Label
createOptions.Labels, err = parse.GetAllLabels(labelFile, labels)
if err != nil {
- return errors.Wrapf(err, "unable to process labels")
+ return fmt.Errorf("unable to process labels: %w", err)
}
if cmd.Flag("infra-image").Changed {
@@ -128,7 +136,7 @@ func create(cmd *cobra.Command, args []string) error {
img := imageName
if !createOptions.Infra {
if cmd.Flag("no-hosts").Changed {
- return fmt.Errorf("cannot specify no-hosts without an infra container")
+ return fmt.Errorf("cannot specify --no-hosts without an infra container")
}
flags := cmd.Flags()
createOptions.Net, err = common.NetFlagsToNetOptions(nil, *flags)
@@ -157,9 +165,14 @@ func create(cmd *cobra.Command, args []string) error {
return err
}
if strings.Contains(share, "cgroup") && shareParent {
- return errors.Wrapf(define.ErrInvalidArg, "cannot define the pod as the cgroup parent at the same time as joining the infra container's cgroupNS")
+ return fmt.Errorf("cannot define the pod as the cgroup parent at the same time as joining the infra container's cgroupNS: %w", define.ErrInvalidArg)
}
- createOptions.Share = strings.Split(share, ",")
+
+ if strings.HasPrefix(share, "+") {
+ createOptions.Share = append(createOptions.Share, strings.Split(specgen.DefaultKernelNamespaces, ",")...)
+ share = share[1:]
+ }
+ createOptions.Share = append(createOptions.Share, strings.Split(share, ",")...)
createOptions.ShareParent = &shareParent
if cmd.Flag("infra-command").Changed {
// Only send content to server side if user changed defaults
@@ -180,10 +193,10 @@ func create(cmd *cobra.Command, args []string) error {
if cmd.Flag("pod-id-file").Changed {
podIDFD, err = util.OpenExclusiveFile(podIDFile)
if err != nil && os.IsExist(err) {
- return errors.Errorf("pod id file exists. Ensure another pod is not using it or delete %s", podIDFile)
+ return fmt.Errorf("pod id file exists. Ensure another pod is not using it or delete %s", podIDFile)
}
if err != nil {
- return errors.Errorf("opening pod-id-file %s", podIDFile)
+ return fmt.Errorf("opening pod-id-file %s", podIDFile)
}
defer errorhandling.CloseQuiet(podIDFD)
defer errorhandling.SyncQuiet(podIDFD)
@@ -191,7 +204,7 @@ func create(cmd *cobra.Command, args []string) error {
if len(createOptions.Net.PublishPorts) > 0 {
if !createOptions.Infra {
- return errors.Errorf("you must have an infra container to publish port bindings to the host")
+ return fmt.Errorf("you must have an infra container to publish port bindings to the host")
}
}
@@ -218,7 +231,7 @@ func create(cmd *cobra.Command, args []string) error {
ret, err := parsers.ParseUintList(copy)
copy = ""
if err != nil {
- return errors.Wrapf(err, "could not parse list")
+ return fmt.Errorf("could not parse list: %w", err)
}
var vals []int
for ind, val := range ret {
@@ -264,6 +277,7 @@ func create(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
+
podSpec.Volumes = podSpec.InfraContainerSpec.Volumes
podSpec.ImageVolumes = podSpec.InfraContainerSpec.ImageVolumes
podSpec.OverlayVolumes = podSpec.InfraContainerSpec.OverlayVolumes
@@ -288,7 +302,7 @@ func create(cmd *cobra.Command, args []string) error {
if len(podIDFile) > 0 {
if err = ioutil.WriteFile(podIDFile, []byte(response.Id), 0644); err != nil {
- return errors.Wrapf(err, "failed to write pod ID to file")
+ return fmt.Errorf("failed to write pod ID to file: %w", err)
}
}
fmt.Println(response.Id)
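pod create now takes an optional NAME argument and accepts a '+'-prefixed --share value that appends namespaces to the kernel defaults instead of replacing them. A small sketch of the share parsing, with a hypothetical constant standing in for specgen.DefaultKernelNamespaces:

package main

import (
	"fmt"
	"strings"
)

// defaultKernelNamespaces is a stand-in for specgen.DefaultKernelNamespaces.
const defaultKernelNamespaces = "ipc,net,uts"

// parseShare mirrors the new pod create logic: a leading "+" means
// "the defaults plus these", otherwise the given list replaces the defaults.
func parseShare(share string) []string {
	var out []string
	if strings.HasPrefix(share, "+") {
		out = append(out, strings.Split(defaultKernelNamespaces, ",")...)
		share = share[1:]
	}
	return append(out, strings.Split(share, ",")...)
}

func main() {
	fmt.Println(parseShare("pid"))  // [pid]             -- replaces the defaults
	fmt.Println(parseShare("+pid")) // [ipc net uts pid] -- appends to the defaults
}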
diff --git a/cmd/podman/pods/inspect.go b/cmd/podman/pods/inspect.go
index bb30fe6e6..082e8d9a1 100644
--- a/cmd/podman/pods/inspect.go
+++ b/cmd/podman/pods/inspect.go
@@ -2,6 +2,7 @@ package pods
import (
"context"
+ "errors"
"os"
"text/template"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -49,10 +49,10 @@ func init() {
func inspect(cmd *cobra.Command, args []string) error {
if len(args) < 1 && !inspectOptions.Latest {
- return errors.Errorf("you must provide the name or id of a running pod")
+ return errors.New("you must provide the name or id of a running pod")
}
if len(args) > 0 && inspectOptions.Latest {
- return errors.Errorf("--latest and containers cannot be used together")
+ return errors.New("--latest and containers cannot be used together")
}
if !inspectOptions.Latest {
diff --git a/cmd/podman/pods/kill.go b/cmd/podman/pods/kill.go
index 7216e08bb..5d3b15dc3 100644
--- a/cmd/podman/pods/kill.go
+++ b/cmd/podman/pods/kill.go
@@ -22,7 +22,7 @@ var (
Long: podKillDescription,
RunE: kill,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompletePodsRunning,
Example: `podman pod kill podID
diff --git a/cmd/podman/pods/logs.go b/cmd/podman/pods/logs.go
index 28e7b7a43..0102d4b71 100644
--- a/cmd/podman/pods/logs.go
+++ b/cmd/podman/pods/logs.go
@@ -1,6 +1,8 @@
package pods
import (
+ "errors"
+ "fmt"
"os"
"github.com/containers/common/pkg/completion"
@@ -10,7 +12,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -100,7 +101,7 @@ func logs(_ *cobra.Command, args []string) error {
// parse time, error out if something is wrong
since, err := util.ParseInputTime(logsPodOptions.SinceRaw, true)
if err != nil {
- return errors.Wrapf(err, "error parsing --since %q", logsPodOptions.SinceRaw)
+ return fmt.Errorf("error parsing --since %q: %w", logsPodOptions.SinceRaw, err)
}
logsPodOptions.Since = since
}
@@ -108,14 +109,14 @@ func logs(_ *cobra.Command, args []string) error {
// parse time, error out if something is wrong
until, err := util.ParseInputTime(logsPodOptions.UntilRaw, false)
if err != nil {
- return errors.Wrapf(err, "error parsing --until %q", logsPodOptions.UntilRaw)
+ return fmt.Errorf("error parsing --until %q: %w", logsPodOptions.UntilRaw, err)
}
logsPodOptions.Until = until
}
// Remote can only process one container at a time
if registry.IsRemote() && logsPodOptions.ContainerName == "" {
- return errors.Wrapf(define.ErrInvalidArg, "-c or --container cannot be empty")
+ return fmt.Errorf("-c or --container cannot be empty: %w", define.ErrInvalidArg)
}
logsPodOptions.StdoutWriter = os.Stdout
diff --git a/cmd/podman/pods/pause.go b/cmd/podman/pods/pause.go
index adc54d171..389fb8415 100644
--- a/cmd/podman/pods/pause.go
+++ b/cmd/podman/pods/pause.go
@@ -22,7 +22,7 @@ var (
Long: podPauseDescription,
RunE: pause,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompletePodsRunning,
Example: `podman pod pause podID1 podID2
diff --git a/cmd/podman/pods/ps.go b/cmd/podman/pods/ps.go
index 1275e65dc..681c9c42e 100644
--- a/cmd/podman/pods/ps.go
+++ b/cmd/podman/pods/ps.go
@@ -2,6 +2,7 @@ package pods
import (
"context"
+ "errors"
"fmt"
"os"
"sort"
@@ -15,7 +16,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -24,7 +24,7 @@ var (
// Command: podman pod _ps_
psCmd = &cobra.Command{
- Use: "ps [options]",
+ Use: "ps [options]",
Aliases: []string{"ls", "list"},
Short: "List pods",
Long: psDescription,
@@ -49,7 +49,6 @@ func init() {
flags.BoolVar(&psInput.CtrNames, "ctr-names", false, "Display the container names")
flags.BoolVar(&psInput.CtrIds, "ctr-ids", false, "Display the container UUIDs. If no-trunc is not set they will be truncated")
flags.BoolVar(&psInput.CtrStatus, "ctr-status", false, "Display the container status")
- // TODO should we make this a [] ?
filterFlagName := "filter"
flags.StringSliceVarP(&inputFilters, filterFlagName, "f", []string{}, "Filter output based on conditions given")
@@ -81,7 +80,7 @@ func pods(cmd *cobra.Command, _ []string) error {
for _, f := range inputFilters {
split := strings.SplitN(f, "=", 2)
if len(split) < 2 {
- return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
+ return fmt.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
}
psInput.Filters[split[0]] = append(psInput.Filters[split[0]], split[1])
}
@@ -212,7 +211,7 @@ func (l ListPodReporter) ID() string {
}
// Id returns the Pod id
-func (l ListPodReporter) Id() string { // nolint
+func (l ListPodReporter) Id() string { //nolint:revive,stylecheck
if noTrunc {
return l.ListPodsReport.Id
}
@@ -226,7 +225,7 @@ func (l ListPodReporter) InfraID() string {
// InfraId returns the infra container id for the pod
// depending on trunc
-func (l ListPodReporter) InfraId() string { // nolint
+func (l ListPodReporter) InfraId() string { //nolint:revive,stylecheck
if len(l.ListPodsReport.InfraId) == 0 {
return ""
}
@@ -277,7 +276,7 @@ func sortPodPsOutput(sortBy string, lprs []*entities.ListPodsReport) error {
case "status":
sort.Sort(podPsSortedStatus{lprs})
default:
- return errors.Errorf("invalid option for --sort, options are: id, names, or number")
+ return errors.New("invalid option for --sort, options are: id, names, or number")
}
return nil
}
diff --git a/cmd/podman/pods/restart.go b/cmd/podman/pods/restart.go
index 6d624806a..a8e31ce07 100644
--- a/cmd/podman/pods/restart.go
+++ b/cmd/podman/pods/restart.go
@@ -22,7 +22,7 @@ var (
Long: podRestartDescription,
RunE: restart,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
ValidArgsFunction: common.AutocompletePods,
Example: `podman pod restart podID1 podID2
diff --git a/cmd/podman/pods/rm.go b/cmd/podman/pods/rm.go
index 52a815534..2ffd968f9 100644
--- a/cmd/podman/pods/rm.go
+++ b/cmd/podman/pods/rm.go
@@ -2,6 +2,7 @@ package pods
import (
"context"
+ "errors"
"fmt"
"strings"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/specgenutil"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -35,7 +35,7 @@ var (
Long: podRmDescription,
RunE: rm,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndPodIDFile(cmd, args, false, true)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "pod-id-file")
},
ValidArgsFunction: common.AutocompletePods,
Example: `podman pod rm mywebserverpod
@@ -112,11 +112,7 @@ func removePods(namesOrIDs []string, rmOptions entities.PodRmOptions, printIDs b
}
func setExitCode(err error) {
- cause := errors.Cause(err)
- switch {
- case cause == define.ErrNoSuchPod:
- registry.SetExitCode(1)
- case strings.Contains(cause.Error(), define.ErrNoSuchPod.Error()):
+ if errors.Is(err, define.ErrNoSuchPod) || strings.Contains(err.Error(), define.ErrNoSuchPod.Error()) {
registry.SetExitCode(1)
}
}
diff --git a/cmd/podman/pods/start.go b/cmd/podman/pods/start.go
index b668cdd61..9436d34a5 100644
--- a/cmd/podman/pods/start.go
+++ b/cmd/podman/pods/start.go
@@ -31,7 +31,7 @@ var (
Long: podStartDescription,
RunE: start,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndPodIDFile(cmd, args, false, true)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "pod-id-file")
},
ValidArgsFunction: common.AutocompletePods,
Example: `podman pod start podID
diff --git a/cmd/podman/pods/stop.go b/cmd/podman/pods/stop.go
index c8c3d2732..e8f82bee9 100644
--- a/cmd/podman/pods/stop.go
+++ b/cmd/podman/pods/stop.go
@@ -36,7 +36,7 @@ var (
Long: podStopDescription,
RunE: stop,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndPodIDFile(cmd, args, false, true)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "pod-id-file")
},
ValidArgsFunction: common.AutocompletePodsRunning,
Example: `podman pod stop mywebserverpod
diff --git a/cmd/podman/pods/top.go b/cmd/podman/pods/top.go
index 4e9c7a3ee..34f3d1c33 100644
--- a/cmd/podman/pods/top.go
+++ b/cmd/podman/pods/top.go
@@ -2,6 +2,7 @@ package pods
import (
"context"
+ "errors"
"fmt"
"os"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -67,7 +67,7 @@ func top(_ *cobra.Command, args []string) error {
}
if len(args) < 1 && !topOptions.Latest {
- return errors.Errorf("you must provide the name or id of a running pod")
+ return errors.New("you must provide the name or id of a running pod")
}
if topOptions.Latest {
diff --git a/cmd/podman/pods/unpause.go b/cmd/podman/pods/unpause.go
index a308a82c3..47b29458b 100644
--- a/cmd/podman/pods/unpause.go
+++ b/cmd/podman/pods/unpause.go
@@ -22,11 +22,9 @@ var (
Long: podUnpauseDescription,
RunE: unpause,
Args: func(cmd *cobra.Command, args []string) error {
- return validate.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return validate.CheckAllLatestAndIDFile(cmd, args, false, "")
},
- // TODO have a function which shows only pods which could be unpaused
- // for now show all
- ValidArgsFunction: common.AutocompletePods,
+ ValidArgsFunction: common.AutoCompletePodsPause,
Example: `podman pod unpause podID1 podID2
podman pod unpause --all
podman pod unpause --latest`,
@@ -43,7 +41,7 @@ func init() {
Parent: podCmd,
})
flags := unpauseCommand.Flags()
- flags.BoolVarP(&unpauseOptions.All, "all", "a", false, "Pause all running pods")
+ flags.BoolVarP(&unpauseOptions.All, "all", "a", false, "Unpause all running pods")
validate.AddLatestFlag(unpauseCommand, &unpauseOptions.Latest)
}
diff --git a/cmd/podman/registry/config.go b/cmd/podman/registry/config.go
index b5c9b359c..cae618b44 100644
--- a/cmd/podman/registry/config.go
+++ b/cmd/podman/registry/config.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
const (
@@ -92,14 +91,14 @@ func setXdgDirs() error {
return nil
}
- // Setup XDG_RUNTIME_DIR
+ // Set up XDG_RUNTIME_DIR
if _, found := os.LookupEnv("XDG_RUNTIME_DIR"); !found {
dir, err := util.GetRuntimeDir()
if err != nil {
return err
}
if err := os.Setenv("XDG_RUNTIME_DIR", dir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR="+dir)
+ return fmt.Errorf("cannot set XDG_RUNTIME_DIR=%s: %w", dir, err)
}
}
@@ -110,14 +109,14 @@ func setXdgDirs() error {
}
}
- // Setup XDG_CONFIG_HOME
+ // Set up XDG_CONFIG_HOME
if _, found := os.LookupEnv("XDG_CONFIG_HOME"); !found {
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
if err != nil {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME="+cfgHomeDir)
+ return fmt.Errorf("cannot set XDG_CONFIG_HOME=%s: %w", cfgHomeDir, err)
}
}
return nil
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 2bd4fa723..f28d92e2f 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -1,6 +1,7 @@
package main
import (
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -20,7 +21,6 @@ import (
"github.com/containers/podman/v4/pkg/parallel"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/version"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -137,22 +137,20 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
if cmd.Flag("import").Changed {
runtime, err := crutils.CRGetRuntimeFromArchive(cmd.Flag("import").Value.String())
if err != nil {
- return errors.Wrapf(
- err,
- "failed extracting runtime information from %s",
- cmd.Flag("import").Value.String(),
+ return fmt.Errorf(
+ "failed extracting runtime information from %s: %w",
+ cmd.Flag("import").Value.String(), err,
)
}
- if cfg.RuntimePath == "" {
+
+ runtimeFlag := cmd.Root().Flag("runtime")
+ if runtimeFlag == nil {
+ return errors.New("failed to load --runtime flag")
+ }
+
+ if !runtimeFlag.Changed {
// If the user did not select a runtime, this takes the one from
// the checkpoint archives and tells Podman to use it for the restore.
- runtimeFlag := cmd.Root().Flags().Lookup("runtime")
- if runtimeFlag == nil {
- return errors.Errorf(
- "setting runtime to '%s' for restore",
- *runtime,
- )
- }
if err := runtimeFlag.Value.Set(*runtime); err != nil {
return err
}
@@ -161,7 +159,7 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
} else if cfg.RuntimePath != *runtime {
// If the user selected a runtime on the command-line this checks if
// it is the same then during checkpointing and errors out if not.
- return errors.Errorf(
+ return fmt.Errorf(
"checkpoint archive %s was created with runtime '%s' and cannot be restored with runtime '%s'",
cmd.Flag("import").Value.String(),
*runtime,
@@ -179,15 +177,15 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
var err error
cfg.URI, cfg.Identity, err = cfg.ActiveDestination()
if err != nil {
- return errors.Wrap(err, "failed to resolve active destination")
+ return fmt.Errorf("failed to resolve active destination: %w", err)
}
if err := cmd.Root().LocalFlags().Set("url", cfg.URI); err != nil {
- return errors.Wrap(err, "failed to override --url flag")
+ return fmt.Errorf("failed to override --url flag: %w", err)
}
if err := cmd.Root().LocalFlags().Set("identity", cfg.Identity); err != nil {
- return errors.Wrap(err, "failed to override --identity flag")
+ return fmt.Errorf("failed to override --identity flag: %w", err)
}
}
@@ -256,7 +254,7 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
}
if cfg.MaxWorks <= 0 {
- return errors.Errorf("maximum workers must be set to a positive number (got %d)", cfg.MaxWorks)
+ return fmt.Errorf("maximum workers must be set to a positive number (got %d)", cfg.MaxWorks)
}
if err := parallel.SetMaxThreads(uint(cfg.MaxWorks)); err != nil {
return err
@@ -298,12 +296,12 @@ func persistentPostRunE(cmd *cobra.Command, args []string) error {
if cmd.Flag("memory-profile").Changed {
f, err := os.Create(registry.PodmanConfig().MemoryProfile)
if err != nil {
- return errors.Wrap(err, "creating memory profile")
+ return fmt.Errorf("creating memory profile: %w", err)
}
defer f.Close()
runtime.GC() // get up-to-date GC statistics
if err := pprof.WriteHeapProfile(f); err != nil {
- return errors.Wrap(err, "writing memory profile")
+ return fmt.Errorf("writing memory profile: %w", err)
}
}
@@ -423,7 +421,7 @@ func rootFlags(cmd *cobra.Command, opts *entities.PodmanConfig) {
// -s is deprecated due to conflict with -s on subcommands
storageDriverFlagName := "storage-driver"
pFlags.StringVar(&opts.StorageDriver, storageDriverFlagName, "", "Select which storage driver is used to manage storage of images and containers")
- _ = cmd.RegisterFlagCompletionFunc(storageDriverFlagName, completion.AutocompleteNone) //TODO: what can we recommend here?
+ _ = cmd.RegisterFlagCompletionFunc(storageDriverFlagName, completion.AutocompleteNone)
tmpdirFlagName := "tmpdir"
pFlags.StringVar(&opts.Engine.TmpDir, tmpdirFlagName, "", "Path to the tmp directory for libpod state content.\n\nNote: use the environment variable 'TMPDIR' to change the temporary storage location for container images, '/var/tmp'.\n")
@@ -482,7 +480,7 @@ func resolveDestination() (string, string, string) {
cfg, err := config.ReadCustomConfig()
if err != nil {
- logrus.Warning(errors.Wrap(err, "unable to read local containers.conf"))
+ logrus.Warning(fmt.Errorf("unable to read local containers.conf: %w", err))
return "", registry.DefaultAPIAddress(), ""
}
@@ -495,7 +493,7 @@ func resolveDestination() (string, string, string) {
func formatError(err error) string {
var message string
- if errors.Cause(err) == define.ErrOCIRuntime {
+ if errors.Is(err, define.ErrOCIRuntime) {
// OCIRuntimeErrors include the reason for the failure in the
// second to last message in the error chain.
message = fmt.Sprintf(
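The checkpoint-import path now looks up the --runtime flag once on the root command and only overrides it when the user did not set it explicitly. A condensed sketch of that flow; runtimeFromArchive is a hypothetical stub for crutils.CRGetRuntimeFromArchive, and the mismatch check is simplified to compare against the flag value rather than cfg.RuntimePath:

package main

import (
	"errors"
	"fmt"

	"github.com/spf13/cobra"
)

// runtimeFromArchive is a stand-in for crutils.CRGetRuntimeFromArchive.
func runtimeFromArchive(path string) (*string, error) {
	rt := "crun"
	return &rt, nil
}

// applyArchiveRuntime mirrors the new persistentPreRunE logic: honour an
// explicit --runtime, otherwise adopt the runtime recorded in the archive.
func applyArchiveRuntime(cmd *cobra.Command, archive string) error {
	runtime, err := runtimeFromArchive(archive)
	if err != nil {
		return fmt.Errorf("failed extracting runtime information from %s: %w", archive, err)
	}
	runtimeFlag := cmd.Root().Flag("runtime")
	if runtimeFlag == nil {
		return errors.New("failed to load --runtime flag")
	}
	if !runtimeFlag.Changed {
		return runtimeFlag.Value.Set(*runtime)
	}
	if runtimeFlag.Value.String() != *runtime {
		return fmt.Errorf("checkpoint archive %s was created with runtime '%s' and cannot be restored with runtime '%s'",
			archive, *runtime, runtimeFlag.Value.String())
	}
	return nil
}

func main() {
	root := &cobra.Command{Use: "podman"}
	root.PersistentFlags().String("runtime", "", "OCI runtime")
	fmt.Println(applyArchiveRuntime(root, "checkpoint.tar.gz"))
}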
diff --git a/cmd/podman/root_test.go b/cmd/podman/root_test.go
index 0a73afdc4..98a3de79d 100644
--- a/cmd/podman/root_test.go
+++ b/cmd/podman/root_test.go
@@ -1,12 +1,12 @@
package main
import (
+ "errors"
"fmt"
"strings"
"testing"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)
func TestFormatError(t *testing.T) {
@@ -22,7 +22,7 @@ func TestFormatError(t *testing.T) {
func TestFormatOCIError(t *testing.T) {
expectedPrefix := "Error: "
expectedSuffix := "OCI runtime output"
- err := errors.Wrap(define.ErrOCIRuntime, expectedSuffix)
+ err := fmt.Errorf("%s: %w", expectedSuffix, define.ErrOCIRuntime)
output := formatError(err)
if !strings.HasPrefix(output, expectedPrefix) {
diff --git a/cmd/podman/secrets/create.go b/cmd/podman/secrets/create.go
index 01ee3d256..8ecfecf69 100644
--- a/cmd/podman/secrets/create.go
+++ b/cmd/podman/secrets/create.go
@@ -2,6 +2,7 @@ package secrets
import (
"context"
+ "errors"
"fmt"
"io"
"os"
@@ -11,7 +12,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -66,7 +66,7 @@ func create(cmd *cobra.Command, args []string) error {
case env:
envValue := os.Getenv(path)
if envValue == "" {
- return errors.Errorf("cannot create store secret data: environment variable %s is not set", path)
+ return fmt.Errorf("cannot create store secret data: environment variable %s is not set", path)
}
reader = strings.NewReader(envValue)
case path == "-" || path == "/dev/stdin":
diff --git a/cmd/podman/secrets/inspect.go b/cmd/podman/secrets/inspect.go
index 473d5620c..1fcc676b4 100644
--- a/cmd/podman/secrets/inspect.go
+++ b/cmd/podman/secrets/inspect.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -78,7 +77,7 @@ func inspect(cmd *cobra.Command, args []string) error {
fmt.Fprintf(os.Stderr, "error inspecting secret: %v\n", err)
}
}
- return errors.Errorf("inspecting secret: %v", errs[0])
+ return fmt.Errorf("inspecting secret: %w", errs[0])
}
return nil
}
diff --git a/cmd/podman/secrets/list.go b/cmd/podman/secrets/list.go
index 558a16ccf..8b1956eab 100644
--- a/cmd/podman/secrets/list.go
+++ b/cmd/podman/secrets/list.go
@@ -2,6 +2,7 @@ package secrets
import (
"context"
+ "fmt"
"os"
"time"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -108,7 +108,7 @@ func outputTemplate(cmd *cobra.Command, responses []*entities.SecretListReport)
if !listFlag.noHeading {
if err := tmpl.Execute(w, headers); err != nil {
- return errors.Wrapf(err, "failed to write report column headers")
+ return fmt.Errorf("failed to write report column headers: %w", err)
}
}
return tmpl.Execute(w, responses)
diff --git a/cmd/podman/system/connection/add.go b/cmd/podman/system/connection/add.go
index 387de3c58..191603718 100644
--- a/cmd/podman/system/connection/add.go
+++ b/cmd/podman/system/connection/add.go
@@ -2,25 +2,22 @@ package connection
import (
"encoding/json"
+ "errors"
"fmt"
"net"
"net/url"
"os"
- "os/user"
"regexp"
- "time"
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/system"
"github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/pkg/terminal"
- "github.com/pkg/errors"
+ "github.com/containers/podman/v4/pkg/domain/utils"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/agent"
)
var (
@@ -79,7 +76,7 @@ func add(cmd *cobra.Command, args []string) error {
// Default to ssh schema if none given
dest := args[1]
if match, err := regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(dest)); err != nil {
- return errors.Wrapf(err, "invalid destination")
+ return fmt.Errorf("invalid destination: %w", err)
} else if !match {
dest = "ssh://" + dest
}
@@ -95,7 +92,7 @@ func add(cmd *cobra.Command, args []string) error {
switch uri.Scheme {
case "ssh":
if uri.User.Username() == "" {
- if uri.User, err = GetUserInfo(uri); err != nil {
+ if uri.User, err = utils.GetUserInfo(uri); err != nil {
return err
}
}
@@ -180,44 +177,20 @@ func add(cmd *cobra.Command, args []string) error {
return cfg.Write()
}
-func GetUserInfo(uri *url.URL) (*url.Userinfo, error) {
- var (
- usr *user.User
- err error
- )
- if u, found := os.LookupEnv("_CONTAINERS_ROOTLESS_UID"); found {
- usr, err = user.LookupId(u)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to lookup rootless user")
- }
- } else {
- usr, err = user.Current()
- if err != nil {
- return nil, errors.Wrapf(err, "failed to obtain current user")
- }
- }
-
- pw, set := uri.User.Password()
- if set {
- return url.UserPassword(usr.Username, pw), nil
- }
- return url.User(usr.Username), nil
-}
-
func getUDS(uri *url.URL, iden string) (string, error) {
- cfg, err := ValidateAndConfigure(uri, iden)
+ cfg, err := utils.ValidateAndConfigure(uri, iden)
if err != nil {
- return "", errors.Wrapf(err, "failed to validate")
+ return "", fmt.Errorf("failed to validate: %w", err)
}
dial, err := ssh.Dial("tcp", uri.Host, cfg)
if err != nil {
- return "", errors.Wrapf(err, "failed to connect")
+ return "", fmt.Errorf("failed to connect: %w", err)
}
defer dial.Close()
session, err := dial.NewSession()
if err != nil {
- return "", errors.Wrapf(err, "failed to create new ssh session on %q", uri.Host)
+ return "", fmt.Errorf("failed to create new ssh session on %q: %w", uri.Host, err)
}
defer session.Close()
@@ -226,94 +199,18 @@ func getUDS(uri *url.URL, iden string) (string, error) {
if v, found := os.LookupEnv("PODMAN_BINARY"); found {
podman = v
}
- infoJSON, err := ExecRemoteCommand(dial, podman+" info --format=json")
+ infoJSON, err := utils.ExecRemoteCommand(dial, podman+" info --format=json")
if err != nil {
return "", err
}
var info define.Info
if err := json.Unmarshal(infoJSON, &info); err != nil {
- return "", errors.Wrapf(err, "failed to parse 'podman info' results")
+ return "", fmt.Errorf("failed to parse 'podman info' results: %w", err)
}
if info.Host.RemoteSocket == nil || len(info.Host.RemoteSocket.Path) == 0 {
- return "", errors.Errorf("remote podman %q failed to report its UDS socket", uri.Host)
+ return "", fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host)
}
return info.Host.RemoteSocket.Path, nil
}
-
-// ValidateAndConfigure will take a ssh url and an identity key (rsa and the like) and ensure the information given is valid
-// iden iden can be blank to mean no identity key
-// once the function validates the information it creates and returns an ssh.ClientConfig.
-func ValidateAndConfigure(uri *url.URL, iden string) (*ssh.ClientConfig, error) {
- var signers []ssh.Signer
- passwd, passwdSet := uri.User.Password()
- if iden != "" { // iden might be blank if coming from image scp or if no validation is needed
- value := iden
- s, err := terminal.PublicKey(value, []byte(passwd))
- if err != nil {
- return nil, errors.Wrapf(err, "failed to read identity %q", value)
- }
- signers = append(signers, s)
- logrus.Debugf("SSH Ident Key %q %s %s", value, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
- }
- if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { // validate ssh information, specifically the unix file socket used by the ssh agent.
- logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer enabled", sock)
-
- c, err := net.Dial("unix", sock)
- if err != nil {
- return nil, err
- }
- agentSigners, err := agent.NewClient(c).Signers()
- if err != nil {
- return nil, err
- }
-
- signers = append(signers, agentSigners...)
-
- if logrus.IsLevelEnabled(logrus.DebugLevel) {
- for _, s := range agentSigners {
- logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
- }
- }
- }
- var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notaibly public key authorization
- if len(signers) > 0 {
- var dedup = make(map[string]ssh.Signer)
- for _, s := range signers {
- fp := ssh.FingerprintSHA256(s.PublicKey())
- if _, found := dedup[fp]; found {
- logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
- }
- dedup[fp] = s
- }
-
- var uniq []ssh.Signer
- for _, s := range dedup {
- uniq = append(uniq, s)
- }
- authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) {
- return uniq, nil
- }))
- }
- if passwdSet { // if password authentication is given and valid, add to the list
- authMethods = append(authMethods, ssh.Password(passwd))
- }
- if len(authMethods) == 0 {
- authMethods = append(authMethods, ssh.PasswordCallback(func() (string, error) {
- pass, err := terminal.ReadPassword(fmt.Sprintf("%s's login password:", uri.User.Username()))
- return string(pass), err
- }))
- }
- tick, err := time.ParseDuration("40s")
- if err != nil {
- return nil, err
- }
- cfg := &ssh.ClientConfig{
- User: uri.User.Username(),
- Auth: authMethods,
- HostKeyCallback: ssh.InsecureIgnoreHostKey(),
- Timeout: tick,
- }
- return cfg, nil
-}
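GetUserInfo, ValidateAndConfigure and ExecRemoteCommand are deleted here and consumed from pkg/domain/utils instead, so they can be shared outside this package. A sketch of the call sequence as add.go now uses it, with the surrounding plumbing omitted:

package main

import (
	"fmt"
	"net/url"

	"github.com/containers/podman/v4/pkg/domain/utils"
	"golang.org/x/crypto/ssh"
)

// remotePodmanInfo shows the flow used by `podman system connection add`:
// build an ssh.ClientConfig, dial the host, then run `podman info` remotely.
func remotePodmanInfo(uri *url.URL, identity string) ([]byte, error) {
	cfg, err := utils.ValidateAndConfigure(uri, identity)
	if err != nil {
		return nil, fmt.Errorf("failed to validate: %w", err)
	}
	dial, err := ssh.Dial("tcp", uri.Host, cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to connect: %w", err)
	}
	defer dial.Close()
	return utils.ExecRemoteCommand(dial, "podman info --format=json")
}

func main() {
	uri, _ := url.Parse("ssh://core@localhost:22")
	if out, err := remotePodmanInfo(uri, ""); err == nil {
		fmt.Println(string(out))
	}
}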
diff --git a/cmd/podman/system/connection/remove.go b/cmd/podman/system/connection/remove.go
index 463eae9fa..29bf98c43 100644
--- a/cmd/podman/system/connection/remove.go
+++ b/cmd/podman/system/connection/remove.go
@@ -1,11 +1,12 @@
package connection
import (
+ "errors"
+
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/system"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
diff --git a/cmd/podman/system/connection/shared.go b/cmd/podman/system/connection/shared.go
deleted file mode 100644
index 714ae827d..000000000
--- a/cmd/podman/system/connection/shared.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package connection
-
-import (
- "bytes"
-
- "github.com/pkg/errors"
- "golang.org/x/crypto/ssh"
-)
-
-// ExecRemoteCommand takes a ssh client connection and a command to run and executes the
-// command on the specified client. The function returns the Stdout from the client or the Stderr
-func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) {
- sess, err := dial.NewSession() // new ssh client session
- if err != nil {
- return nil, err
- }
- defer sess.Close()
-
- var buffer bytes.Buffer
- var bufferErr bytes.Buffer
- sess.Stdout = &buffer // output from client funneled into buffer
- sess.Stderr = &bufferErr // err form client funneled into buffer
- if err := sess.Run(run); err != nil { // run the command on the ssh client
- return nil, errors.Wrapf(err, bufferErr.String())
- }
- return buffer.Bytes(), nil
-}
diff --git a/cmd/podman/system/df.go b/cmd/podman/system/df.go
index dad14df6b..5b8126be6 100644
--- a/cmd/podman/system/df.go
+++ b/cmd/podman/system/df.go
@@ -78,11 +78,11 @@ func printSummary(cmd *cobra.Command, reports *entities.SystemDfReport) error {
}
}
imageSummary := dfSummary{
- Type: "Images",
- Total: len(reports.Images),
- Active: active,
- size: size,
- reclaimable: reclaimable,
+ Type: "Images",
+ Total: len(reports.Images),
+ Active: active,
+ RawSize: size,
+ RawReclaimable: reclaimable,
}
dfSummaries = append(dfSummaries, &imageSummary)
@@ -100,11 +100,11 @@ func printSummary(cmd *cobra.Command, reports *entities.SystemDfReport) error {
conSize += c.RWSize
}
containerSummary := dfSummary{
- Type: "Containers",
- Total: len(reports.Containers),
- Active: conActive,
- size: conSize,
- reclaimable: conReclaimable,
+ Type: "Containers",
+ Total: len(reports.Containers),
+ Active: conActive,
+ RawSize: conSize,
+ RawReclaimable: conReclaimable,
}
dfSummaries = append(dfSummaries, &containerSummary)
@@ -120,11 +120,11 @@ func printSummary(cmd *cobra.Command, reports *entities.SystemDfReport) error {
volumesReclaimable += v.ReclaimableSize
}
volumeSummary := dfSummary{
- Type: "Local Volumes",
- Total: len(reports.Volumes),
- Active: activeVolumes,
- size: volumesSize,
- reclaimable: volumesReclaimable,
+ Type: "Local Volumes",
+ Total: len(reports.Volumes),
+ Active: activeVolumes,
+ RawSize: volumesSize,
+ RawReclaimable: volumesReclaimable,
}
dfSummaries = append(dfSummaries, &volumeSummary)
@@ -150,7 +150,7 @@ func printSummary(cmd *cobra.Command, reports *entities.SystemDfReport) error {
return writeTemplate(rpt, hdrs, dfSummaries)
}
-func printVerbose(cmd *cobra.Command, reports *entities.SystemDfReport) error { // nolint:interfacer
+func printVerbose(cmd *cobra.Command, reports *entities.SystemDfReport) error { //nolint:interfacer
rpt := report.New(os.Stdout, cmd.Name())
defer rpt.Flush()
@@ -277,22 +277,22 @@ func (d *dfVolume) Size() string {
}
type dfSummary struct {
- Type string
- Total int
- Active int
- size int64
- reclaimable int64
+ Type string
+ Total int
+ Active int
+ RawSize int64 `json:"Size"`
+ RawReclaimable int64 `json:"Reclaimable"`
}
func (d *dfSummary) Size() string {
- return units.HumanSize(float64(d.size))
+ return units.HumanSize(float64(d.RawSize))
}
func (d *dfSummary) Reclaimable() string {
percent := 0
// make sure to check this to prevent div by zero problems
- if d.size > 0 {
- percent = int(math.Round(float64(d.reclaimable) / float64(d.size) * float64(100)))
+ if d.RawSize > 0 {
+ percent = int(math.Round(float64(d.RawReclaimable) / float64(d.RawSize) * float64(100)))
}
- return fmt.Sprintf("%s (%d%%)", units.HumanSize(float64(d.reclaimable)), percent)
+ return fmt.Sprintf("%s (%d%%)", units.HumanSize(float64(d.RawReclaimable)), percent)
}
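The dfSummary change above exports the raw byte counts (with JSON tags) while keeping the human-readable rendering in methods, so machine-readable output can expose exact sizes without breaking the table view. A hedged sketch of the same pattern, assuming github.com/docker/go-units for the human-readable formatting:

```go
package dfsketch

import (
	"encoding/json"
	"fmt"

	"github.com/docker/go-units"
)

// summary mirrors dfSummary: the raw byte count is exported and JSON-tagged
// for machine-readable output, while the method renders the table cell.
type summary struct {
	Type    string
	RawSize int64 `json:"Size"`
}

func (s *summary) Size() string {
	return units.HumanSize(float64(s.RawSize))
}

func demo() {
	s := &summary{Type: "Images", RawSize: 2097152}
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // {"Type":"Images","Size":2097152}
	fmt.Println(s.Size())  // 2.097MB
}
```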
diff --git a/cmd/podman/system/dial_stdio.go b/cmd/podman/system/dial_stdio.go
index 8b665bedc..42ce65746 100644
--- a/cmd/podman/system/dial_stdio.go
+++ b/cmd/podman/system/dial_stdio.go
@@ -2,13 +2,15 @@ package system
import (
"context"
+ "fmt"
"io"
"os"
+ "errors"
+
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/bindings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -40,15 +42,15 @@ func runDialStdio() error {
defer cancel()
bindCtx, err := bindings.NewConnection(ctx, cfg.URI)
if err != nil {
- return errors.Wrap(err, "failed to open connection to podman")
+ return fmt.Errorf("failed to open connection to podman: %w", err)
}
conn, err := bindings.GetClient(bindCtx)
if err != nil {
- return errors.Wrap(err, "failed to get connection after initialization")
+ return fmt.Errorf("failed to get connection after initialization: %w", err)
}
netConn, err := conn.GetDialer(bindCtx)
if err != nil {
- return errors.Wrap(err, "failed to open the raw stream connection")
+ return fmt.Errorf("failed to open the raw stream connection: %w", err)
}
defer netConn.Close()
@@ -95,7 +97,7 @@ func copier(to halfWriteCloser, from halfReadCloser, debugDescription string) er
}
}()
if _, err := io.Copy(to, from); err != nil {
- return errors.Wrapf(err, "error while Copy (%s)", debugDescription)
+ return fmt.Errorf("error while Copy (%s): %w", debugDescription, err)
}
return nil
}
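dial_stdio.go is one of many files in this change that drop github.com/pkg/errors in favor of fmt.Errorf with the %w verb. A small standard-library sketch of why the migration is safe for callers: the wrapped cause stays reachable through errors.Is and errors.As.

```go
package wrapsketch

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// check wraps the failure with %w, the standard-library replacement for
// errors.Wrap: the underlying cause is recorded in the error chain.
func check(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("failed to open connection to podman: %w", err)
	}
	return nil
}

func demo() {
	err := check("/does/not/exist")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: wrapping preserved the cause
}
```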
diff --git a/cmd/podman/system/prune.go b/cmd/podman/system/prune.go
index ff78f93bb..1d6ba8155 100644
--- a/cmd/podman/system/prune.go
+++ b/cmd/podman/system/prune.go
@@ -75,6 +75,7 @@ func prune(cmd *cobra.Command, args []string) error {
}
}
+ // Remove all unused pods, containers, images, networks, and volume data.
pruneOptions.Filters, err = parse.FilterArgumentsIntoFilters(filters)
if err != nil {
return err
@@ -106,6 +107,11 @@ func prune(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
+ // Print Network prune results
+ err = utils.PrintNetworkPruneResults(response.NetworkPruneReports, true)
+ if err != nil {
+ return err
+ }
fmt.Printf("Total reclaimed space: %s\n", units.HumanSize((float64)(response.ReclaimedSpace)))
return nil
diff --git a/cmd/podman/system/reset.go b/cmd/podman/system/reset.go
index 176573bf6..20f15a34f 100644
--- a/cmd/podman/system/reset.go
+++ b/cmd/podman/system/reset.go
@@ -91,18 +91,10 @@ func reset(cmd *cobra.Command, args []string) {
registry.ContainerEngine().Shutdown(registry.Context())
registry.ImageEngine().Shutdown(registry.Context())
- engine, err := infra.NewSystemEngine(entities.ResetMode, registry.PodmanConfig())
- if err != nil {
- logrus.Error(err)
- os.Exit(define.ExecErrorCodeGeneric)
- }
- defer engine.Shutdown(registry.Context())
-
- if err := engine.Reset(registry.Context()); err != nil {
+ // Do not try to shut the engine down, as a Reset engine is not valid
+ // after its creation.
+ if _, err := infra.NewSystemEngine(entities.ResetMode, registry.PodmanConfig()); err != nil {
logrus.Error(err)
- // FIXME change this to return the error like other commands
- // defer will never run on os.Exit()
- //nolint:gocritic
os.Exit(define.ExecErrorCodeGeneric)
}
diff --git a/cmd/podman/system/service_abi.go b/cmd/podman/system/service_abi.go
index 9dc9de1c8..6823d77ba 100644
--- a/cmd/podman/system/service_abi.go
+++ b/cmd/podman/system/service_abi.go
@@ -4,24 +4,46 @@
package system
import (
+ "errors"
"fmt"
"net"
"net/url"
"os"
"path/filepath"
+ "github.com/containers/common/pkg/cgroups"
"github.com/containers/podman/v4/cmd/podman/registry"
api "github.com/containers/podman/v4/pkg/api/server"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra"
"github.com/containers/podman/v4/pkg/servicereaper"
+ "github.com/containers/podman/v4/utils"
"github.com/coreos/go-systemd/v22/activation"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"golang.org/x/sys/unix"
)
+// maybeMoveToSubCgroup moves the current process into a sub-cgroup when
+// it is running in the root cgroup on a system that uses cgroupv2.
+func maybeMoveToSubCgroup() error {
+ unifiedMode, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return err
+ }
+ if !unifiedMode {
+ return nil
+ }
+ cgroup, err := utils.GetOwnCgroup()
+ if err != nil {
+ return err
+ }
+ if cgroup == "/" {
+ return utils.MoveUnderCgroupSubtree("init")
+ }
+ return nil
+}
+
func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities.ServiceOptions) error {
var (
listener net.Listener
@@ -46,11 +68,15 @@ func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities
return fmt.Errorf("wrong number of file descriptors for socket activation protocol (%d != 1)", len(listeners))
}
listener = listeners[0]
+ // note that activation.Listeners() returns a nil listener when it cannot listen on the fd (e.g. a udp connection)
+ if listener == nil {
+ return errors.New("unexpected fd received from systemd: cannot listen on it")
+ }
libpodRuntime.SetRemoteURI(listeners[0].Addr().String())
} else {
uri, err := url.Parse(opts.URI)
if err != nil {
- return errors.Errorf("%s is an invalid socket destination", opts.URI)
+ return fmt.Errorf("%s is an invalid socket destination", opts.URI)
}
switch uri.Scheme {
@@ -70,7 +96,7 @@ func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities
} else {
listener, err = net.Listen(uri.Scheme, path)
if err != nil {
- return errors.Wrapf(err, "unable to create socket")
+ return fmt.Errorf("unable to create socket: %w", err)
}
}
case "tcp":
@@ -81,7 +107,7 @@ func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities
}
listener, err = net.Listen(uri.Scheme, host)
if err != nil {
- return errors.Wrapf(err, "unable to create socket %v", host)
+ return fmt.Errorf("unable to create socket %v: %w", host, err)
}
default:
logrus.Debugf("Attempting API Service endpoint scheme %q", uri.Scheme)
@@ -99,6 +125,10 @@ func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities
return err
}
+ if err := maybeMoveToSubCgroup(); err != nil {
+ return err
+ }
+
servicereaper.Start()
infra.StartWatcher(libpodRuntime)
server, err := api.NewServerWithSettings(libpodRuntime, listener, opts)
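maybeMoveToSubCgroup relies on containers/common's cgroups package and podman's own utils helpers. For illustration only, a simplified standard-library take on the same check (cgroup v2 unified mode plus a current cgroup of "/") using golang.org/x/sys/unix; the real helpers should be preferred since they handle more corner cases:

```go
package cgsketch

import (
	"os"
	"strings"

	"golang.org/x/sys/unix"
)

// inRootCgroupV2 is a simplified sketch of the condition maybeMoveToSubCgroup
// tests before moving the service into an "init" sub-cgroup.
func inRootCgroupV2() (bool, error) {
	var st unix.Statfs_t
	if err := unix.Statfs("/sys/fs/cgroup", &st); err != nil {
		return false, err
	}
	if st.Type != unix.CGROUP2_SUPER_MAGIC { // not a unified (v2) hierarchy
		return false, nil
	}
	data, err := os.ReadFile("/proc/self/cgroup")
	if err != nil {
		return false, err
	}
	// On cgroup v2 the file contains a single "0::<path>" line.
	for _, line := range strings.Split(string(data), "\n") {
		if strings.HasPrefix(line, "0::") {
			return strings.TrimPrefix(line, "0::") == "/", nil
		}
	}
	return false, nil
}
```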
diff --git a/cmd/podman/system/unshare.go b/cmd/podman/system/unshare.go
index 0ae5b81ad..6d9c33b64 100644
--- a/cmd/podman/system/unshare.go
+++ b/cmd/podman/system/unshare.go
@@ -1,14 +1,14 @@
package system
import (
+ "errors"
"os"
- "os/exec"
"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -47,35 +47,18 @@ func init() {
func unshare(cmd *cobra.Command, args []string) error {
if isRootless := rootless.IsRootless(); !isRootless {
- return errors.Errorf("please use unshare with rootless")
+ return errors.New("please use unshare with rootless")
}
// exec the specified command, if there is one
if len(args) < 1 {
// try to exec the shell, if one's set
shell, shellSet := os.LookupEnv("SHELL")
if !shellSet {
- return errors.Errorf("no command specified and no $SHELL specified")
+ return errors.New("no command specified and no $SHELL specified")
}
args = []string{shell}
}
err := registry.ContainerEngine().Unshare(registry.Context(), args, unshareOptions)
- if err != nil {
- if exitError, ok := err.(*exec.ExitError); ok {
- // the user command inside the unshare env has failed
- // we set the exit code, do not return the error to the user
- // otherwise "exit status X" will be printed
- registry.SetExitCode(exitError.ExitCode())
- return nil
- }
- // cmd.Run() can return fs.ErrNotExist, fs.ErrPermission or exec.ErrNotFound
- // follow podman run/exec standard with the exit codes
- if errors.Is(err, os.ErrNotExist) || errors.Is(err, exec.ErrNotFound) {
- registry.SetExitCode(127)
- } else if errors.Is(err, os.ErrPermission) {
- registry.SetExitCode(126)
- }
- return err
- }
- return nil
+ return utils.HandleOSExecError(err)
}
diff --git a/cmd/podman/utils/error.go b/cmd/podman/utils/error.go
index 2aaa71373..3efff0301 100644
--- a/cmd/podman/utils/error.go
+++ b/cmd/podman/utils/error.go
@@ -4,10 +4,12 @@ import (
"errors"
"fmt"
"os"
+ "os/exec"
"strconv"
"strings"
buildahCLI "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/podman/v4/cmd/podman/registry"
)
type OutputErrors []error
@@ -43,3 +45,33 @@ func ExitCodeFromBuildError(errorMsg string) (int, error) {
}
return buildahCLI.ExecErrorCodeGeneric, errors.New("message does not contains a valid exit code")
}
+
+// HandleOSExecError checks the given error for an exec.ExitError error and
+// sets the same podman exit code as the error.
+// No error will be returned in this case to make sure things like podman
+// unshare false work correctly without extra output.
+// When the exec file does not exist the exit code is set to 127; for
+// permission errors 126 is used. In these cases the error is still returned
+// so the user gets an error message.
+// If the error is nil it returns nil.
+func HandleOSExecError(err error) error {
+ if err == nil {
+ return nil
+ }
+ var exitError *exec.ExitError
+ if errors.As(err, &exitError) {
+ // the user command inside the unshare/ssh env has failed
+ // we set the exit code, do not return the error to the user
+ // otherwise "exit status X" will be printed
+ registry.SetExitCode(exitError.ExitCode())
+ return nil
+ }
+ // cmd.Run() can return fs.ErrNotExist, fs.ErrPermission or exec.ErrNotFound
+ // follow podman run/exec standard with the exit codes
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, exec.ErrNotFound) {
+ registry.SetExitCode(127)
+ } else if errors.Is(err, os.ErrPermission) {
+ registry.SetExitCode(126)
+ }
+ return err
+}
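The exit-code mapping HandleOSExecError applies can be summarized as a pure function. The sketch below mirrors it without touching the registry; the 125 fallback is an assumption for the sketch only (the helper itself returns the error instead of picking a code in that case):

```go
package exitsketch

import (
	"errors"
	"os"
	"os/exec"
)

// exitCodeFor reproduces the mapping: the child's own exit status wins,
// then 127 for "not found" and 126 for "not permitted".
func exitCodeFor(err error) int {
	var exitError *exec.ExitError
	switch {
	case err == nil:
		return 0
	case errors.As(err, &exitError):
		return exitError.ExitCode()
	case errors.Is(err, os.ErrNotExist) || errors.Is(err, exec.ErrNotFound):
		return 127
	case errors.Is(err, os.ErrPermission):
		return 126
	default:
		return 125 // generic fallback, assumed for this sketch
	}
}
```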
diff --git a/cmd/podman/utils/signals_linux.go b/cmd/podman/utils/signals_linux.go
deleted file mode 100644
index dd0507c0e..000000000
--- a/cmd/podman/utils/signals_linux.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package utils
-
-import (
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// Platform specific signal synonyms
-var (
- SIGHUP os.Signal = unix.SIGHUP
-)
diff --git a/cmd/podman/utils/signals_windows.go b/cmd/podman/utils/signals_windows.go
deleted file mode 100644
index e6fcc1b32..000000000
--- a/cmd/podman/utils/signals_windows.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build windows
-// +build windows
-
-package utils
-
-import (
- "os"
-
- "golang.org/x/sys/windows"
-)
-
-// Platform specific signal synonyms
-var (
- SIGHUP os.Signal = windows.SIGHUP
-)
diff --git a/cmd/podman/utils/utils.go b/cmd/podman/utils/utils.go
index 6fd6647d0..2ae123388 100644
--- a/cmd/podman/utils/utils.go
+++ b/cmd/podman/utils/utils.go
@@ -44,7 +44,7 @@ func PrintPodPruneResults(podPruneReports []*entities.PodPruneReport, heading bo
func PrintContainerPruneResults(containerPruneReports []*reports.PruneReport, heading bool) error {
var errs OutputErrors
- if heading && (len(containerPruneReports) > 0) {
+ if heading && len(containerPruneReports) > 0 {
fmt.Println("Deleted Containers")
}
for _, v := range containerPruneReports {
@@ -72,7 +72,7 @@ func PrintVolumePruneResults(volumePruneReport []*reports.PruneReport, heading b
}
func PrintImagePruneResults(imagePruneReports []*reports.PruneReport, heading bool) error {
- if heading {
+ if heading && len(imagePruneReports) > 0 {
fmt.Println("Deleted Images")
}
for _, r := range imagePruneReports {
@@ -84,3 +84,18 @@ func PrintImagePruneResults(imagePruneReports []*reports.PruneReport, heading bo
return nil
}
+
+func PrintNetworkPruneResults(networkPruneReport []*reports.PruneReport, heading bool) error {
+ var errs OutputErrors
+ if heading && len(networkPruneReport) > 0 {
+ fmt.Println("Deleted Networks")
+ }
+ for _, r := range networkPruneReport {
+ if r.Err == nil {
+ fmt.Println(r.Id)
+ } else {
+ errs = append(errs, r.Err)
+ }
+ }
+ return errs.PrintErrors()
+}
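PrintNetworkPruneResults follows the same shape as the container, volume, and image variants: print a heading only when there is something to report, echo the IDs that were removed, and collect the failures. A stand-alone sketch of that shape, assuming Go 1.20's errors.Join in place of the OutputErrors type:

```go
package prunesketch

import (
	"errors"
	"fmt"
)

// pruneReport mimics the shape of reports.PruneReport: one entry per object,
// carrying either the removed ID or the error hit while removing it.
type pruneReport struct {
	Id  string
	Err error
}

// printPruneResults prints a heading only when there is something to show,
// echoes successful IDs, and joins the failures into a single error.
func printPruneResults(heading string, rs []*pruneReport) error {
	if len(rs) > 0 {
		fmt.Println(heading)
	}
	var errs []error
	for _, r := range rs {
		if r.Err == nil {
			fmt.Println(r.Id)
		} else {
			errs = append(errs, r.Err)
		}
	}
	return errors.Join(errs...)
}
```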
diff --git a/cmd/podman/validate/args.go b/cmd/podman/validate/args.go
index 669456bd3..39eedca64 100644
--- a/cmd/podman/validate/args.go
+++ b/cmd/podman/validate/args.go
@@ -1,12 +1,12 @@
package validate
import (
+ "errors"
"fmt"
"strconv"
"strings"
"github.com/containers/podman/v4/cmd/podman/registry"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -23,12 +23,12 @@ func SubCommandExists(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
suggestions := cmd.SuggestionsFor(args[0])
if len(suggestions) == 0 {
- return errors.Errorf("unrecognized command `%[1]s %[2]s`\nTry '%[1]s --help' for more information", cmd.CommandPath(), args[0])
+ return fmt.Errorf("unrecognized command `%[1]s %[2]s`\nTry '%[1]s --help' for more information", cmd.CommandPath(), args[0])
}
- return errors.Errorf("unrecognized command `%[1]s %[2]s`\n\nDid you mean this?\n\t%[3]s\n\nTry '%[1]s --help' for more information", cmd.CommandPath(), args[0], strings.Join(suggestions, "\n\t"))
+ return fmt.Errorf("unrecognized command `%[1]s %[2]s`\n\nDid you mean this?\n\t%[3]s\n\nTry '%[1]s --help' for more information", cmd.CommandPath(), args[0], strings.Join(suggestions, "\n\t"))
}
- cmd.Help() // nolint: errcheck
- return errors.Errorf("missing command '%[1]s COMMAND'", cmd.CommandPath())
+ cmd.Help() //nolint: errcheck
+ return fmt.Errorf("missing command '%[1]s COMMAND'", cmd.CommandPath())
}
// IDOrLatestArgs used to validate a nameOrId was provided or the "--latest" flag
@@ -44,101 +44,56 @@ func IDOrLatestArgs(cmd *cobra.Command, args []string) error {
return fmt.Errorf("%q requires a name, id, or the \"--latest\" flag", cmd.CommandPath())
}
if len(args) > 0 && given {
- return fmt.Errorf("--latest and containers cannot be used together")
+ return errors.New("--latest and containers cannot be used together")
}
}
return nil
}
-// TODO: the two functions CheckAllLatestAndCIDFile and CheckAllLatestAndPodIDFile are almost identical.
-// It may be worth looking into generalizing the two a bit more and share code but time is scarce and
-// we only live once.
-
-// CheckAllLatestAndCIDFile checks that --all and --latest are used correctly.
-// If cidfile is set, also check for the --cidfile flag.
+// CheckAllLatestAndCIDFile checks that --all and --latest are used correctly for containers and pods
+// If idFileFlag is set, also checks for the --cidfile or --pod-id-file flag.
+// Note: this has been deprecated, use CheckAllLatestAndIDFile instead
func CheckAllLatestAndCIDFile(c *cobra.Command, args []string, ignoreArgLen bool, cidfile bool) error {
- var specifiedLatest bool
- argLen := len(args)
- if !registry.IsRemote() {
- specifiedLatest, _ = c.Flags().GetBool("latest")
- if c.Flags().Lookup("all") == nil || c.Flags().Lookup("latest") == nil {
- if !cidfile {
- return errors.New("unable to lookup values for 'latest' or 'all'")
- } else if c.Flags().Lookup("cidfile") == nil {
- return errors.New("unable to lookup values for 'latest', 'all' or 'cidfile'")
- }
- }
- }
-
- specifiedAll, _ := c.Flags().GetBool("all")
- specifiedCIDFile := false
- if cid, _ := c.Flags().GetStringArray("cidfile"); len(cid) > 0 {
- specifiedCIDFile = true
- }
-
- if specifiedCIDFile && (specifiedAll || specifiedLatest) {
- return errors.Errorf("--all, --latest and --cidfile cannot be used together")
- } else if specifiedAll && specifiedLatest {
- return errors.Errorf("--all and --latest cannot be used together")
- }
-
- if (argLen > 0) && specifiedAll {
- return errors.Errorf("no arguments are needed with --all")
- }
-
- if ignoreArgLen {
- return nil
- }
-
- if argLen > 0 {
- if specifiedLatest {
- return errors.Errorf("--latest and containers cannot be used together")
- } else if cidfile && (specifiedLatest || specifiedCIDFile) {
- return errors.Errorf("no arguments are needed with --latest or --cidfile")
- }
- }
-
- if specifiedCIDFile {
- return nil
- }
-
- if argLen < 1 && !specifiedAll && !specifiedLatest && !specifiedCIDFile {
- return errors.Errorf("you must provide at least one name or id")
- }
- return nil
+ return CheckAllLatestAndIDFile(c, args, ignoreArgLen, "cidfile")
}
// CheckAllLatestAndPodIDFile checks that --all and --latest are used correctly.
// If withIDFile is set, also check for the --pod-id-file flag.
+// Note: this has been deprecated, use CheckAllLatestAndIDFile instead
func CheckAllLatestAndPodIDFile(c *cobra.Command, args []string, ignoreArgLen bool, withIDFile bool) error {
+ return CheckAllLatestAndIDFile(c, args, ignoreArgLen, "pod-id-file")
+}
+
+// CheckAllLatestAndIDFile checks that --all and --latest are used correctly for containers and pods
+// If idFileFlag is set, also checks for the --cidfile or --pod-id-file flag.
+func CheckAllLatestAndIDFile(c *cobra.Command, args []string, ignoreArgLen bool, idFileFlag string) error {
var specifiedLatest bool
argLen := len(args)
if !registry.IsRemote() {
- // remote clients have no latest flag
specifiedLatest, _ = c.Flags().GetBool("latest")
if c.Flags().Lookup("all") == nil || c.Flags().Lookup("latest") == nil {
- if !withIDFile {
- return errors.New("unable to lookup values for 'latest' or 'all'")
- } else if c.Flags().Lookup("pod-id-file") == nil {
- return errors.New("unable to lookup values for 'latest', 'all' or 'pod-id-file'")
+ if idFileFlag == "" {
+ return errors.New("unable to look up values for 'latest' or 'all'")
+ } else if c.Flags().Lookup(idFileFlag) == nil {
+ return fmt.Errorf("unable to look up values for 'latest', 'all', or '%s'", idFileFlag)
}
}
}
specifiedAll, _ := c.Flags().GetBool("all")
- specifiedPodIDFile := false
- if pid, _ := c.Flags().GetStringArray("pod-id-file"); len(pid) > 0 {
- specifiedPodIDFile = true
+ specifiedIDFile := false
+ if cid, _ := c.Flags().GetStringArray(idFileFlag); len(cid) > 0 {
+ specifiedIDFile = true
}
- if specifiedPodIDFile && (specifiedAll || specifiedLatest) {
- return errors.Errorf("--all, --latest and --pod-id-file cannot be used together")
+ if specifiedIDFile && (specifiedAll || specifiedLatest) {
+ return fmt.Errorf("--all, --latest, and --%s cannot be used together", idFileFlag)
} else if specifiedAll && specifiedLatest {
- return errors.Errorf("--all and --latest cannot be used together")
+ return errors.New("--all and --latest cannot be used together")
}
if (argLen > 0) && specifiedAll {
- return errors.Errorf("no arguments are needed with --all")
+ return errors.New("no arguments are needed with --all")
}
if ignoreArgLen {
@@ -147,18 +102,18 @@ func CheckAllLatestAndPodIDFile(c *cobra.Command, args []string, ignoreArgLen bo
if argLen > 0 {
if specifiedLatest {
- return errors.Errorf("--latest and pods cannot be used together")
- } else if withIDFile && (specifiedLatest || specifiedPodIDFile) {
- return errors.Errorf("no arguments are needed with --latest or --pod-id-file")
+ return errors.New("--latest and containers cannot be used together")
+ } else if idFileFlag != "" && (specifiedLatest || specifiedIDFile) {
+ return fmt.Errorf("no arguments are needed with --latest or --%s", idFileFlag)
}
}
- if specifiedPodIDFile {
+ if specifiedIDFile {
return nil
}
- if argLen < 1 && !specifiedAll && !specifiedLatest && !specifiedPodIDFile {
- return errors.Errorf("you must provide at least one name or id")
+ if argLen < 1 && !specifiedAll && !specifiedLatest && !specifiedIDFile {
+ return errors.New("you must provide at least one name or id")
}
return nil
}
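Callers are expected to plug the new CheckAllLatestAndIDFile in as their cobra Args validator, passing the flag name they actually define ("cidfile" for containers, "pod-id-file" for pods). The command below is a hypothetical wiring example, not copied from the tree:

```go
package argsketch

import (
	"github.com/containers/podman/v4/cmd/podman/validate"
	"github.com/spf13/cobra"
)

// newStopCommand shows a typical wiring: the shared validator runs as the
// Args function and reads the --all, --latest, and --cidfile flags it names.
func newStopCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "stop [options] CONTAINER [CONTAINER...]",
		Short: "Stop one or more containers",
		Args: func(cmd *cobra.Command, args []string) error {
			return validate.CheckAllLatestAndIDFile(cmd, args, false, "cidfile")
		},
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().Bool("all", false, "Stop all running containers")
	cmd.Flags().Bool("latest", false, "Act on the latest container")
	cmd.Flags().StringArray("cidfile", nil, "Read container IDs from the given files")
	return cmd
}
```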
diff --git a/cmd/podman/volumes/create.go b/cmd/podman/volumes/create.go
index 1668c72de..0d19fab47 100644
--- a/cmd/podman/volumes/create.go
+++ b/cmd/podman/volumes/create.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/parse"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -17,6 +16,7 @@ var (
createCommand = &cobra.Command{
Use: "create [options] [NAME]",
+ Args: cobra.MaximumNArgs(1),
Short: "Create a new volume",
Long: createDescription,
RunE: create,
@@ -59,19 +59,16 @@ func create(cmd *cobra.Command, args []string) error {
var (
err error
)
- if len(args) > 1 {
- return errors.Errorf("too many arguments, create takes at most 1 argument")
- }
if len(args) > 0 {
createOpts.Name = args[0]
}
createOpts.Label, err = parse.GetAllLabels([]string{}, opts.Label)
if err != nil {
- return errors.Wrapf(err, "unable to process labels")
+ return fmt.Errorf("unable to process labels: %w", err)
}
createOpts.Options, err = parse.GetAllLabels([]string{}, opts.Opts)
if err != nil {
- return errors.Wrapf(err, "unable to process options")
+ return fmt.Errorf("unable to process options: %w", err)
}
response, err := registry.ContainerEngine().VolumeCreate(context.Background(), createOpts)
if err != nil {
diff --git a/cmd/podman/volumes/export.go b/cmd/podman/volumes/export.go
index 5086323f9..f9e08be87 100644
--- a/cmd/podman/volumes/export.go
+++ b/cmd/podman/volumes/export.go
@@ -2,14 +2,15 @@ package volumes
import (
"context"
+ "errors"
"fmt"
"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
+ "github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/utils"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -58,10 +59,13 @@ func export(cmd *cobra.Command, args []string) error {
return errors.New("expects output path, use --output=[path]")
}
inspectOpts.Type = common.VolumeType
- volumeData, _, err := containerEngine.VolumeInspect(ctx, args, inspectOpts)
+ volumeData, errs, err := containerEngine.VolumeInspect(ctx, args, inspectOpts)
if err != nil {
return err
}
+ if len(errs) > 0 {
+ return errorhandling.JoinErrors(errs)
+ }
if len(volumeData) < 1 {
return errors.New("no volume data found")
}
diff --git a/cmd/podman/volumes/import.go b/cmd/podman/volumes/import.go
index 988c5536d..8f3c7f27e 100644
--- a/cmd/podman/volumes/import.go
+++ b/cmd/podman/volumes/import.go
@@ -1,6 +1,7 @@
package volumes
import (
+ "errors"
"fmt"
"os"
@@ -8,8 +9,8 @@ import (
"github.com/containers/podman/v4/cmd/podman/parse"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/domain/entities"
+ "github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/utils"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -60,10 +61,14 @@ func importVol(cmd *cobra.Command, args []string) error {
}
inspectOpts.Type = common.VolumeType
- volumeData, _, err := containerEngine.VolumeInspect(ctx, volumes, inspectOpts)
+ inspectOpts.Type = common.VolumeType
+ volumeData, errs, err := containerEngine.VolumeInspect(ctx, volumes, inspectOpts)
if err != nil {
return err
}
+ if len(errs) > 0 {
+ return errorhandling.JoinErrors(errs)
+ }
if len(volumeData) < 1 {
return errors.New("no volume data found")
}
diff --git a/cmd/podman/volumes/inspect.go b/cmd/podman/volumes/inspect.go
index 7cf363f36..68ba2976a 100644
--- a/cmd/podman/volumes/inspect.go
+++ b/cmd/podman/volumes/inspect.go
@@ -1,12 +1,13 @@
package volumes
import (
+ "errors"
+
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/inspect"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
diff --git a/cmd/podman/volumes/list.go b/cmd/podman/volumes/list.go
index c14cf08bd..06118513d 100644
--- a/cmd/podman/volumes/list.go
+++ b/cmd/podman/volumes/list.go
@@ -2,6 +2,7 @@ package volumes
import (
"context"
+ "errors"
"fmt"
"os"
@@ -12,7 +13,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -119,7 +119,7 @@ func outputTemplate(cmd *cobra.Command, responses []*entities.VolumeListReport)
if !(noHeading || cliOpts.Quiet || cmd.Flag("format").Changed) {
if err := tmpl.Execute(w, headers); err != nil {
- return errors.Wrapf(err, "failed to write report column headers")
+ return fmt.Errorf("failed to write report column headers: %w", err)
}
}
return tmpl.Execute(w, responses)
diff --git a/cmd/podman/volumes/reload.go b/cmd/podman/volumes/reload.go
new file mode 100644
index 000000000..d0d76fb88
--- /dev/null
+++ b/cmd/podman/volumes/reload.go
@@ -0,0 +1,52 @@
+package volumes
+
+import (
+ "fmt"
+
+ "github.com/containers/common/pkg/completion"
+ "github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/cmd/podman/utils"
+ "github.com/containers/podman/v4/cmd/podman/validate"
+ "github.com/spf13/cobra"
+)
+
+var (
+ reloadDescription = `Check all configured volume plugins and update the libpod database with all available volumes.
+
+ Existing volumes are also removed from the database when they are no longer present in the plugin.`
+ reloadCommand = &cobra.Command{
+ Use: "reload",
+ Args: validate.NoArgs,
+ Short: "reload all volumes from volume plugins",
+ Long: reloadDescription,
+ RunE: reload,
+ ValidArgsFunction: completion.AutocompleteNone,
+ }
+)
+
+func init() {
+ registry.Commands = append(registry.Commands, registry.CliCommand{
+ Command: reloadCommand,
+ Parent: volumeCmd,
+ })
+}
+
+func reload(cmd *cobra.Command, args []string) error {
+ report, err := registry.ContainerEngine().VolumeReload(registry.Context())
+ if err != nil {
+ return err
+ }
+ printReload("Added", report.Added)
+ printReload("Removed", report.Removed)
+ errs := (utils.OutputErrors)(report.Errors)
+ return errs.PrintErrors()
+}
+
+func printReload(typ string, values []string) {
+ if len(values) > 0 {
+ fmt.Println(typ + ":")
+ for _, name := range values {
+ fmt.Println(name)
+ }
+ }
+}
diff --git a/cmd/podman/volumes/rm.go b/cmd/podman/volumes/rm.go
index 2012b7d3a..c160b8623 100644
--- a/cmd/podman/volumes/rm.go
+++ b/cmd/podman/volumes/rm.go
@@ -2,6 +2,7 @@ package volumes
import (
"context"
+ "errors"
"fmt"
"strings"
@@ -11,7 +12,6 @@ import (
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -80,15 +80,9 @@ func rm(cmd *cobra.Command, args []string) error {
}
func setExitCode(err error) {
- cause := errors.Cause(err)
- switch {
- case cause == define.ErrNoSuchVolume:
+ if errors.Is(err, define.ErrNoSuchVolume) || strings.Contains(err.Error(), define.ErrNoSuchVolume.Error()) {
registry.SetExitCode(1)
- case strings.Contains(cause.Error(), define.ErrNoSuchVolume.Error()):
- registry.SetExitCode(1)
- case cause == define.ErrVolumeBeingUsed:
- registry.SetExitCode(2)
- case strings.Contains(cause.Error(), define.ErrVolumeBeingUsed.Error()):
+ } else if errors.Is(err, define.ErrVolumeBeingUsed) || strings.Contains(err.Error(), define.ErrVolumeBeingUsed.Error()) {
registry.SetExitCode(2)
}
}
diff --git a/cmd/rootlessport/main.go b/cmd/rootlessport/main.go
index 5bd35a985..5410cd14a 100644
--- a/cmd/rootlessport/main.go
+++ b/cmd/rootlessport/main.go
@@ -6,6 +6,7 @@ package main
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -18,7 +19,6 @@ import (
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/pkg/rootlessport"
- "github.com/pkg/errors"
rkport "github.com/rootless-containers/rootlesskit/pkg/port"
rkbuiltin "github.com/rootless-containers/rootlesskit/pkg/port/builtin"
rkportutil "github.com/rootless-containers/rootlesskit/pkg/port/portutil"
@@ -226,8 +226,8 @@ outer:
// https://github.com/containers/podman/issues/11248
// Copy /dev/null to stdout and stderr to prevent SIGPIPE errors
if f, err := os.OpenFile("/dev/null", os.O_WRONLY, 0755); err == nil {
- unix.Dup2(int(f.Fd()), 1) // nolint:errcheck
- unix.Dup2(int(f.Fd()), 2) // nolint:errcheck
+ unix.Dup2(int(f.Fd()), 1) //nolint:errcheck
+ unix.Dup2(int(f.Fd()), 2) //nolint:errcheck
f.Close()
}
// write and close ReadyFD (convention is same as slirp4netns --ready-fd)
@@ -269,16 +269,16 @@ func handler(ctx context.Context, conn io.Reader, pm rkport.Manager) error {
dec := json.NewDecoder(conn)
err := dec.Decode(&childIP)
if err != nil {
- return errors.Wrap(err, "rootless port failed to decode ports")
+ return fmt.Errorf("rootless port failed to decode ports: %w", err)
}
portStatus, err := pm.ListPorts(ctx)
if err != nil {
- return errors.Wrap(err, "rootless port failed to list ports")
+ return fmt.Errorf("rootless port failed to list ports: %w", err)
}
for _, status := range portStatus {
err = pm.RemovePort(ctx, status.ID)
if err != nil {
- return errors.Wrap(err, "rootless port failed to remove port")
+ return fmt.Errorf("rootless port failed to remove port: %w", err)
}
}
// add the ports with the new child IP
@@ -287,7 +287,7 @@ func handler(ctx context.Context, conn io.Reader, pm rkport.Manager) error {
status.Spec.ChildIP = childIP
_, err = pm.AddPort(ctx, status.Spec)
if err != nil {
- return errors.Wrap(err, "rootless port failed to add port")
+ return fmt.Errorf("rootless port failed to add port: %w", err)
}
}
return nil
diff --git a/cmd/rootlessport/wsl_test.go b/cmd/rootlessport/wsl_test.go
index 83d7e3717..2c95251cd 100644
--- a/cmd/rootlessport/wsl_test.go
+++ b/cmd/rootlessport/wsl_test.go
@@ -20,7 +20,7 @@ type SpecData struct {
}
func TestDualStackSplit(t *testing.T) {
- //nolint
+ //nolint:revive,stylecheck
const (
IP4_ALL = "0.0.0.0"
IP4__LO = "127.0.0.1"
diff --git a/cmd/winpath/main.go b/cmd/winpath/main.go
index 6fbe72837..bb57e39de 100644
--- a/cmd/winpath/main.go
+++ b/cmd/winpath/main.go
@@ -12,6 +12,7 @@ import (
"syscall"
"unsafe"
+ "golang.org/x/sys/windows"
"golang.org/x/sys/windows/registry"
)
@@ -26,6 +27,7 @@ const (
Environment = "Environment"
Add operation = iota
Remove
+ Open
NotSpecified
)
@@ -37,6 +39,8 @@ func main() {
op = Add
case "remove":
op = Remove
+ case "open":
+ op = Open
}
}
@@ -46,6 +50,14 @@ func main() {
os.Exit(ERR_BAD_ARGS)
}
+ // Hidden operation as a workaround for the installer
+ if op == Open && len(os.Args) > 2 {
+ if err := winOpenFile(os.Args[2]); err != nil {
+ os.Exit(OPERATION_FAILED)
+ }
+ os.Exit(0)
+ }
+
if err := modify(op); err != nil {
os.Exit(OPERATION_FAILED)
}
@@ -119,7 +131,7 @@ func removePathFromRegistry(path string) error {
k, err := registry.OpenKey(registry.CURRENT_USER, Environment, registry.READ|registry.WRITE)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
- // Nothing to cleanup, the Environment registry key does not exist.
+ // Nothing to clean up, the Environment registry key does not exist.
return nil
}
return err
@@ -182,3 +194,9 @@ func alert(caption string) int {
return int(ret)
}
+
+func winOpenFile(file string) error {
+ verb, _ := syscall.UTF16PtrFromString("open")
+ fileW, _ := syscall.UTF16PtrFromString(file)
+ return windows.ShellExecute(0, verb, fileW, nil, nil, windows.SW_NORMAL)
+}
diff --git a/commands-demo.md b/commands-demo.md
index ececf0a22..50e2873b2 100644
--- a/commands-demo.md
+++ b/commands-demo.md
@@ -11,7 +11,7 @@
| [podman-commit(1)](https://podman.readthedocs.io/en/latest/markdown/podman-commit.1.html) | Create new image based on the changed container |
| [podman-container(1)](https://podman.readthedocs.io/en/latest/managecontainers.html) | Manage Containers |
| [podman-container-checkpoint(1)](https://podman.readthedocs.io/en/latest/markdown/podman-container-checkpoint.1.html) | Checkpoints one or more running containers |
-| [podman-container-cleanup(1)](https://podman.readthedocs.io/en/latest/markdown/podman-container-cleanup.1.html) | Cleanup the container's network and mountpoints |
+| [podman-container-cleanup(1)](https://podman.readthedocs.io/en/latest/markdown/podman-container-cleanup.1.html) | Clean up the container's network and mountpoints |
| [podman-container-exists(1)](https://podman.readthedocs.io/en/latest/markdown/podman-container-exists.1.html) | Check if an container exists in local storage |
| [podman-container-prune(1)](https://podman.readthedocs.io/en/latest/markdown/podman-container-prune.1.html) | Remove all stopped containers from local storage |
| [podman-container-restore(1)](https://podman.readthedocs.io/en/latest/markdown/podman-container-restore.1.html) | Restores one or more containers from a checkpoint |
diff --git a/completions/bash/podman b/completions/bash/podman
index c7171a9cc..6e6be35a7 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -56,7 +56,7 @@ __podman_get_completion_results() {
directive=0
fi
__podman_debug "The completion directive is: ${directive}"
- __podman_debug "The completions are: ${out[*]}"
+ __podman_debug "The completions are: ${out}"
}
__podman_process_completion_results() {
@@ -89,13 +89,18 @@ __podman_process_completion_results() {
fi
fi
+ # Separate activeHelp from normal completions
+ local completions=()
+ local activeHelp=()
+ __podman_extract_activeHelp
+
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local fullFilter filter filteringCmd
- # Do not use quotes around the $out variable or else newline
+ # Do not use quotes around the $completions variable or else newline
# characters will be kept.
- for filter in ${out[*]}; do
+ for filter in ${completions[*]}; do
fullFilter+="$filter|"
done
@@ -107,7 +112,7 @@ __podman_process_completion_results() {
# Use printf to strip any trailing newline
local subdir
- subdir=$(printf "%s" "${out[0]}")
+ subdir=$(printf "%s" "${completions[0]}")
if [ -n "$subdir" ]; then
__podman_debug "Listing directories in $subdir"
pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
@@ -121,6 +126,43 @@ __podman_process_completion_results() {
__podman_handle_special_char "$cur" :
__podman_handle_special_char "$cur" =
+
+ # Print the activeHelp statements before we finish
+ if [ ${#activeHelp} -ne 0 ]; then
+ printf "\n";
+ printf "%s\n" "${activeHelp[@]}"
+ printf "\n"
+
+ # The prompt format is only available from bash 4.4.
+ # We test if it is available before using it.
+ if (x=${PS1@P}) 2> /dev/null; then
+ printf "%s" "${PS1@P}${COMP_LINE[@]}"
+ else
+ # Can't print the prompt. Just print the
+ # text the user had typed, it is workable enough.
+ printf "%s" "${COMP_LINE[@]}"
+ fi
+ fi
+}
+
+# Separate activeHelp lines from real completions.
+# Fills the $activeHelp and $completions arrays.
+__podman_extract_activeHelp() {
+ local activeHelpMarker="_activeHelp_ "
+ local endIndex=${#activeHelpMarker}
+
+ while IFS='' read -r comp; do
+ if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
+ comp=${comp:endIndex}
+ __podman_debug "ActiveHelp found: $comp"
+ if [ -n "$comp" ]; then
+ activeHelp+=("$comp")
+ fi
+ else
+ # Not an activeHelp line but a normal completion
+ completions+=("$comp")
+ fi
+ done < <(printf "%s\n" "${out}")
}
__podman_handle_completion_types() {
@@ -132,17 +174,16 @@ __podman_handle_completion_types() {
# If the user requested inserting one completion at a time, or all
# completions at once on the command-line we must remove the descriptions.
# https://github.com/spf13/cobra/issues/1508
- local tab comp
- tab=$(printf '\t')
+ local tab=$'\t' comp
while IFS='' read -r comp; do
+ [[ -z $comp ]] && continue
# Strip any description
comp=${comp%%$tab*}
# Only consider the completions that match
- comp=$(compgen -W "$comp" -- "$cur")
- if [ -n "$comp" ]; then
+ if [[ $comp == "$cur"* ]]; then
COMPREPLY+=("$comp")
fi
- done < <(printf "%s\n" "${out[@]}")
+ done < <(printf "%s\n" "${completions[@]}")
;;
*)
@@ -153,44 +194,37 @@ __podman_handle_completion_types() {
}
__podman_handle_standard_completion_case() {
- local tab comp
- tab=$(printf '\t')
+ local tab=$'\t' comp
+
+ # Short circuit to optimize if we don't have descriptions
+ if [[ "${completions[*]}" != *$tab* ]]; then
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
+ return 0
+ fi
local longest=0
+ local compline
# Look for the longest completion so that we can format things nicely
- while IFS='' read -r comp; do
+ while IFS='' read -r compline; do
+ [[ -z $compline ]] && continue
# Strip any description before checking the length
- comp=${comp%%$tab*}
+ comp=${compline%%$tab*}
# Only consider the completions that match
- comp=$(compgen -W "$comp" -- "$cur")
+ [[ $comp == "$cur"* ]] || continue
+ COMPREPLY+=("$compline")
if ((${#comp}>longest)); then
longest=${#comp}
fi
- done < <(printf "%s\n" "${out[@]}")
-
- local completions=()
- while IFS='' read -r comp; do
- if [ -z "$comp" ]; then
- continue
- fi
-
- __podman_debug "Original comp: $comp"
- comp="$(__podman_format_comp_descriptions "$comp" "$longest")"
- __podman_debug "Final comp: $comp"
- completions+=("$comp")
- done < <(printf "%s\n" "${out[@]}")
-
- while IFS='' read -r comp; do
- COMPREPLY+=("$comp")
- done < <(compgen -W "${completions[*]}" -- "$cur")
+ done < <(printf "%s\n" "${completions[@]}")
# If there is a single completion left, remove the description text
if [ ${#COMPREPLY[*]} -eq 1 ]; then
__podman_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
- comp="${COMPREPLY[0]%% *}"
+ comp="${COMPREPLY[0]%%$tab*}"
__podman_debug "Removed description from single completion, which is now: ${comp}"
- COMPREPLY=()
- COMPREPLY+=("$comp")
+ COMPREPLY[0]=$comp
+ else # Format the descriptions
+ __podman_format_comp_descriptions $longest
fi
}
@@ -209,45 +243,48 @@ __podman_handle_special_char()
__podman_format_comp_descriptions()
{
- local tab
- tab=$(printf '\t')
- local comp="$1"
- local longest=$2
-
- # Properly format the description string which follows a tab character if there is one
- if [[ "$comp" == *$tab* ]]; then
- desc=${comp#*$tab}
- comp=${comp%%$tab*}
-
- # $COLUMNS stores the current shell width.
- # Remove an extra 4 because we add 2 spaces and 2 parentheses.
- maxdesclength=$(( COLUMNS - longest - 4 ))
-
- # Make sure we can fit a description of at least 8 characters
- # if we are to align the descriptions.
- if [[ $maxdesclength -gt 8 ]]; then
- # Add the proper number of spaces to align the descriptions
- for ((i = ${#comp} ; i < longest ; i++)); do
- comp+=" "
- done
- else
- # Don't pad the descriptions so we can fit more text after the completion
- maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
- fi
+ local tab=$'\t'
+ local comp desc maxdesclength
+ local longest=$1
+
+ local i ci
+ for ci in ${!COMPREPLY[*]}; do
+ comp=${COMPREPLY[ci]}
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ __podman_debug "Original comp: $comp"
+ desc=${comp#*$tab}
+ comp=${comp%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if [[ $maxdesclength -gt 8 ]]; then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
- # If there is enough space for any description text,
- # truncate the descriptions that are too long for the shell width
- if [ $maxdesclength -gt 0 ]; then
- if [ ${#desc} -gt $maxdesclength ]; then
- desc=${desc:0:$(( maxdesclength - 1 ))}
- desc+="…"
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if [ $maxdesclength -gt 0 ]; then
+ if [ ${#desc} -gt $maxdesclength ]; then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
fi
- comp+=" ($desc)"
+ COMPREPLY[ci]=$comp
+ __podman_debug "Final comp: $comp"
fi
- fi
-
- # Must use printf to escape all special characters
- printf "%q" "${comp}"
+ done
}
__start_podman()
diff --git a/completions/bash/podman-remote b/completions/bash/podman-remote
index b5150e208..b8343c270 100644
--- a/completions/bash/podman-remote
+++ b/completions/bash/podman-remote
@@ -56,7 +56,7 @@ __podman-remote_get_completion_results() {
directive=0
fi
__podman-remote_debug "The completion directive is: ${directive}"
- __podman-remote_debug "The completions are: ${out[*]}"
+ __podman-remote_debug "The completions are: ${out}"
}
__podman-remote_process_completion_results() {
@@ -89,13 +89,18 @@ __podman-remote_process_completion_results() {
fi
fi
+ # Separate activeHelp from normal completions
+ local completions=()
+ local activeHelp=()
+ __podman-remote_extract_activeHelp
+
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local fullFilter filter filteringCmd
- # Do not use quotes around the $out variable or else newline
+ # Do not use quotes around the $completions variable or else newline
# characters will be kept.
- for filter in ${out[*]}; do
+ for filter in ${completions[*]}; do
fullFilter+="$filter|"
done
@@ -107,7 +112,7 @@ __podman-remote_process_completion_results() {
# Use printf to strip any trailing newline
local subdir
- subdir=$(printf "%s" "${out[0]}")
+ subdir=$(printf "%s" "${completions[0]}")
if [ -n "$subdir" ]; then
__podman-remote_debug "Listing directories in $subdir"
pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
@@ -121,6 +126,43 @@ __podman-remote_process_completion_results() {
__podman-remote_handle_special_char "$cur" :
__podman-remote_handle_special_char "$cur" =
+
+ # Print the activeHelp statements before we finish
+ if [ ${#activeHelp} -ne 0 ]; then
+ printf "\n";
+ printf "%s\n" "${activeHelp[@]}"
+ printf "\n"
+
+ # The prompt format is only available from bash 4.4.
+ # We test if it is available before using it.
+ if (x=${PS1@P}) 2> /dev/null; then
+ printf "%s" "${PS1@P}${COMP_LINE[@]}"
+ else
+ # Can't print the prompt. Just print the
+ # text the user had typed, it is workable enough.
+ printf "%s" "${COMP_LINE[@]}"
+ fi
+ fi
+}
+
+# Separate activeHelp lines from real completions.
+# Fills the $activeHelp and $completions arrays.
+__podman-remote_extract_activeHelp() {
+ local activeHelpMarker="_activeHelp_ "
+ local endIndex=${#activeHelpMarker}
+
+ while IFS='' read -r comp; do
+ if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
+ comp=${comp:endIndex}
+ __podman-remote_debug "ActiveHelp found: $comp"
+ if [ -n "$comp" ]; then
+ activeHelp+=("$comp")
+ fi
+ else
+ # Not an activeHelp line but a normal completion
+ completions+=("$comp")
+ fi
+ done < <(printf "%s\n" "${out}")
}
__podman-remote_handle_completion_types() {
@@ -132,17 +174,16 @@ __podman-remote_handle_completion_types() {
# If the user requested inserting one completion at a time, or all
# completions at once on the command-line we must remove the descriptions.
# https://github.com/spf13/cobra/issues/1508
- local tab comp
- tab=$(printf '\t')
+ local tab=$'\t' comp
while IFS='' read -r comp; do
+ [[ -z $comp ]] && continue
# Strip any description
comp=${comp%%$tab*}
# Only consider the completions that match
- comp=$(compgen -W "$comp" -- "$cur")
- if [ -n "$comp" ]; then
+ if [[ $comp == "$cur"* ]]; then
COMPREPLY+=("$comp")
fi
- done < <(printf "%s\n" "${out[@]}")
+ done < <(printf "%s\n" "${completions[@]}")
;;
*)
@@ -153,44 +194,37 @@ __podman-remote_handle_completion_types() {
}
__podman-remote_handle_standard_completion_case() {
- local tab comp
- tab=$(printf '\t')
+ local tab=$'\t' comp
+
+ # Short circuit to optimize if we don't have descriptions
+ if [[ "${completions[*]}" != *$tab* ]]; then
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
+ return 0
+ fi
local longest=0
+ local compline
# Look for the longest completion so that we can format things nicely
- while IFS='' read -r comp; do
+ while IFS='' read -r compline; do
+ [[ -z $compline ]] && continue
# Strip any description before checking the length
- comp=${comp%%$tab*}
+ comp=${compline%%$tab*}
# Only consider the completions that match
- comp=$(compgen -W "$comp" -- "$cur")
+ [[ $comp == "$cur"* ]] || continue
+ COMPREPLY+=("$compline")
if ((${#comp}>longest)); then
longest=${#comp}
fi
- done < <(printf "%s\n" "${out[@]}")
-
- local completions=()
- while IFS='' read -r comp; do
- if [ -z "$comp" ]; then
- continue
- fi
-
- __podman-remote_debug "Original comp: $comp"
- comp="$(__podman-remote_format_comp_descriptions "$comp" "$longest")"
- __podman-remote_debug "Final comp: $comp"
- completions+=("$comp")
- done < <(printf "%s\n" "${out[@]}")
-
- while IFS='' read -r comp; do
- COMPREPLY+=("$comp")
- done < <(compgen -W "${completions[*]}" -- "$cur")
+ done < <(printf "%s\n" "${completions[@]}")
# If there is a single completion left, remove the description text
if [ ${#COMPREPLY[*]} -eq 1 ]; then
__podman-remote_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
- comp="${COMPREPLY[0]%% *}"
+ comp="${COMPREPLY[0]%%$tab*}"
__podman-remote_debug "Removed description from single completion, which is now: ${comp}"
- COMPREPLY=()
- COMPREPLY+=("$comp")
+ COMPREPLY[0]=$comp
+ else # Format the descriptions
+ __podman-remote_format_comp_descriptions $longest
fi
}
@@ -209,45 +243,48 @@ __podman-remote_handle_special_char()
__podman-remote_format_comp_descriptions()
{
- local tab
- tab=$(printf '\t')
- local comp="$1"
- local longest=$2
-
- # Properly format the description string which follows a tab character if there is one
- if [[ "$comp" == *$tab* ]]; then
- desc=${comp#*$tab}
- comp=${comp%%$tab*}
-
- # $COLUMNS stores the current shell width.
- # Remove an extra 4 because we add 2 spaces and 2 parentheses.
- maxdesclength=$(( COLUMNS - longest - 4 ))
-
- # Make sure we can fit a description of at least 8 characters
- # if we are to align the descriptions.
- if [[ $maxdesclength -gt 8 ]]; then
- # Add the proper number of spaces to align the descriptions
- for ((i = ${#comp} ; i < longest ; i++)); do
- comp+=" "
- done
- else
- # Don't pad the descriptions so we can fit more text after the completion
- maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
- fi
+ local tab=$'\t'
+ local comp desc maxdesclength
+ local longest=$1
+
+ local i ci
+ for ci in ${!COMPREPLY[*]}; do
+ comp=${COMPREPLY[ci]}
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ __podman-remote_debug "Original comp: $comp"
+ desc=${comp#*$tab}
+ comp=${comp%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if [[ $maxdesclength -gt 8 ]]; then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
- # If there is enough space for any description text,
- # truncate the descriptions that are too long for the shell width
- if [ $maxdesclength -gt 0 ]; then
- if [ ${#desc} -gt $maxdesclength ]; then
- desc=${desc:0:$(( maxdesclength - 1 ))}
- desc+="…"
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if [ $maxdesclength -gt 0 ]; then
+ if [ ${#desc} -gt $maxdesclength ]; then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
fi
- comp+=" ($desc)"
+ COMPREPLY[ci]=$comp
+ __podman-remote_debug "Final comp: $comp"
fi
- fi
-
- # Must use printf to escape all special characters
- printf "%q" "${comp}"
+ done
}
__start_podman-remote()
diff --git a/completions/fish/podman-remote.fish b/completions/fish/podman-remote.fish
index bcfacbb00..67c964133 100644
--- a/completions/fish/podman-remote.fish
+++ b/completions/fish/podman-remote.fish
@@ -18,7 +18,8 @@ function __podman_remote_perform_completion
__podman_remote_debug "args: $args"
__podman_remote_debug "last arg: $lastArg"
- set -l requestComp "$args[1] __complete $args[2..-1] $lastArg"
+ # Disable ActiveHelp which is not supported for fish shell
+ set -l requestComp "PODMAN_REMOTE_ACTIVE_HELP=0 $args[1] __complete $args[2..-1] $lastArg"
__podman_remote_debug "Calling $requestComp"
set -l results (eval $requestComp 2> /dev/null)
diff --git a/completions/fish/podman.fish b/completions/fish/podman.fish
index 6394535a9..be18c45cd 100644
--- a/completions/fish/podman.fish
+++ b/completions/fish/podman.fish
@@ -18,7 +18,8 @@ function __podman_perform_completion
__podman_debug "args: $args"
__podman_debug "last arg: $lastArg"
- set -l requestComp "$args[1] __complete $args[2..-1] $lastArg"
+ # Disable ActiveHelp which is not supported for fish shell
+ set -l requestComp "PODMAN_ACTIVE_HELP=0 $args[1] __complete $args[2..-1] $lastArg"
__podman_debug "Calling $requestComp"
set -l results (eval $requestComp 2> /dev/null)
diff --git a/completions/powershell/podman-remote.ps1 b/completions/powershell/podman-remote.ps1
index 2edc79ffb..d810ab8dd 100644
--- a/completions/powershell/podman-remote.ps1
+++ b/completions/powershell/podman-remote.ps1
@@ -44,6 +44,7 @@ Register-ArgumentCompleter -CommandName 'podman-remote' -ScriptBlock {
# Prepare the command to request completions for the program.
# Split the command at the first space to separate the program and arguments.
$Program,$Arguments = $Command.Split(" ",2)
+
$RequestComp="$Program __complete $Arguments"
__podman-remote_debug "RequestComp: $RequestComp"
@@ -73,11 +74,13 @@ Register-ArgumentCompleter -CommandName 'podman-remote' -ScriptBlock {
}
__podman-remote_debug "Calling $RequestComp"
+ # First disable ActiveHelp which is not supported for Powershell
+ $env:PODMAN_REMOTE_ACTIVE_HELP=0
+
#call the command store the output in $out and redirect stderr and stdout to null
# $Out is an array contains each line per element
Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
-
# get directive from last line
[int]$Directive = $Out[-1].TrimStart(':')
if ($Directive -eq "") {
diff --git a/completions/powershell/podman.ps1 b/completions/powershell/podman.ps1
index 1cd89d0a0..4d94b6fe8 100644
--- a/completions/powershell/podman.ps1
+++ b/completions/powershell/podman.ps1
@@ -44,6 +44,7 @@ Register-ArgumentCompleter -CommandName 'podman' -ScriptBlock {
# Prepare the command to request completions for the program.
# Split the command at the first space to separate the program and arguments.
$Program,$Arguments = $Command.Split(" ",2)
+
$RequestComp="$Program __complete $Arguments"
__podman_debug "RequestComp: $RequestComp"
@@ -73,11 +74,13 @@ Register-ArgumentCompleter -CommandName 'podman' -ScriptBlock {
}
__podman_debug "Calling $RequestComp"
+ # First disable ActiveHelp which is not supported for Powershell
+ $env:PODMAN_ACTIVE_HELP=0
+
#call the command store the output in $out and redirect stderr and stdout to null
# $Out is an array contains each line per element
Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
-
# get directive from last line
[int]$Directive = $Out[-1].TrimStart(':')
if ($Directive -eq "") {
diff --git a/completions/zsh/_podman b/completions/zsh/_podman
index 7c3d6faf3..e2d086108 100644
--- a/completions/zsh/_podman
+++ b/completions/zsh/_podman
@@ -1,4 +1,4 @@
-#compdef _podman podman
+#compdef podman
# zsh completion for podman -*- shell-script -*-
@@ -86,7 +86,24 @@ _podman()
return
fi
+ local activeHelpMarker="_activeHelp_ "
+ local endIndex=${#activeHelpMarker}
+ local startIndex=$((${#activeHelpMarker}+1))
+ local hasActiveHelp=0
while IFS='\n' read -r comp; do
+ # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
+ if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
+ __podman_debug "ActiveHelp found: $comp"
+ comp="${comp[$startIndex,-1]}"
+ if [ -n "$comp" ]; then
+ compadd -x "${comp}"
+ __podman_debug "ActiveHelp will need delimiter"
+ hasActiveHelp=1
+ fi
+
+ continue
+ fi
+
if [ -n "$comp" ]; then
# If requested, completions are returned with a description.
# The description is preceded by a TAB character.
@@ -94,7 +111,7 @@ _podman()
# We first need to escape any : as part of the completion itself.
comp=${comp//:/\\:}
- local tab=$(printf '\t')
+ local tab="$(printf '\t')"
comp=${comp//$tab/:}
__podman_debug "Adding completion: ${comp}"
@@ -103,6 +120,17 @@ _podman()
fi
done < <(printf "%s\n" "${out[@]}")
+ # Add a delimiter after the activeHelp statements, but only if:
+ # - there are completions following the activeHelp statements, or
+ # - file completion will be performed (so there will be choices after the activeHelp)
+ if [ $hasActiveHelp -eq 1 ]; then
+ if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
+ __podman_debug "Adding activeHelp delimiter"
+ compadd -x "--"
+ hasActiveHelp=0
+ fi
+ fi
+
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
__podman_debug "Activating nospace."
noSpace="-S ''"
diff --git a/completions/zsh/_podman-remote b/completions/zsh/_podman-remote
index a2d24af25..2d7e7a549 100644
--- a/completions/zsh/_podman-remote
+++ b/completions/zsh/_podman-remote
@@ -1,4 +1,4 @@
-#compdef _podman-remote podman-remote
+#compdef podman-remote
# zsh completion for podman-remote -*- shell-script -*-
@@ -86,7 +86,24 @@ _podman-remote()
return
fi
+ local activeHelpMarker="_activeHelp_ "
+ local endIndex=${#activeHelpMarker}
+ local startIndex=$((${#activeHelpMarker}+1))
+ local hasActiveHelp=0
while IFS='\n' read -r comp; do
+ # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
+ if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
+ __podman-remote_debug "ActiveHelp found: $comp"
+ comp="${comp[$startIndex,-1]}"
+ if [ -n "$comp" ]; then
+ compadd -x "${comp}"
+ __podman-remote_debug "ActiveHelp will need delimiter"
+ hasActiveHelp=1
+ fi
+
+ continue
+ fi
+
if [ -n "$comp" ]; then
# If requested, completions are returned with a description.
# The description is preceded by a TAB character.
@@ -94,7 +111,7 @@ _podman-remote()
# We first need to escape any : as part of the completion itself.
comp=${comp//:/\\:}
- local tab=$(printf '\t')
+ local tab="$(printf '\t')"
comp=${comp//$tab/:}
__podman-remote_debug "Adding completion: ${comp}"
@@ -103,6 +120,17 @@ _podman-remote()
fi
done < <(printf "%s\n" "${out[@]}")
+ # Add a delimiter after the activeHelp statements, but only if:
+ # - there are completions following the activeHelp statements, or
+ # - file completion will be performed (so there will be choices after the activeHelp)
+ if [ $hasActiveHelp -eq 1 ]; then
+ if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
+ __podman-remote_debug "Adding activeHelp delimiter"
+ compadd -x "--"
+ hasActiveHelp=0
+ fi
+ fi
+
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
__podman-remote_debug "Activating nospace."
noSpace="-S ''"
diff --git a/contrib/cirrus/CIModes.md b/contrib/cirrus/CIModes.md
new file mode 100644
index 000000000..8b1e33bb1
--- /dev/null
+++ b/contrib/cirrus/CIModes.md
@@ -0,0 +1,129 @@
+The following is an (incomplete) list of the primary contexts and runtime
+"modes" supported by podman CI. Note that there may be additional checks
+done regarding "skipping work" in the `runner.sh` script. This document
+only details the controls at the `.cirrus.yml` level.
+
+## Visualization
+
+The relationship between tasks can be incredibly hard to understand by
+staring at the YAML.
+[A tool exists](https://github.com/containers/automation/tree/main/cirrus-task-map)
+for producing a graph (flow-chart) of the `.cirrus.yml` file. A (possibly
+outdated) example of its output can be seen below:
+
+![cirrus-task-map output](https://github.com/containers/podman/wiki/cirrus-map.svg)
+
+## Implementation notes
+
++ The `skip` conditional should never be used for tasks.
+ While it's arguably easier to read than `only_if`, it leads to a cluttered
+ status output that's harder to page through when reviewing PRs, whereas
+ `only_if` bypasses creation of the task (at runtime) completely.
+ Also, by sticking to one conditional style, it's easier to re-use the YAML
+ statements across multiple tasks.
+
++ The only variables which can be used as part of conditions are defined by
+ Cirrus-CI.
+ [The list is documented](https://cirrus-ci.org/guide/writing-tasks/#environment-variables). References to any variables defined in YAML will **not** behave how
+ you expect; don't use them!
+
+* Some Cirrus-CI defined variables contain non-empty values outside their
+ obvious context. For example, when running for a PR, a task will have
+ `$CIRRUS_BRANCH` set to `pull/<number>`.
+
+* Conditions which use positive or negative regular-expressions have several
+ "flags" set: "Multi-line" and "Case-insensitive".
+
+## Testing
+
+Executing most of the modes can be mocked by forcing values for (otherwise)
+Cirrus-CI defined variables, for example `$CIRRUS_TAG`. As of the publishing
+of this document, it's not possible to override the behavior of `$CIRRUS_PR`.
+
+## Cirrus Task contexts and runtime modes
+
+### Intended general PR Tasks (*italic*: matrix)
++ ext_svc_check
++ automation
++ *build*
++ validate
++ bindings
++ swagger
++ consistency
++ *alt_build*
++ osx_alt_build
++ docker-py_test
++ *unit_test*
++ apiv2_test
++ *compose_test*
++ *local_integration_test*
++ *remote_integration_test*
++ *container_integration_test*
++ *rootless_integration_test*
++ *local_system_test*
++ *remote_system_test*
++ *rootless_remote_system_test*
++ *buildah_bud_test*
++ *rootless_system_test*
++ rootless_gitlab_test
++ *upgrade_test*
++ meta
++ success
++ artifacts
+
+### Intended for PR w/ "release" or "bump" in title:
++ (All the general PR tasks above)
++ release_test
+
+### Intended `[CI:DOCS]` PR Tasks:
++ ext_svc_check
++ automation
++ *build*
++ validate
++ swagger
++ consistency
++ meta
++ success
+
+### Intended `[CI:BUILD]` PR Tasks:
++ ext_svc_check
++ automation
++ *build*
++ validate
++ consistency
++ *alt_build*
++ osx_alt_build
++ test_image_build
++ meta
++ success
++ artifacts
+
+### Intended Branch tasks (and Cirrus-cron jobs, except "multiarch"):
++ ext_svc_check
++ *build*
++ swagger
++ *alt_build*
++ osx_alt_build
++ *local_system_test*
++ *remote_system_test*
++ *rootless_remote_system_test*
++ *rootless_system_test*
++ meta
++ success
++ artifacts
+
+### Intended for "multiarch" Cirrus-Cron (always a branch):
++ ext_svc_check
++ image_build
++ meta
++ success
+
+### Intended for new Tag tasks:
++ *build*
++ swagger
++ *alt_build*
++ osx_alt_build
++ meta
++ success
++ artifacts
++ release
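The "Testing" section of the new CIModes.md notes that most modes can be mocked by forcing Cirrus-defined variables such as `$CIRRUS_TAG`. Before forcing one, it helps to know which task conditions actually reference it; a low-tech, repo-local sketch (no Cirrus access required):

    # Show every line in .cirrus.yml that mentions the variable to be forced,
    # with a little context, to see which only_if conditions it drives.
    grep -n -B2 'CIRRUS_TAG' .cirrus.yml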
diff --git a/contrib/cirrus/cirrus_yaml_test.py b/contrib/cirrus/cirrus_yaml_test.py
index a7fff8d3f..3968b8b1b 100755
--- a/contrib/cirrus/cirrus_yaml_test.py
+++ b/contrib/cirrus/cirrus_yaml_test.py
@@ -26,7 +26,7 @@ class TestCaseBase(unittest.TestCase):
class TestDependsOn(TestCaseBase):
ALL_TASK_NAMES = None
- SUCCESS_DEPS_EXCLUDE = set(['success', 'artifacts',
+ SUCCESS_DEPS_EXCLUDE = set(['success', 'artifacts', 'podman_machine',
'test_image_build', 'release', 'release_test'])
def setUp(self):
diff --git a/contrib/cirrus/ext_svc_check.sh b/contrib/cirrus/ext_svc_check.sh
index 92ac4e93a..146919c39 100755
--- a/contrib/cirrus/ext_svc_check.sh
+++ b/contrib/cirrus/ext_svc_check.sh
@@ -25,6 +25,23 @@ cat ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/required_host_ports.txt | \
fi
done
-# TODO: Pull images required during testing into /dev/null
+# Verify we can pull metadata from a few key testing images on quay.io
+# in the 'libpod' namespace. This is mostly aimed at validating the
+# quay.io service is up and responsive. Images were hand-picked with
+# egrep -ro 'quay.io/libpod/.+:latest' test | sort -u
+TEST_IMGS=(\
+ alpine:latest
+ busybox:latest
+ alpine_labels:latest
+ alpine_nginx:latest
+ alpine_healthcheck:latest
+ badhealthcheck:latest
+ cirros:latest
+)
-# TODO: Refresh DNF package-cache into /dev/null
+echo "Checking quay.io test image accessibility"
+for testimg in "${TEST_IMGS[@]}"; do
+ fqin="quay.io/libpod/$testimg"
+ echo " $fqin"
+ skopeo inspect --retry-times 5 "docker://$fqin" | jq . > /dev/null
+done
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 5d3e43c50..e7ea05867 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -135,6 +135,7 @@ setup_rootless() {
req_env_vars GOPATH GOSRC SECRET_ENV_RE
ROOTLESS_USER="${ROOTLESS_USER:-some${RANDOM}dude}"
+ ROOTLESS_UID=""
local rootless_uid
local rootless_gid
@@ -158,6 +159,7 @@ setup_rootless() {
cd $GOSRC || exit 1
# Guarantee independence from specific values
rootless_uid=$[RANDOM+1000]
+ ROOTLESS_UID=$rootless_uid
rootless_gid=$[RANDOM+1000]
msg "creating $rootless_uid:$rootless_gid $ROOTLESS_USER user"
groupadd -g $rootless_gid $ROOTLESS_USER
@@ -173,7 +175,7 @@ setup_rootless() {
ssh-keygen -t ed25519 -P "" -f "/home/$ROOTLESS_USER/.ssh/id_ed25519"
ssh-keygen -t rsa -P "" -f "/home/$ROOTLESS_USER/.ssh/id_rsa"
- msg "Setup authorized_keys"
+ msg "Set up authorized_keys"
cat $HOME/.ssh/*.pub /home/$ROOTLESS_USER/.ssh/*.pub >> $HOME/.ssh/authorized_keys
cat $HOME/.ssh/*.pub /home/$ROOTLESS_USER/.ssh/*.pub >> /home/$ROOTLESS_USER/.ssh/authorized_keys
@@ -186,9 +188,9 @@ setup_rootless() {
# never be any non-localhost connections made from tests (using strict-mode).
# If there are, it's either a security problem or a broken test, both of which
# we want to lead to test failures.
- msg " setup known_hosts for $USER"
+ msg " set up known_hosts for $USER"
ssh-keyscan localhost > /root/.ssh/known_hosts
- msg " setup known_hosts for $ROOTLESS_USER"
+ msg " set up known_hosts for $ROOTLESS_USER"
# Maintain access-permission consistency with all other .ssh files.
install -Z -m 700 -o $ROOTLESS_USER -g $ROOTLESS_USER \
/root/.ssh/known_hosts /home/$ROOTLESS_USER/.ssh/known_hosts
@@ -225,11 +227,6 @@ use_netavark() {
export NETWORK_BACKEND=netavark # needed for install_test_configs()
msg "Removing any/all CNI configuration"
rm -rvf /etc/cni/net.d/*
-
- # TODO: Remove this when netavark/aardvark-dns development slows down
- warn "Updating netavark/aardvark-dns to avoid frequent VM image rebuilds"
- # N/B: This is coming from updates-testing repo in F36
- lilto dnf update -y netavark aardvark-dns
}
# Remove all files provided by the distro version of podman.
diff --git a/contrib/cirrus/logformatter b/contrib/cirrus/logformatter
index e45f03df9..59969c3e7 100755
--- a/contrib/cirrus/logformatter
+++ b/contrib/cirrus/logformatter
@@ -190,6 +190,22 @@ END_HTML
print { $out_fh } "<h2>Synopsis</h2>\n<hr/>\n",
job_synopsis($test_name), "<hr/>\n";
+ # FOR DEBUGGING: dump environment, but in HTML comments to not clutter
+ # This is safe. There is a TOKEN envariable, but it's not sensitive.
+ # There are no sensitive/secret values in our execution environment,
+ # but we're careful anyway. $SECRET_ENV_RE is set in lib.sh
+ my $filter_re = $ENV{SECRET_ENV_RE} || 'ACCOUNT|GC[EP]|PASSW|SECRET|TOKEN';
+ $filter_re .= '|BASH_FUNC'; # These are long and un-useful
+
+ print { $out_fh } "<!-- Environment: -->\n";
+ for my $e (sort keys %ENV) {
+ next if $e =~ /$filter_re/;
+
+ my $val = escapeHTML($ENV{$e});
+ $val =~ s/--/-&#x002D;/g; # double dash not valid in comments
+ printf { $out_fh } "<!-- %-20s %s -->\n", $e, $val;
+ }
+
# State variables
my $previous_timestamp = ''; # timestamp of previous line
my $cirrus_task; # Cirrus task number, used for linking
@@ -538,27 +554,24 @@ END_HTML
# If Cirrus magic envariables are available, write a link to results.
# FIXME: it'd be so nice to make this a clickable live link.
#
- # STATIC_MAGIC_BLOB is the name of a google-storage bucket. It is
- # unlikely to change often, but if it does you will suddenly start
- # seeing errors when trying to view formatted logs:
- #
- # AccessDeniedAccess denied.Anonymous caller does not have storage.objects.get access to the Google Cloud Storage object.
- #
- # This happened in July 2020 when github.com/containers/libpod was
- # renamed to podman. If something like that ever happens again, you
- # will need to get the new magic blob value from:
- #
- # https://console.cloud.google.com/storage/browser?project=libpod-218412
+ # As of June 2022 we use the Cirrus API[1] as the source of our logs,
+ # instead of linking directly to googleapis.com. This will allow us
+ # to abstract cloud-specific details, so we can one day use Amazon cloud.
+ # See #14569 for more info.
#
- # You will also probably need to set the bucket Public by clicking on
- # the bucket name, then the Permissions tab. This is safe, since this
- # project is fully open-source.
- if ($have_formatted_log && $ENV{CIRRUS_TASK_ID}) {
- my $URL_BASE = "https://storage.googleapis.com";
- my $STATIC_MAGIC_BLOB = "cirrus-ci-6707778565701632-fcae48";
- my $ARTIFACT_NAME = "html";
-
- my $URL = "${URL_BASE}/${STATIC_MAGIC_BLOB}/artifacts/$ENV{CIRRUS_REPO_FULL_NAME}/$ENV{CIRRUS_TASK_ID}/${ARTIFACT_NAME}/${outfile}";
+ # [1] https://cirrus-ci.org/guide/writing-tasks/#latest-build-artifacts
+ if ($have_formatted_log && $ENV{CIRRUS_BUILD_ID} && $ENV{CIRRUS_TASK_NAME}) {
+ my $URL_BASE = "https://api.cirrus-ci.com";
+ my $build_id = $ENV{CIRRUS_BUILD_ID};
+ my $task_name = $ENV{CIRRUS_TASK_NAME};
+
+ # Escape spaces in task names ("int fedora 35 podman root etc")
+ $task_name =~ s/\s/%20/g;
+
+ # URL is long and cumbersome and duplicative. The task name cannot be
+ # reduced; the file name could, but I choose to leave it because I
+ # sometimes download HTML logs and oh how I hate "log.html" filenames.
+ my $URL = "${URL_BASE}/v1/artifact/build/$build_id/$task_name/html/${outfile}";
print "\n\nAnnotated results:\n $URL\n";
}
diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh
index 83a81bd0a..762a3b501 100755
--- a/contrib/cirrus/runner.sh
+++ b/contrib/cirrus/runner.sh
@@ -142,7 +142,10 @@ exec_container() {
# Line-separated arguments which include shell-escaped special characters
declare -a envargs
while read -r var_val; do
- envargs+=("-e $var_val")
+ # Pass "-e VAR" on the command line, not "-e VAR=value". Podman can
+ # do a much better job of transmitting the value than we can,
+ # especially when the value includes spaces.
+ envargs+=("-e" "$(awk -F= '{print $1}' <<<$var_val)")
done <<<"$(passthrough_envars)"
# VM Images and Container images are built using (nearly) identical operations.
@@ -303,10 +306,6 @@ function _run_altbuild() {
}
function _run_release() {
- # TODO: These tests should come from code external to the podman repo.
- # to allow test-changes (and re-runs) in the case of a correctable test
- # flaw or flake at release tag-push time. For now, the test is here
- # given its simplicity.
msg "podman info:"
bin/podman info
@@ -316,6 +315,11 @@ function _run_release() {
if [[ -n "$dev" ]]; then
die "Releases must never contain '-dev' in output of 'podman info' ($dev)"
fi
+
+ commit=$(bin/podman info --format='{{.Version.GitCommit}}' | tr -d '[:space:]')
+ if [[ -z "$commit" ]]; then
+ die "Releases must contain a non-empty Version.GitCommit in 'podman info'"
+ fi
msg "All OK"
}
@@ -375,6 +379,13 @@ dotest() {
|& logformatter
}
+_run_machine() {
+ # TODO: This is a manually-triggered task, if that ever changes need to
+ # add something like:
+ # _bail_if_test_can_be_skipped docs test/e2e test/system test/python
+ make localmachine |& logformatter
+}
+
# Optimization: will exit if the only PR diffs are under docs/ or tests/
# with the exception of any given arguments. E.g., don't run e2e or upgrade
# or bud tests if the only PR changes are in test/system.
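The `exec_container()` change above stops passing `-e VAR=value` and passes bare `-e VAR` names instead, so podman copies each value from its own environment rather than having the shell re-quote it. A minimal sketch of the difference (variable name and image are illustrative):

    # A value with embedded spaces and quotes in the host environment.
    export MY_OPTS='--foo --bar "quoted value"'

    # Old style: the value must survive an extra layer of shell quoting.
    podman run --rm -e MY_OPTS="$MY_OPTS" alpine sh -c 'printf "%s\n" "$MY_OPTS"'

    # New style: name-only -e tells podman to copy the value directly from
    # the calling environment, which is what runner.sh now relies on.
    podman run --rm -e MY_OPTS alpine sh -c 'printf "%s\n" "$MY_OPTS"'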
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index f31cd6eeb..4952f8dd2 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -99,11 +99,28 @@ esac
if ((CONTAINER==0)); then # Not yet running inside a container
# Discovered reemergence of BFQ scheduler bug in kernel 5.8.12-200
# which causes a kernel panic when system is under heavy I/O load.
- # Previously discovered in F32beta and confirmed fixed. It's been
- # observed in F31 kernels as well. Deploy workaround for all VMs
- # to ensure a more stable I/O scheduler (elevator).
- echo "mq-deadline" > /sys/block/sda/queue/scheduler
- warn "I/O scheduler: $(cat /sys/block/sda/queue/scheduler)"
+ # Disable the I/O scheduler (a.k.a. elevator) for all environments,
+ # leaving optimization up to underlying storage infrastructure.
+ testfs="/" # mountpoint that experiences the most I/O during testing
+ msg "Querying block device owning partition hosting the '$testfs' filesystem"
+ # Need --nofsroot b/c btrfs appends subvolume label to `source` name
+ testdev=$(findmnt --canonicalize --noheadings --nofsroot \
+ --output source --mountpoint $testfs)
+ msg " found partition: '$testdev'"
+ testdisk=$(lsblk --noheadings --output pkname --paths $testdev)
+ msg " found block dev: '$testdisk'"
+ testsched="/sys/block/$(basename $testdisk)/queue/scheduler"
+ if [[ -n "$testdev" ]] && [[ -n "$testdisk" ]] && [[ -e "$testsched" ]]; then
+ msg " Found active I/O scheduler: $(cat $testsched)"
+ if [[ ! "$(<$testsched)" =~ \[none\] ]]; then
+ msg " Disabling elevator for '$testsched'"
+ echo "none" > "$testsched"
+ else
+ msg " Elevator already disabled"
+ fi
+ else
+ warn "Sys node for elevator doesn't exist: '$testsched'"
+ fi
fi
# Which distribution are we testing on.
@@ -186,10 +203,11 @@ esac
# Required to be defined by caller: Are we testing as root or a regular user
case "$PRIV_NAME" in
root)
- if [[ "$TEST_FLAVOR" = "sys" ]]; then
+ if [[ "$TEST_FLAVOR" = "sys" || "$TEST_FLAVOR" = "apiv2" ]]; then
# Used in local image-scp testing
setup_rootless
echo "PODMAN_ROOTLESS_USER=$ROOTLESS_USER" >> /etc/ci_environment
+ echo "PODMAN_ROOTLESS_UID=$ROOTLESS_UID" >> /etc/ci_environment
fi
;;
rootless)
@@ -203,6 +221,7 @@ esac
if [[ -n "$ROOTLESS_USER" ]]; then
echo "ROOTLESS_USER=$ROOTLESS_USER" >> /etc/ci_environment
+ echo "ROOTLESS_UID=$ROOTLESS_UID" >> /etc/ci_environment
fi
# Required to be defined by caller: Are we testing podman or podman-remote client
@@ -294,6 +313,13 @@ case "$TEST_FLAVOR" in
install_test_configs
;;
+ machine)
+ rpm -ivh $PACKAGE_DOWNLOAD_DIR/podman-gvproxy*
+ remove_packaged_podman_files
+ make install.tools
+ make install PREFIX=/usr ETCDIR=/etc
+ install_test_configs
+ ;;
gitlab)
# This only runs on Ubuntu for now
if [[ "$OS_RELEASE_ID" != "ubuntu" ]]; then
diff --git a/contrib/msi/podman.wxs b/contrib/msi/podman.wxs
index 786465589..ac2b5f328 100644
--- a/contrib/msi/podman.wxs
+++ b/contrib/msi/podman.wxs
@@ -41,7 +41,7 @@
<CustomAction Id="AddPath" ExeCommand="add" FileKey="8F507E28-A61D-4E64-A92B-B5A00F023AE8" Execute="deferred" Impersonate="yes" Return="check"/>
<CustomAction Id="RemovePath" ExeCommand="remove" FileKey="8F507E28-A61D-4E64-A92B-B5A00F023AE8" Execute="deferred" Impersonate="yes" Return="check"/>
-
+ <CustomAction Id='LaunchFile' ExeCommand="open &quot;[INSTALLDIR]podman-for-windows.html&quot;" FileKey="8F507E28-A61D-4E64-A92B-B5A00F023AE8" Execute="immediate" Impersonate="yes" Return="check"/>
<Feature Id="Complete" Level="1">
<ComponentRef Id="INSTALLDIR_Component"/>
<ComponentRef Id="MainExecutable"/>
@@ -55,8 +55,9 @@
<InstallExecuteSequence>
<RemoveExistingProducts Before="InstallInitialize"/>
- <Custom Action="AddPath" After="InstallFiles">NOT Installed</Custom>
+ <Custom Action="AddPath" Before="InstallFinalize" After="InstallFiles">NOT Installed</Custom>
<Custom Action="RemovePath" Before="RemoveFiles" After="InstallInitialize">(REMOVE="ALL") AND (NOT UPGRADINGPRODUCTCODE)</Custom>
+ <Custom Action='LaunchFile' After='InstallFinalize'>(NOT Installed) AND (NOT UILevel=2)</Custom>
</InstallExecuteSequence>
</Product>
diff --git a/contrib/podmanimage/README.md b/contrib/podmanimage/README.md
index 4f184ca28..0f4f715ad 100644
--- a/contrib/podmanimage/README.md
+++ b/contrib/podmanimage/README.md
@@ -4,7 +4,7 @@
## Overview
-This directory contains the Dockerfiles necessary to create the podmanimage container
+This directory contains the Containerfiles necessary to create the podmanimage container
images that are housed on quay.io under the Podman account. All repositories where
the images live are public and can be pulled without credentials. These container images are secured and the
resulting containers can run safely with privileges within the container.
@@ -21,18 +21,20 @@ The container images are:
and stable version of podman. For the most recent `<version>` tags (`vX`,
`vX.Y`, and `vX.Y.Z`) the image contents will be updated daily to incorporate
(especially) security upgrades. For build details, please [see the
- configuration file](stable/Dockerfile).
+ configuration file](stable/Containerfile).
* `quay.io/containers/podman:latest` and `quay.io/podman/stable:latest` -
- Built daily using the same Dockerfile as above. The Podman version
+ Built daily using the same Containerfile as above. The Podman version
will remain the "latest" available in Fedora, however the other image
contents may vary compared to the version-tagged images.
* `quay.io/podman/testing:latest` - This image is built daily, using the
latest version of Podman that was in the Fedora `updates-testing` repository.
- The image is Built with [the testing Dockerfile](testing/Dockerfile).
+ The image is built with [the testing Containerfile](testing/Containerfile).
* `quay.io/podman/upstream:latest` - This image is built daily using the latest
code found in this GitHub repository. Due to the image changing frequently,
it's not guaranteed to be stable or even executable. The image is built with
- [the upstream Dockerfile](upstream/Dockerfile).
+ [the upstream Containerfile](upstream/Containerfile). Note that the actual compilation
+ of upstream podman [occurs continuously in
+ COPR](https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/).
## Sample Usage
diff --git a/contrib/podmanimage/stable/Containerfile b/contrib/podmanimage/stable/Containerfile
new file mode 100644
index 000000000..70ff439d9
--- /dev/null
+++ b/contrib/podmanimage/stable/Containerfile
@@ -0,0 +1,58 @@
+# stable/Containerfile
+#
+# Build a Podman container image from the latest
+# stable version of Podman on the Fedora Updates System.
+# https://bodhi.fedoraproject.org/updates/?search=podman
+# This image can be used to create a secured container
+# that runs safely with privileges within the container.
+#
+FROM registry.fedoraproject.org/fedora:latest
+
+# Don't include container-selinux and remove
+# directories used by dnf that are just taking
+# up space.
+# TODO: rpm --setcaps... needed due to Fedora (base) image builds
+# being (maybe still?) affected by
+# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
+RUN dnf -y update && \
+ rpm --setcaps shadow-utils 2>/dev/null && \
+ dnf -y install podman fuse-overlayfs \
+ --exclude container-selinux && \
+ dnf clean all && \
+ rm -rf /var/cache /var/log/dnf* /var/log/yum.*
+
+RUN useradd podman; \
+echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \
+echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid;
+
+ARG _REPO_URL="https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable"
+ADD $_REPO_URL/containers.conf /etc/containers/containers.conf
+ADD $_REPO_URL/podman-containers.conf /home/podman/.config/containers/containers.conf
+
+RUN mkdir -p /home/podman/.local/share/containers && \
+ chown podman:podman -R /home/podman && \
+ chmod 644 /etc/containers/containers.conf
+
+# Copy & modify the defaults to provide reference if runtime changes needed.
+# Changes here are required for running with fuse-overlay storage inside container.
+RUN sed -e 's|^#mount_program|mount_program|g' \
+ -e '/additionalimage.*/a "/var/lib/shared",' \
+ -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
+ /usr/share/containers/storage.conf \
+ > /etc/containers/storage.conf
+
+# Note VOLUME options must always happen after the chown call above
+# RUN commands can not modify existing volumes
+VOLUME /var/lib/containers
+VOLUME /home/podman/.local/share/containers
+
+RUN mkdir -p /var/lib/shared/overlay-images \
+ /var/lib/shared/overlay-layers \
+ /var/lib/shared/vfs-images \
+ /var/lib/shared/vfs-layers && \
+ touch /var/lib/shared/overlay-images/images.lock && \
+ touch /var/lib/shared/overlay-layers/layers.lock && \
+ touch /var/lib/shared/vfs-images/images.lock && \
+ touch /var/lib/shared/vfs-layers/layers.lock
+
+ENV _CONTAINERS_USERNS_CONFIGURED=""
diff --git a/contrib/podmanimage/stable/Dockerfile b/contrib/podmanimage/stable/Dockerfile
deleted file mode 100644
index 78d820458..000000000
--- a/contrib/podmanimage/stable/Dockerfile
+++ /dev/null
@@ -1,36 +0,0 @@
-# stable/Dockerfile
-#
-# Build a Podman container image from the latest
-# stable version of Podman on the Fedoras Updates System.
-# https://bodhi.fedoraproject.org/updates/?search=podman
-# This image can be used to create a secured container
-# that runs safely with privileges within the container.
-#
-FROM registry.fedoraproject.org/fedora:latest
-
-# Don't include container-selinux and remove
-# directories used by yum that are just taking
-# up space.
-RUN dnf -y update; rpm --restore shadow-utils 2>/dev/null; \
-yum -y install podman fuse-overlayfs --exclude container-selinux; \
-rm -rf /var/cache /var/log/dnf* /var/log/yum.*
-
-RUN useradd podman; \
-echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \
-echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid;
-
-ADD https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable/containers.conf /etc/containers/containers.conf
-ADD https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable/podman-containers.conf /home/podman/.config/containers/containers.conf
-
-RUN mkdir -p /home/podman/.local/share/containers; chown podman:podman -R /home/podman
-
-# Note VOLUME options must always happen after the chown call above
-# RUN commands can not modify existing volumes
-VOLUME /var/lib/containers
-VOLUME /home/podman/.local/share/containers
-
-# chmod containers.conf and adjust storage.conf to enable Fuse storage.
-RUN chmod 644 /etc/containers/containers.conf; sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf
-RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers /var/lib/shared/vfs-images /var/lib/shared/vfs-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock; touch /var/lib/shared/vfs-images/images.lock; touch /var/lib/shared/vfs-layers/layers.lock
-
-ENV _CONTAINERS_USERNS_CONFIGURED=""
diff --git a/contrib/podmanimage/testing/Containerfile b/contrib/podmanimage/testing/Containerfile
new file mode 100644
index 000000000..65c06f98c
--- /dev/null
+++ b/contrib/podmanimage/testing/Containerfile
@@ -0,0 +1,63 @@
+# testing/Containerfile
+#
+# Build a Podman container image from the latest
+# stable version of Podman on the Fedora Updates System.
+# https://bodhi.fedoraproject.org/updates/?search=podman
+# This image can be used to create a secured container
+# that runs safely with privileges within the container.
+#
+FROM registry.fedoraproject.org/fedora:latest
+
+# Don't include container-selinux and remove
+# directories used by dnf that are just taking
+# up space.
+# TODO: rpm --setcaps... needed due to Fedora (base) image builds
+# being (maybe still?) affected by
+# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
+RUN dnf -y update && \
+ rpm --setcaps shadow-utils 2>/dev/null && \
+ dnf -y install podman fuse-overlayfs \
+ --exclude container-selinux --enablerepo updates-testing && \
+ dnf clean all && \
+ rm -rf /var/cache /var/log/dnf* /var/log/yum.*
+
+RUN useradd podman; \
+echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \
+echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid;
+
+ARG _REPO_URL="https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable"
+ADD $_REPO_URL/containers.conf /etc/containers/containers.conf
+ADD $_REPO_URL/podman-containers.conf /home/podman/.config/containers/containers.conf
+
+RUN mkdir -p /home/podman/.local/share/containers && \
+ chown podman:podman -R /home/podman
+
+# Copy & modify the defaults to provide reference if runtime changes needed.
+# Changes here are required for running with fuse-overlay storage inside container.
+RUN sed -e 's|^#mount_program|mount_program|g' \
+ -e '/additionalimage.*/a "/var/lib/shared",' \
+ -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
+ /usr/share/containers/storage.conf \
+ > /etc/containers/storage.conf
+
+# Note VOLUME options must always happen after the chown call above
+# RUN commands can not modify existing volumes
+VOLUME /var/lib/containers
+VOLUME /home/podman/.local/share/containers
+
+# chmod containers.conf and adjust storage.conf to enable Fuse storage.
+RUN chmod 644 /etc/containers/containers.conf && \
+ sed -i -e 's|^#mount_program|mount_program|g' \
+ -e '/additionalimage.*/a "/var/lib/shared",' \
+ -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
+ /etc/containers/storage.conf
+RUN mkdir -p /var/lib/shared/overlay-images \
+ /var/lib/shared/overlay-layers \
+ /var/lib/shared/vfs-images \
+ /var/lib/shared/vfs-layers && \
+ touch /var/lib/shared/overlay-images/images.lock && \
+ touch /var/lib/shared/overlay-layers/layers.lock && \
+ touch /var/lib/shared/vfs-images/images.lock && \
+ touch /var/lib/shared/vfs-layers/layers.lock
+
+ENV _CONTAINERS_USERNS_CONFIGURED=""
diff --git a/contrib/podmanimage/testing/Dockerfile b/contrib/podmanimage/testing/Dockerfile
deleted file mode 100644
index 41af1c849..000000000
--- a/contrib/podmanimage/testing/Dockerfile
+++ /dev/null
@@ -1,36 +0,0 @@
-# testing/Dockerfile
-#
-# Build a Podman image using the latest
-# version of Podman that is in updates-testing
-# on the Fedoras Updates System. At times this
-# may be the same the latest stable version.
-# https://bodhi.fedoraproject.org/updates/?search=podman
-# This image can be used to create a secured container
-# that runs safely with privileges within the container.
-#
-FROM registry.fedoraproject.org/fedora:latest
-
-# Don't include container-selinux and remove
-# directories used by yum that are just taking
-# up space.
-RUN yum -y update; rpm --restore shadow-utils 2>/dev/null; yum -y install podman fuse-overlayfs --exclude container-selinux --enablerepo updates-testing; rm -rf /var/cache /var/log/dnf* /var/log/yum.*
-
-RUN useradd podman; \
-echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \
-echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid;
-
-ADD https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable/containers.conf /etc/containers/containers.conf
-ADD https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable/podman-containers.conf /home/podman/.config/containers/containers.conf
-
-RUN mkdir -p /home/podman/.local/share/containers; chown podman:podman -R /home/podman
-
-# Note VOLUME options must always happen after the chown call above
-# RUN commands can not modify existing volumes
-VOLUME /var/lib/containers
-VOLUME /home/podman/.local/share/containers
-
-# chmod containers.conf and adjust storage.conf to enable Fuse storage.
-RUN chmod 644 /etc/containers/containers.conf; sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf
-RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers /var/lib/shared/vfs-images /var/lib/shared/vfs-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock; touch /var/lib/shared/vfs-images/images.lock; touch /var/lib/shared/vfs-layers/layers.lock
-
-ENV _CONTAINERS_USERNS_CONFIGURED=""
diff --git a/contrib/podmanimage/upstream/Containerfile b/contrib/podmanimage/upstream/Containerfile
new file mode 100644
index 000000000..96e39c949
--- /dev/null
+++ b/contrib/podmanimage/upstream/Containerfile
@@ -0,0 +1,64 @@
+# upstream/Containerfile
+#
+# Build a Podman container image from the latest
+# upstream version of Podman on GitHub.
+# https://github.com/containers/podman
+# This image can be used to create a secured container
+# that runs safely with privileges within the container.
+# The containers created by this image also come with a
+# Podman development environment in /root/podman.
+#
+FROM registry.fedoraproject.org/fedora:latest
+
+# Don't include container-selinux and remove
+# directories used by dnf that are just taking
+# up space. The latest podman + deps. come from
+# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/
+# TODO: rpm --setcaps... needed due to Fedora (base) image builds
+# being (maybe still?) affected by
+# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
+RUN dnf -y update && \
+ rpm --setcaps shadow-utils 2>/dev/null && \
+ dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
+ dnf -y copr enable rhcontainerbot/podman-next && \
+ dnf -y install podman fuse-overlayfs \
+ --exclude container-selinux \
+ --enablerepo=updates-testing && \
+ dnf clean all && \
+ rm -rf /var/cache /var/log/dnf* /var/log/yum.*
+
+RUN useradd podman; \
+echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \
+echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid;
+
+ARG _REPO_URL="https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable"
+ADD $_REPO_URL/containers.conf /etc/containers/containers.conf
+ADD $_REPO_URL/podman-containers.conf /home/podman/.config/containers/containers.conf
+
+RUN mkdir -p /home/podman/.local/share/containers && \
+ chown podman:podman -R /home/podman && \
+ chmod 644 /etc/containers/containers.conf
+
+# Copy & modify the defaults to provide reference if runtime changes needed.
+# Changes here are required for running with fuse-overlay storage inside container.
+RUN sed -e 's|^#mount_program|mount_program|g' \
+ -e '/additionalimage.*/a "/var/lib/shared",' \
+ -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
+ /usr/share/containers/storage.conf \
+ > /etc/containers/storage.conf
+
+# Note VOLUME options must always happen after the chown call above
+# RUN commands can not modify existing volumes
+VOLUME /var/lib/containers
+VOLUME /home/podman/.local/share/containers
+
+RUN mkdir -p /var/lib/shared/overlay-images \
+ /var/lib/shared/overlay-layers \
+ /var/lib/shared/vfs-images \
+ /var/lib/shared/vfs-layers && \
+ touch /var/lib/shared/overlay-images/images.lock && \
+ touch /var/lib/shared/overlay-layers/layers.lock && \
+ touch /var/lib/shared/vfs-images/images.lock && \
+ touch /var/lib/shared/vfs-layers/layers.lock
+
+ENV _CONTAINERS_USERNS_CONFIGURED=""
diff --git a/contrib/podmanimage/upstream/Dockerfile b/contrib/podmanimage/upstream/Dockerfile
deleted file mode 100644
index 0769a7612..000000000
--- a/contrib/podmanimage/upstream/Dockerfile
+++ /dev/null
@@ -1,85 +0,0 @@
-# git/Dockerfile
-#
-# Build a Podman container image from the latest
-# upstream version of Podman on GitHub.
-# https://github.com/containers/podman
-# This image can be used to create a secured container
-# that runs safely with privileges within the container.
-# The containers created by this image also come with a
-# Podman development environment in /root/podman.
-#
-FROM registry.fedoraproject.org/fedora:latest
-ENV GOPATH=/root/podman
-
-# Install the software required to build Podman.
-# Then create a directory and clone from the Podman
-# GitHub repository, make and install Podman
-# to the container.
-# Finally remove the podman directory and a few other packages
-# that are needed for building but not running Podman
-RUN yum -y update; rpm --restore shadow-utils 2>/dev/null; yum -y install --exclude container-selinux \
- --enablerepo=updates-testing \
- btrfs-progs-devel \
- containernetworking-cni \
- conmon \
- device-mapper-devel \
- git \
- glib2-devel \
- glibc-devel \
- glibc-static \
- go \
- golang-github-cpuguy83-md2man \
- gpgme-devel \
- iptables \
- libassuan-devel \
- libgpg-error-devel \
- libseccomp-devel \
- libselinux-devel \
- make \
- pkgconfig \
- crun \
- fuse-overlayfs \
- fuse3 \
- containers-common \
- podman-plugins; \
- mkdir /root/podman; \
- git clone https://github.com/containers/podman /root/podman/src/github.com/containers/podman; \
- cd /root/podman/src/github.com/containers/podman; \
- make BUILDTAGS="selinux seccomp"; \
- make install PREFIX=/usr; \
- cd /root/podman; \
- git clone https://github.com/containers/conmon /root/podman/conmon; \
- cd conmon; \
- make; \
- install -D -m 755 bin/conmon /usr/libexec/podman/conmon; \
- git clone https://github.com/containernetworking/plugins.git $GOPATH/src/github.com/containernetworking/plugins; \
- cd $GOPATH/src/github.com/containernetworking/plugins; \
- ./build_linux.sh; \
- mkdir -p /usr/libexec/cni; \
- \cp -fR bin/* /usr/libexec/cni; \
- mkdir -p /etc/cni/net.d; \
- curl -qsSL https://raw.githubusercontent.com/containers/podman/main/cni/87-podman-bridge.conflist | tee /etc/cni/net.d/99-loopback.conf; \
- mkdir -p /usr/share/containers; \
- rm -rf /root/podman/*; \
- yum -y remove git golang go-md2man make; \
- yum clean all;
-
-RUN useradd podman; \
-echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \
-echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid;
-
-ADD https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable/containers.conf /etc/containers/containers.conf
-ADD https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable/podman-containers.conf /home/podman/.config/containers/containers.conf
-
-RUN mkdir -p /home/podman/.local/share/containers; chown podman:podman -R /home/podman
-
-# Note VOLUME options must always happen after the chown call above
-# RUN commands can not modify existing volumes
-VOLUME /var/lib/containers
-VOLUME /home/podman/.local/share/containers
-
-# chmod containers.conf and adjust storage.conf to enable Fuse storage.
-RUN chmod 644 /etc/containers/containers.conf; sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf
-RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers /var/lib/shared/vfs-images /var/lib/shared/vfs-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock; touch /var/lib/shared/vfs-images/images.lock; touch /var/lib/shared/vfs-layers/layers.lock
-
-ENV _CONTAINERS_USERNS_CONFIGURED=""
diff --git a/contrib/systemd/system/podman-play-kube@.service.in b/contrib/systemd/system/podman-kube@.service.in
index 824f71eb0..824f71eb0 100644
--- a/contrib/systemd/system/podman-play-kube@.service.in
+++ b/contrib/systemd/system/podman-kube@.service.in
diff --git a/contrib/systemd/system/podman-restart.service.in b/contrib/systemd/system/podman-restart.service.in
index 1f13e57e1..de0249381 100644
--- a/contrib/systemd/system/podman-restart.service.in
+++ b/contrib/systemd/system/podman-restart.service.in
@@ -2,12 +2,15 @@
Description=Podman Start All Containers With Restart Policy Set To Always
Documentation=man:podman-start(1)
StartLimitIntervalSec=0
+Wants=network-online.target
+After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=true
Environment=LOGGING="--log-level=info"
ExecStart=@@PODMAN@@ $LOGGING start --all --filter restart-policy=always
+ExecStop=/bin/sh -c '@@PODMAN@@ $LOGGING stop $(@@PODMAN@@ container ls --filter restart-policy=always -q)'
[Install]
WantedBy=default.target
diff --git a/docs/play_kube_support.md b/docs/play_kube_support.md
new file mode 100644
index 000000000..3cfd3fa50
--- /dev/null
+++ b/docs/play_kube_support.md
@@ -0,0 +1,175 @@
+# Podman Play Kube Support
+
+This document outlines the kube yaml fields that are currently supported by the **podman play kube** command.
+
+Note: **N/A** means that the option cannot be supported in a single-node Podman environment.
+
+## Pod Fields
+
+| Field | Support |
+|---------------------------------------------------|---------|
+| containers | ✅ |
+| initContainers | ✅ |
+| imagePullSecrets | |
+| enableServiceLinks | |
+| os<nolink>.name | |
+| volumes | |
+| nodeSelector | N/A |
+| nodeName | N/A |
+| affinity.nodeAffinity | N/A |
+| affinity.podAffinity | N/A |
+| affinity.podAntiAffinity | N/A |
+| tolerations.key | N/A |
+| tolerations.operator | N/A |
+| tolerations.effect | N/A |
+| tolerations.tolerationSeconds | N/A |
+| schedulerName | N/A |
+| runtimeClassName | |
+| priorityClassName | |
+| priority | |
+| topologySpreadConstraints.maxSkew | N/A |
+| topologySpreadConstraints.topologyKey | N/A |
+| topologySpreadConstraints.whenUnsatisfiable | N/A |
+| topologySpreadConstraints.labelSelector | N/A |
+| topologySpreadConstraints.minDomains | N/A |
+| restartPolicy | ✅ |
+| terminationGracePeriod | |
+| activeDeadlineSeconds | |
+| readinessGates.conditionType | |
+| hostname | ✅ |
+| setHostnameAsFQDN | |
+| subdomain | |
+| hostAliases.hostnames | ✅ |
+| hostAliases.ip | ✅ |
+| dnsConfig.nameservers | ✅ |
+| dnsConfig<nolink>.options.name | ✅ |
+| dnsConfig.options.value | ✅ |
+| dnsConfig.searches | ✅ |
+| dnsPolicy | |
+| hostNetwork | ✅ |
+| hostPID | |
+| hostIPC | |
+| shareProcessNamespace | ✅ |
+| serviceAccountName | |
+| automountServiceAccountToken | |
+| securityContext.runAsUser | |
+| securityContext.runAsNonRoot | |
+| securityContext.runAsGroup | |
+| securityContext.supplementalGroups | |
+| securityContext.fsGroup | |
+| securityContext.fsGroupChangePolicy | |
+| securityContext.seccompProfile.type | |
+| securityContext.seccompProfile.localhostProfile | |
+| securityContext.seLinuxOptions.level | |
+| securityContext.seLinuxOptions.role | |
+| securityContext.seLinuxOptions.type | |
+| securityContext.seLinuxOptions.user | |
+| securityContext<nolink>.sysctls.name | |
+| securityContext.sysctls.value | |
+| securityContext.windowsOptions.gmsaCredentialSpec | |
+| securityContext.windowsOptions.hostProcess | |
+| securityContext.windowsOptions.runAsUserName | |
+
+## Container Fields
+
+| Field | Support |
+|---------------------------------------------------|---------|
+| name | ✅ |
+| image | ✅ |
+| imagePullPolicy | ✅ |
+| command | ✅ |
+| args | ✅ |
+| workingDir | ✅ |
+| ports.containerPort | ✅ |
+| ports.hostIP | ✅ |
+| ports.hostPort | ✅ |
+| ports<nolink>.name | ✅ |
+| ports.protocol | ✅ |
+| env<nolink>.name | ✅ |
+| env.value | ✅ |
+| env.valueFrom.configMapKeyRef.key | ✅ |
+| env<nolink>.valueFrom.configMapKeyRef.name | ✅ |
+| env.valueFrom.configMapKeyRef.optional | ✅ |
+| env.valueFrom.fieldRef | ✅ |
+| env.valueFrom.resourceFieldRef | ✅ |
+| env.valueFrom.secretKeyRef.key | ✅ |
+| env<nolink>.valueFrom.secretKeyRef.name | ✅ |
+| env.valueFrom.secretKeyRef.optional | ✅ |
+| envFrom<nolink>.configMapRef.name | ✅ |
+| envFrom.configMapRef.optional | ✅ |
+| envFrom.prefix | |
+| envFrom<nolink>.secretRef.name | ✅ |
+| envFrom.secretRef.optional | ✅ |
+| volumeMounts.mountPath | ✅ |
+| volumeMounts<nolink>.name | ✅ |
+| volumeMounts.mountPropagation | |
+| volumeMounts.readOnly | ✅ |
+| volumeMounts.subPath | |
+| volumeMounts.subPathExpr | |
+| volumeDevices.devicePath | |
+| volumeDevices<nolink>.name | |
+| resources.limits | ✅ |
+| resources.requests | ✅ |
+| lifecycle.postStart | |
+| lifecycle.preStop | |
+| terminationMessagePath | |
+| terminationMessagePolicy | |
+| livenessProbe | ✅ |
+| readinessProbe | |
+| startupProbe | |
+| securityContext.runAsUser | ✅ |
+| securityContext.runAsNonRoot | |
+| securityContext.runAsGroup | ✅ |
+| securityContext.readOnlyRootFilesystem | ✅ |
+| securityContext.procMount | |
+| securityContext.privileged | ✅ |
+| securityContext.allowPrivilegeEscalation | ✅ |
+| securityContext.capabilities.add | ✅ |
+| securityContext.capabilities.drop | ✅ |
+| securityContext.seccompProfile.type | |
+| securityContext.seccompProfile.localhostProfile | |
+| securityContext.seLinuxOptions.level | ✅ |
+| securityContext.seLinuxOptions.role | ✅ |
+| securityContext.seLinuxOptions.type | ✅ |
+| securityContext.seLinuxOptions.user | ✅ |
+| securityContext.windowsOptions.gmsaCredentialSpec | |
+| securityContext.windowsOptions.hostProcess | |
+| securityContext.windowsOptions.runAsUserName | |
+| stdin | |
+| stdinOnce | |
+| tty | |
+
+## PersistentVolumeClaim Fields
+
+| Field | Support |
+|--------------------|---------|
+| volumeName | |
+| storageClassName | ✅ |
+| volumeMode | |
+| accessModes | ✅ |
+| selector | |
+| resources.limits | |
+| resources.requests | ✅ |
+
+## ConfigMap Fields
+
+| Field | Support |
+|------------|---------|
+| binaryData | |
+| data | ✅ |
+| immutable | |
+
+## Deployment Fields
+
+| Field | Support |
+|---------------------------------------|---------|
+| replicas | ✅ |
+| selector | ✅ |
+| template | ✅ |
+| minReadySeconds | |
+| strategy.type | |
+| strategy.rollingUpdate.maxSurge | |
+| strategy.rollingUpdate.maxUnavailable | |
+| revisionHistoryLimit | |
+| progressDeadlineSeconds | |
+| paused | |
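The tables above list which kube YAML fields `podman play kube` honors. Exercising them is a matter of feeding a spec that sticks to supported fields; a minimal sketch (file name and contents are illustrative):

    # Create the pod(s) described by the YAML, then tear them down again.
    podman play kube ./my-pod.yaml
    podman play kube --down ./my-pod.yaml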
diff --git a/docs/remote-docs.sh b/docs/remote-docs.sh
index 8249fc497..4c2602f80 100755
--- a/docs/remote-docs.sh
+++ b/docs/remote-docs.sh
@@ -86,6 +86,16 @@ function html_fn() {
-o $TARGET/${file%%.*}.html $markdown
}
+function html_standalone() {
+ local markdown=$1
+ local title=$2
+ local file=$(basename $markdown)
+ local dir=$(dirname $markdown)
+ (cd $dir; pandoc --ascii --from markdown-smart -c ../standalone-styling.css \
+ --standalone --self-contained --metadata title="$2" -V title= \
+ $file) > $TARGET/${file%%.*}.html
+}
+
# Run 'podman help' (possibly against a subcommand, e.g. 'podman help image')
# and return a list of each first word under 'Available Commands', that is,
# the command name but not its description.
@@ -165,3 +175,6 @@ for s in $SOURCES; do
fi
done
rename
+if [[ "$PLATFORM" == "windows" ]]; then
+ html_standalone docs/tutorials/podman-for-windows.md 'Podman for Windows'
+fi
diff --git a/docs/source/Tutorials.rst b/docs/source/Tutorials.rst
index c2cbcb8a9..024e6847c 100644
--- a/docs/source/Tutorials.rst
+++ b/docs/source/Tutorials.rst
@@ -4,11 +4,11 @@ Tutorials
=========
Here are a number of useful tutorials to get you up and running with Podman. If you are familiar with the Docker `Container Engine`_ the command in Podman_ should be quite familiar. If you are brand new to containers, take a look at our `Introduction`.
-* `Basic Setup and Use of Podman <https://github.com/containers/podman/blob/main/docs/tutorials/podman_tutorial.md>`_: Learn how to setup Podman and perform some basic commands with the utility.
-* `Basic Setup and Use of Podman in a Rootless environment <https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md>`_: The steps required to setup rootless Podman are enumerated.
+* `Basic Setup and Use of Podman <https://github.com/containers/podman/blob/main/docs/tutorials/podman_tutorial.md>`_: Learn how to set up Podman and perform some basic commands with the utility.
+* `Basic Setup and Use of Podman in a Rootless environment <https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md>`_: The steps required to set up rootless Podman are enumerated.
* `Podman for Windows <https://github.com/containers/podman/blob/main/docs/tutorials/podman-for-windows.md>`_: A guide to installing and using Podman on Windows.
* `Podman Remote Clients on Mac/Windows <https://github.com/containers/podman/blob/main/docs/tutorials/mac_win_client.md>`_: Advanced setup for connecting to a remote Linux system using the Podman remote client on Mac and Windows.
-* `How to sign and distribute container images using Podman <https://github.com/containers/podman/blob/main/docs/tutorials/image_signing.md>`_: Learn how to setup and use image signing with Podman.
+* `How to sign and distribute container images using Podman <https://github.com/containers/podman/blob/main/docs/tutorials/image_signing.md>`_: Learn how to set up and use image signing with Podman.
* `Podman remote-client tutorial <https://github.com/containers/podman/blob/main/docs/tutorials/remote_client.md>`_: A brief how-to on using the Podman remote-client.
* `How to use libpod for custom/derivative projects <https://github.com/containers/podman/blob/main/docs/tutorials/podman-derivative-api.md>`_: How the libpod API can be used within your own project.
* `How to use Podman's Go RESTful bindings <https://github.com/containers/podman/tree/main/pkg/bindings>`_: An introduction to using our RESTful Golang bindings in an external application.
diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md
index b372bfce6..bf710022e 100644
--- a/docs/source/markdown/podman-build.1.md
+++ b/docs/source/markdown/podman-build.1.md
@@ -91,6 +91,33 @@ instructions read from the Containerfiles in the same way that environment
variables are, but which will not be added to environment variable list in the
resulting image's configuration.
+#### **--build-context**=*name=value*
+
+Specify an additional build context using its short name and its location.
+Additional build contexts can be referenced in the same manner as we access
+different stages in a COPY instruction.
+
+Valid values could be:
+
+* Local directory – e.g. --build-context project2=../path/to/project2/src (This option is not available with the remote Podman client. On a Podman machine setup (i.e., macOS and Windows) the path must exist on the machine VM)
+* HTTP URL to a tarball – e.g. --build-context src=https://example.org/releases/src.tar
+* Container image – specified with a container-image:// prefix, e.g. --build-context alpine=container-image://alpine:3.15, (also accepts docker://, docker-image://)
+
+On the Containerfile side, you can reference the build context on all
+commands that accept the “from” parameter. Here’s how that might look:
+
+```dockerfile
+FROM [name]
+COPY --from=[name] ...
+RUN --mount=from=[name] …
+```
+
+The value of [name] is matched with the following priority order:
+
+* Named build context defined with --build-context [name]=..
+* Stage defined with AS [name] inside Containerfile
+* Image [name], either local or in a remote registry
+
#### **--cache-from**
Images to utilize as potential cache sources. Podman does not currently support
@@ -140,6 +167,10 @@ This option is added to be aligned with other containers CLIs.
Podman doesn't communicate with a daemon or a remote server.
Thus, compressing the data before sending it is irrelevant to Podman. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines)
+#### **--cpp-flag**=*flags*
+
+Set additional flags to pass to the C preprocessor cpp(1). Containerfiles ending with a ".in" suffix will be preprocessed via cpp(1). This option can be used to pass additional flags to cpp. Note: You can also set default CPPFLAGS by setting the BUILDAH_CPPFLAGS environment variable (e.g., export BUILDAH_CPPFLAGS="-DDEBUG").
+
#### **--cpu-period**=*limit*
Set the CPU period for the Completely Fair Scheduler (CFS), which is a
@@ -396,6 +427,16 @@ BUILDAH\_LAYERS environment variable. `export BUILDAH_LAYERS=true`
Log output which would be sent to standard output and standard error to the
specified file instead of to standard output and standard error.
+This option is not supported on the remote client, including Mac and Windows
+(excluding WSL2) machines.
+
+#### **--logsplit** *bool-value*
+
+If `--logfile` and `--platform` are specified, the `--logsplit` option allows
+end-users to split the log file for each platform into different files in the
+following format: `${logfile}_${platform-os}_${platform-arch}`.
+This option is not supported on the remote client, including Mac and Windows
+(excluding WSL2) machines.
#### **--manifest** "manifest"
@@ -404,8 +445,8 @@ if it does not exist. This option is useful for building multi architecture imag
#### **--memory**, **-m**=*LIMIT*
-Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes),
-m (megabytes), or g (gigabytes))
+Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes),
+m (mebibytes), or g (gibibytes))
Allows you to constrain the memory available to a container. If the host
supports swap memory, then the **-m** memory setting can be larger than physical
@@ -422,7 +463,7 @@ A limit value equal to memory plus swap. Must be used with the **-m**
the value of --memory.
The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes),
-`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a
+`k` (kibibytes), `m` (mebibytes), or `g` (gibibytes). If you don't specify a
unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
#### **--network**=*mode*, **--net**
@@ -451,6 +492,15 @@ By default, Podman will manage _/etc/hosts_, adding the container's own IP addre
**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified.
This option conflicts with **--add-host**.
+#### **--omit-history**
+
+Omit build history information in the built image (default: false).
+
+This option is useful for the cases where end users explicitly
+want to set `--omit-history` to omit the optional `History` from
+built images or when working with images built using build tools that
+do not include `History` information in their images.
+
#### **--os**=*string*
Set the OS of the image to be built, and that of the base image to be pulled,
@@ -526,27 +576,14 @@ While `podman build` is happy to use base images and build images for any
platform that exists, `RUN` instructions will not be able to succeed without
the help of emulation provided by packages like `qemu-user-static`.
-#### **--pull**
-
-When the option is enabled or set explicitly to `true` (with *--pull=true*)
-pull the image from the first registry it is found in as listed in registries.conf.
-Raise an error if the image could not be pulled, even if the image is present locally.
-
-If the option is disabled (with *--pull=false*), pull the image from the
-registry only if the image is not present locally. Raise an error if the image is not
-in the registries and not present locally.
-
-If the pull option is set to `always` (with *--pull=always*),
-pull the image from the first registry it is found in as listed in registries.conf.
-Raise an error if not found in the registries, even if the image is present locally.
+#### **--pull**=**always**|**missing**|**never**|**newer**
-If the pull option is set to `missing` (with *--pull=missing*),
-Pull the image only if it is not present in the local storage. Raise an error if it
-could neither be found in the local storage or on a registry.
+Pull image policy. The default is **always**.
-If the pull option is set to `never` (with *--pull=never*),
-Do not pull the image from the registry, use only the local version. Raise an error
-if the image is not present locally.
+- **always**, **true**: Always pull the image and throw an error if the pull fails.
+- **missing**: Pull the image only if it could not be found in the local containers storage. Throw an error if no image could be found and the pull fails.
+- **never**, **false**: Never pull the image but use the one from the local containers storage. Throw an error if no image could be found.
+- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found.
#### **--quiet**, **-q**
@@ -600,8 +637,8 @@ as a seccomp filter
Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater
than `0`.
-Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or
-`g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the
+Unit is optional and can be `b` (bytes), `k` (kibibytes), `m`(mebibytes), or
+`g` (gibibytes). If you omit the unit, the system uses bytes. If you omit the
size entirely, the system uses `64m`.
#### **--sign-by**=*fingerprint*
@@ -882,12 +919,10 @@ container. When the mount propagation policy is set to `slave`, one way mount
propagation is enabled and any mounts completed on the host for that volume will
be visible only inside of the container. To control the mount propagation
property of volume use the `:[r]shared`, `:[r]slave` or `:[r]private`
-propagation flag. The propagation property can be specified only for bind mounted
-volumes and not for internal volumes or named volumes. For mount propagation to
-work on the source mount point (mount point where source dir is mounted on) has
-to have the right propagation properties. For shared volumes, the source mount
-point has to be shared. And for slave volumes, the source mount has to be either
-shared or slave. <sup>[[1]](#Footnote1)</sup>
+propagation flag. For mount propagation to work, the source mount point (the mount
+point where the source dir is mounted) has to have the right propagation properties.
+For shared volumes, the source mount point has to be shared, and for slave volumes,
+the source mount has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
Use `df <source-dir>` to determine the source mount and then use
`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to determine propagation
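The podman-build(1) hunks above add `--build-context`, `--cpp-flag`, `--logsplit`, and `--omit-history`, and restate `--pull` as an explicit policy. A sketch combining a few of the documented options (image names and paths are illustrative):

    # Pull the base image only if the registry copy is newer, wire in an
    # additional build context, and omit history from the resulting image.
    podman build --pull=newer \
        --build-context project2=../path/to/project2/src \
        --omit-history -t quay.io/example/app:latest .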
diff --git a/docs/source/markdown/podman-container-cleanup.1.md b/docs/source/markdown/podman-container-cleanup.1.md
index 0f182eded..0ad09efd3 100644
--- a/docs/source/markdown/podman-container-cleanup.1.md
+++ b/docs/source/markdown/podman-container-cleanup.1.md
@@ -1,7 +1,7 @@
% podman-container-cleanup(1)
## NAME
-podman\-container\-cleanup - Cleanup the container's network and mountpoints
+podman\-container\-cleanup - Clean up the container's network and mountpoints
## SYNOPSIS
**podman container cleanup** [*options*] *container* [*container* ...]
@@ -13,7 +13,7 @@ Sometimes container mount points and network stacks can remain if the podman com
## OPTIONS
#### **--all**, **-a**
-Cleanup all *containers*.\
+Clean up all *containers*.\
The default is **false**.\
*IMPORTANT: This OPTION does not need a container name or ID as input argument.*
@@ -40,12 +40,12 @@ After cleanup, remove the image entirely.\
The default is **false**.
## EXAMPLES
-Cleanup the container "mywebserver".
+Clean up the container "mywebserver".
```
$ podman container cleanup mywebserver
```
-Cleanup the containers with the names "mywebserver", "myflaskserver", "860a4b23".
+Clean up the containers with the names "mywebserver", "myflaskserver", "860a4b23".
```
$ podman container cleanup mywebserver myflaskserver 860a4b23
```
diff --git a/docs/source/markdown/podman-container-clone.1.md b/docs/source/markdown/podman-container-clone.1.md
index 69423113d..6d552db75 100644
--- a/docs/source/markdown/podman-container-clone.1.md
+++ b/docs/source/markdown/podman-container-clone.1.md
@@ -131,7 +131,7 @@ Force removal of the original container that we are cloning. Can only be used in
#### **--memory**, **-m**=*limit*
-Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))
+Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
Allows the memory available to a container to be constrained. If the host
supports swap memory, then the **-m** memory setting can be larger than physical
@@ -143,7 +143,7 @@ If no memory limits are specified, the original container's will be used.
#### **--memory-reservation**=*limit*
-Memory soft limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))
+Memory soft limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
After setting memory reservation, when the system detects memory contention
or low memory, containers are forced to restrict their consumption to their
@@ -159,7 +159,7 @@ A limit value equal to memory plus swap. Must be used with the **-m**
the value of --memory if specified. Otherwise, the container being cloned will be used to derive the swap value.
The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes),
-`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a
+`k` (kibibytes), `m` (mebibytes), or `g` (gibibytes). If you don't specify a
unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
#### **--memory-swappiness**=*number*
diff --git a/docs/source/markdown/podman-container.1.md b/docs/source/markdown/podman-container.1.md
index 36623c718..a66e2789d 100644
--- a/docs/source/markdown/podman-container.1.md
+++ b/docs/source/markdown/podman-container.1.md
@@ -15,7 +15,7 @@ The container command allows you to manage containers
| --------- | --------------------------------------------------- | ---------------------------------------------------------------------------- |
| attach | [podman-attach(1)](podman-attach.1.md) | Attach to a running container. |
| checkpoint | [podman-container-checkpoint(1)](podman-container-checkpoint.1.md) | Checkpoints one or more running containers. |
-| cleanup | [podman-container-cleanup(1)](podman-container-cleanup.1.md) | Cleanup the container's network and mountpoints. |
+| cleanup | [podman-container-cleanup(1)](podman-container-cleanup.1.md) | Clean up the container's network and mountpoints. |
| clone | [podman-container-clone(1)](podman-container-clone.1.md) | Creates a copy of an existing container. |
| commit | [podman-commit(1)](podman-commit.1.md) | Create new image based on the changed container. |
| cp | [podman-cp(1)](podman-cp.1.md) | Copy files/folders between a container and the local filesystem. |
diff --git a/docs/source/markdown/podman-cp.1.md b/docs/source/markdown/podman-cp.1.md
index 0c375675d..bb86e3f13 100644
--- a/docs/source/markdown/podman-cp.1.md
+++ b/docs/source/markdown/podman-cp.1.md
@@ -63,6 +63,10 @@ When set to true, files copied to a container will have changed ownership to the
When set to false, maintain uid/gid from archive sources instead of changing them to the primary uid/gid of the destination container.
The default is **true**.
+#### **--overwrite**
+
+Allow directories to be overwritten with non-directories and vice versa. By default, `podman cp` errors out when attempting to overwrite, for instance, a regular file with a directory. Use this option if you want to allow this behavior.
+
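+For illustration only (the container name and paths are hypothetical; assume `/srv/www/config` already exists in the container as a regular file), replacing it with a host directory of the same name requires this option:
+
+```
+$ podman cp --overwrite ./config mywebserver:/srv/www/config
+```
+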
## ALTERNATIVES
Podman has much stronger capabilities than just `podman cp` to achieve copying files between the host and containers.
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index 009209343..de73071c9 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -349,7 +349,7 @@ You need to specify multi option commands in the form of a json string.
Set environment variables
-This option allows arbitrary environment variables that are available for the process to be launched inside of the container. If an environment variable is specified without a value, Podman will check the host environment for a value and set the variable only if it is set on the host. If an environment variable ending in __*__ is specified, Podman will search the host environment for variables starting with the prefix and will add those variables to the container. If an environment variable with a trailing ***** is specified, then a value must be supplied.
+This option allows arbitrary environment variables that are available for the process to be launched inside of the container. If an environment variable is specified without a value, Podman will check the host environment for a value and set the variable only if it is set on the host. As a special case, if an environment variable ending in __*__ is specified without a value, Podman will search the host environment for variables starting with the prefix and will add those variables to the container.
See [**Environment**](#environment) note below for precedence and examples.
@@ -573,7 +573,7 @@ To specify multiple static MAC addresses per container, set multiple networks us
#### **--memory**, **-m**=*limit*
-Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))
+Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
Allows you to constrain the memory available to a container. If the host
supports swap memory, then the **-m** memory setting can be larger than physical
@@ -583,7 +583,7 @@ system's page size (the value would be very large, that's millions of trillions)
#### **--memory-reservation**=*limit*
-Memory soft limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))
+Memory soft limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
After setting memory reservation, when the system detects memory contention
or low memory, containers are forced to restrict their consumption to their
@@ -599,7 +599,7 @@ A limit value equal to memory plus swap. Must be used with the **-m**
the value of --memory.
The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes),
-`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a
+`k` (kibibytes), `m` (mebibytes), or `g` (gibibytes). If you don't specify a
unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
#### **--memory-swappiness**=*number*
@@ -654,7 +654,7 @@ Current supported mount TYPEs are **bind**, **volume**, **image**, **tmpfs** and
· bind-propagation: shared, slave, private, unbindable, rshared, rslave, runbindable, or rprivate(default). See also mount(2).
- . bind-nonrecursive: do not setup a recursive bind mount. By default it is recursive.
+ . bind-nonrecursive: do not set up a recursive bind mount. By default it is recursive.
. relabel: shared, private.
@@ -826,22 +826,27 @@ container.
Rootless containers cannot have more privileges than the account that launched them.
-#### **--publish**, **-p**=*port*
+#### **--publish**, **-p**=[[_ip_:][_hostPort_]:]_containerPort_[/_protocol_]
-Publish a container's port, or range of ports, to the host
+Publish a container's port, or range of ports, to the host.
-Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort`
Both hostPort and containerPort can be specified as a range of ports.
-When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range.
-(e.g., `podman run -p 1234-1236:1222-1224 --name thisWorks -t busybox`
-but not `podman run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`)
-With host IP: `podman run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage`
+When specifying ranges for both, the number of container ports in the
+range must match the number of host ports in the range.
+
If host IP is set to 0.0.0.0 or not set at all, the port will be bound on all IPs on the host.
+
+By default, Podman will publish TCP ports. To publish a UDP port instead, give
+`udp` as protocol. To publish both TCP and UDP ports, set `--publish` twice,
+with `tcp` and `udp` as the protocols, respectively. Rootful containers can also
+publish ports using the `sctp` protocol.
+
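+For example (the ports and image name below are placeholders), publishing the same
+port over both protocols:
+
+```
+$ podman create -p 8080:80/tcp -p 8080:80/udp --name web someimage
+```
+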
Host port does not have to be specified (e.g. `podman run -p 127.0.0.1::80`).
If it is not, the container port will be randomly assigned a port on the host.
-Use `podman port` to see the actual mapping: `podman port CONTAINER $CONTAINERPORT`
-**Note:** if a container will be run within a pod, it is not necessary to publish the port for
+Use **podman port** to see the actual mapping: `podman port $CONTAINER $CONTAINERPORT`.
+
+**Note:** If a container will be run within a pod, it is not necessary to publish the port for
the containers in the pod. The port must only be published by the pod itself. Pod network
stacks act like the network stack on the host - you have a variety of containers in the pod,
and programs in the container, all sharing a single interface and IP address, and
@@ -861,14 +866,14 @@ port to a random port on the host within an *ephemeral port range* defined by
`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host
ports and the exposed ports, use `podman port`.
-#### **--pull**=*missing*
+#### **--pull**=**always**|**missing**|**never**|**newer**
-Pull image before creating ("always"|"missing"|"never") (default "missing").
- 'missing': default value, attempt to pull the latest image from the registries listed in registries.conf if a local image does not exist.Raise an error if the image is not in any listed registry and is not present locally.
- 'always': Pull the image from the first registry it is found in as listed in registries.conf. Raise an error if not found in the registries, even if the image is present locally.
- 'never': do not pull the image from the registry, use only the local version. Raise an error if the image is not present locally.
+Pull image policy. The default is **missing**.
-Defaults to *missing*.
+- **always**: Always pull the image and throw an error if the pull fails.
+- **missing**: Pull the image only if it could not be found in the local containers storage. Throw an error if no image could be found and the pull fails.
+- **never**: Never pull the image but use the one from the local containers storage. Throw an error if no image could be found.
+- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found.
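+
+For example (the image name is a placeholder), to guarantee that only a locally stored image is used:
+
+```
+$ podman create --pull=never --name myctr registry.example.com/myimage:latest
+```
+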
#### **--quiet**, **-q**
@@ -876,11 +881,11 @@ Suppress output information when pulling images
#### **--read-only**
-Mount the container's root filesystem as read only.
+Mount the container's root filesystem as read-only.
By default a container will have its root filesystem writable allowing processes
to write files anywhere. By specifying the `--read-only` flag the container will have
-its root filesystem mounted as read only prohibiting any writes.
+its root filesystem mounted as read-only prohibiting any writes.
#### **--read-only-tmpfs**
@@ -1001,14 +1006,14 @@ Note: Labeling can be disabled for all containers by setting label=false in the
possible mount options are specified in the **proc(5)** man page.
-- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
+- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read-only by default.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read-only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
Note: Labeling can be disabled for all containers by setting label=false in the **containers.conf** (`/etc/containers/containers.conf` or `$HOME/.config/containers/containers.conf`) file.
#### **--shm-size**=*size*
-Size of `/dev/shm` (format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))
+Size of `/dev/shm` (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`.
When size is `0`, there is no limit on the amount of memory used for IPC by the container.
@@ -1256,9 +1261,9 @@ Podman allocates unique ranges of UIDs and GIDs from the `containers` subordinat
**host**: run in the user namespace of the caller. The processes running in the container will have the same privileges on the host as any other process launched by the calling user (default).
-**keep-id**: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is ignored for containers created by the root user.
+**keep-id**: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is not allowed for containers created by the root user.
-**nomap**: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is ignored for containers created by the root user.
+**nomap**: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is not allowed for containers created by the root user.
**ns:**_namespace_: run the container in the given existing user namespace.
@@ -1290,13 +1295,14 @@ The _options_ is a comma-separated list and can be:
* **rw**|**ro**
* **z**|**Z**
-* [**r**]**shared**|[**r**]**slave**|[**r**]**private**[**r**]**unbindable**
-* [**r**]**bind**
-* [**no**]**exec**
-* [**no**]**dev**
-* [**no**]**suid**
* [**O**]
* [**U**]
+* [**no**]**copy**
+* [**no**]**dev**
+* [**no**]**exec**
+* [**no**]**suid**
+* [**r**]**bind**
+* [**r**]**shared**|[**r**]**slave**|[**r**]**private**[**r**]**unbindable**
The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The volume
will be mounted into the container at this directory.
@@ -1404,12 +1410,10 @@ will be visible inside container but not the other way around. <sup>[[1]](#Footn
To control mount propagation property of a volume one can use the [**r**]**shared**,
[**r**]**slave**, [**r**]**private** or the [**r**]**unbindable** propagation flag.
-Propagation property can be specified only for bind mounted volumes and not for
-internal volumes or named volumes. For mount propagation to work the source mount
-point (the mount point where source dir is mounted on) has to have the right propagation
-properties. For shared volumes, the source mount point has to be shared. And for
-slave volumes, the source mount point has to be either shared or slave.
-<sup>[[1]](#Footnote1)</sup>
+For mount propagation to work, the source mount point (the mount point where the source dir
+is mounted) has to have the right propagation properties. For shared volumes, the
+source mount point has to be shared. And for slave volumes, the source mount point
+has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
If you want to recursively mount a volume and all of its submounts into a
container, then you can use the `rbind` option. By default the bind option is
@@ -1600,17 +1604,17 @@ Precedence order (later entries override earlier entries):
- **--env-file** : Any environment variables specified via env-files. If multiple files specified, then they override each other in order of entry.
- **--env** : Any environment variables specified will override previous settings.
-Create containers and set the environment ending with a __*__ and a *****
+Create containers and pass environment variables whose names end with a __*__.
+The trailing __*__ glob functionality is only active when no value is specified:
```
$ export ENV1=a
-$ podman create --name ctr --env ENV* alpine printenv ENV1
-$ podman start --attach ctr
-a
-
-$ podman create --name ctr --env ENV*****=b alpine printenv ENV*****
-$ podman start --attach ctr
-b
+$ podman create --name ctr1 --env 'ENV*' alpine env
+$ podman start --attach ctr1 | grep ENV
+ENV1=a
+$ podman create --name ctr2 --env 'ENV*=b' alpine env
+$ podman start --attach ctr2 | grep ENV
+ENV*=b
```
## CONMON
diff --git a/docs/source/markdown/podman-exec.1.md b/docs/source/markdown/podman-exec.1.md
index 5fb4ceace..da61f3456 100644
--- a/docs/source/markdown/podman-exec.1.md
+++ b/docs/source/markdown/podman-exec.1.md
@@ -21,10 +21,11 @@ Start the exec session, but do not attach to it. The command will run in the bac
Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
-#### **--env**, **-e**
+#### **--env**, **-e**=*env*
-You may specify arbitrary environment variables that are available for the
-command to be executed.
+Set environment variables.
+
+This option allows arbitrary environment variables that are available for the process to be launched inside of the container. If an environment variable is specified without a value, Podman will check the host environment for a value and set the variable only if it is set on the host. As a special case, if an environment variable ending in __*__ is specified without a value, Podman will search the host environment for variables starting with the prefix and will add those variables to the container.
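+
+A minimal illustrative example (the variable and container names are hypothetical), passing a variable from the host environment into the exec session:
+
+```
+$ export DEBUG=1
+$ podman exec --env DEBUG mycontainer printenv DEBUG
+1
+```
+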
#### **--env-file**=*file*
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index 8c3c32d04..56ad4e446 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -14,6 +14,17 @@ Generating unit files for a pod requires the pod to be created with an infra con
_Note: If you use this command with the remote client, including Mac and Windows (excluding WSL2) machines, you would still have to place the generated units on the remote system. Moreover, please make sure that the XDG_RUNTIME_DIR environment variable is set. If unset, you may set it via `export XDG_RUNTIME_DIR=/run/user/$(id -u)`._
+### Kubernetes Integration
+
+A Kubernetes YAML can be executed in systemd via the `podman-kube@.service` systemd template. The template's argument is the path to the YAML file. Given a `workload.yaml` file in the home directory, it can be executed as follows:
+
+```
+$ escaped=$(systemd-escape ~/workload.yaml)
+$ systemctl --user start podman-kube@$escaped.service
+$ systemctl --user is-active podman-kube@$escaped.service
+active
+```
+
## OPTIONS
#### **--after**=*dependency_name*
diff --git a/docs/source/markdown/podman-image-scp.1.md b/docs/source/markdown/podman-image-scp.1.md
index 1d902da91..b6b610a7d 100644
--- a/docs/source/markdown/podman-image-scp.1.md
+++ b/docs/source/markdown/podman-image-scp.1.md
@@ -33,7 +33,7 @@ Suppress the output
```
$ podman image scp alpine
-Loaded image(s): docker.io/library/alpine:latest
+Loaded image: docker.io/library/alpine:latest
```
```
@@ -43,12 +43,12 @@ Copying blob 72e830a4dff5 done
Copying config 85f9dc67c7 done
Writing manifest to image destination
Storing signatures
-Loaded image(s): docker.io/library/alpine:latest
+Loaded image: docker.io/library/alpine:latest
```
```
$ podman image scp Fedora::alpine RHEL::
-Loaded image(s): docker.io/library/alpine:latest
+Loaded image: docker.io/library/alpine:latest
```
```
@@ -59,7 +59,7 @@ Copying blob 9450ef9feb15 [--------------------------------------] 0.0b / 0.0b
Copying config 1f97f0559c done
Writing manifest to image destination
Storing signatures
-Loaded image(s): docker.io/library/alpine:latest
+Loaded image: docker.io/library/alpine:latest
```
```
@@ -73,7 +73,7 @@ Copying blob 5eb901baf107 skipped: already exists
Copying config 696d33ca15 done
Writing manifest to image destination
Storing signatures
-Loaded image(s): docker.io/library/alpine:latest
+Loaded image: docker.io/library/alpine:latest
```
```
@@ -87,7 +87,7 @@ Copying blob 5eb901baf107
Copying config 696d33ca15 done
Writing manifest to image destination
Storing signatures
-Loaded image(s): docker.io/library/alpine:latest
+Loaded image: docker.io/library/alpine:latest
```
## SEE ALSO
diff --git a/docs/source/markdown/podman-kill.1.md b/docs/source/markdown/podman-kill.1.md
index 35ca9f74f..a4f80ac81 100644
--- a/docs/source/markdown/podman-kill.1.md
+++ b/docs/source/markdown/podman-kill.1.md
@@ -14,7 +14,7 @@ The main process inside each container specified will be sent SIGKILL, or any si
## OPTIONS
#### **--all**, **-a**
-Signal all running containers. This does not include paused containers.
+Signal all running and paused containers.
#### **--cidfile**
diff --git a/docs/source/markdown/podman-machine-info.1.md b/docs/source/markdown/podman-machine-info.1.md
new file mode 100644
index 000000000..33c207d32
--- /dev/null
+++ b/docs/source/markdown/podman-machine-info.1.md
@@ -0,0 +1,36 @@
+% podman-machine-info(1)
+
+## NAME
+podman\-machine\-info - Display machine host info
+
+## SYNOPSIS
+**podman machine info**
+
+## DESCRIPTION
+
+Display information pertaining to the machine host.
+Rootless only, as all `podman machine` commands can only be used with rootless Podman.
+
+## OPTIONS
+
+#### **--format**=*format*, **-f**
+
+Change output format to "json" or a Go template.
+
+#### **--help**
+
+Print usage statement.
+
+## EXAMPLES
+
+```
+$ podman machine info
+$ podman machine info --format json
+$ podman machine info --format {{.Host.Arch}}
+```
+
+## SEE ALSO
+**[podman(1)](podman.1.md)**, **[podman-machine(1)](podman-machine.1.md)**
+
+## HISTORY
+June 2022, Originally compiled by Ashley Cui <acui@redhat.com>
diff --git a/docs/source/markdown/podman-machine-init.1.md b/docs/source/markdown/podman-machine-init.1.md
index 33947bbba..21c98b2c7 100644
--- a/docs/source/markdown/podman-machine-init.1.md
+++ b/docs/source/markdown/podman-machine-init.1.md
@@ -10,9 +10,12 @@ podman\-machine\-init - Initialize a new virtual machine
Initialize a new virtual machine for Podman.
-Podman on macOS requires a virtual machine. This is because containers are Linux -
+Rootless only.
+
+Podman on macOS and Windows requires a virtual machine. This is because containers are Linux -
containers do not run on any other OS because containers' core functionality are
-tied to the Linux kernel.
+tied to the Linux kernel. Podman machine is required on macOS and Windows, and can
+optionally be used on Linux.
**podman machine init** initializes a new Linux virtual machine where containers are run.
SSH keys are automatically generated to access the VM, and system connections to the root account
@@ -73,15 +76,33 @@ Set the timezone for the machine and containers. Valid values are `local` or
a `timezone` such as `America/Chicago`. A value of `local`, which is the default,
means to use the timezone of the machine host.
-#### **--volume**, **-v**=*source:target*
+#### **--volume**, **-v**=*source:target[:options]*
Mounts a volume from source to target.
Create a mount. If /host-dir:/machine-dir is specified as the `*source:target*`,
Podman mounts _host-dir_ in the host to _machine-dir_ in the Podman machine.
-The root filesystem is mounted read-only in the default operating system,
-so mounts must be created under the /mnt directory.
+Additional options may be specified as a comma-separated string. Recognized
+options are:
+* **ro**: mount volume read-only
+* **rw**: mount volume read/write (default)
+* **security_model=[model]**: specify 9p security model (see below)
+
+The 9p security model [determines](https://wiki.qemu.org/Documentation/9psetup#Starting_the_Guest_directly)
+if and how the 9p filesystem translates some filesystem operations before
+actual storage on the host. The
+default value of *mapped-xattr* specifies that 9p stores symlinks and some file
+attributes as extended attributes on the host. This is suitable when the host
+and the guest do not need to interoperate on the shared filesystem, but has
+caveats for actual shared access; notably, symlinks on the host are not usable
+on the guest and vice versa. If interoperability is required, then choose
+*none* instead, but keep in mind that the guest will not be able to do things
+that the user running the virtual machine cannot do, e.g. create files owned by
+another user. Using *none* is almost certainly the best choice for read-only
+volumes.
+
+Example: `-v "$HOME/git:$HOME/git:ro,security_model=none"`
Default volume mounts are defined in *containers.conf*. Unless changed, the default value
is `$HOME:$HOME`.
diff --git a/docs/source/markdown/podman-machine-inspect.1.md b/docs/source/markdown/podman-machine-inspect.1.md
index 38eb66b0d..29cd775c2 100644
--- a/docs/source/markdown/podman-machine-inspect.1.md
+++ b/docs/source/markdown/podman-machine-inspect.1.md
@@ -13,6 +13,8 @@ Inspect one or more virtual machines
Obtain greater detail about Podman virtual machines. More than one virtual machine can be
inspected at once.
+Rootless only.
+
## OPTIONS
#### **--format**
diff --git a/docs/source/markdown/podman-machine-list.1.md b/docs/source/markdown/podman-machine-list.1.md
index 0c5310463..a25aae090 100644
--- a/docs/source/markdown/podman-machine-list.1.md
+++ b/docs/source/markdown/podman-machine-list.1.md
@@ -12,9 +12,12 @@ podman\-machine\-list - List virtual machines
List Podman managed virtual machines.
-Podman on macOS requires a virtual machine. This is because containers are Linux -
-containers do not run on any other OS because containers' core functionality is
-tied to the Linux kernel.
+Podman on macOS and Windows requires a virtual machine. This is because containers are Linux -
+containers do not run on any other OS because containers' core functionality is
+tied to the Linux kernel. Podman machine is required on macOS and Windows, and can
+optionally be used on Linux.
+
+Rootless only.
## OPTIONS
diff --git a/docs/source/markdown/podman-machine-rm.1.md b/docs/source/markdown/podman-machine-rm.1.md
index 4a2c59173..d90b615ce 100644
--- a/docs/source/markdown/podman-machine-rm.1.md
+++ b/docs/source/markdown/podman-machine-rm.1.md
@@ -16,6 +16,7 @@ generated for that VM are also removed as is its image file on the filesystem.
Users get a display of what will be deleted and are required to confirm unless the option `--force`
is used.
+Rootless only.
## OPTIONS
diff --git a/docs/source/markdown/podman-machine-set.1.md b/docs/source/markdown/podman-machine-set.1.md
index de90ee4b0..1daf97a61 100644
--- a/docs/source/markdown/podman-machine-set.1.md
+++ b/docs/source/markdown/podman-machine-set.1.md
@@ -10,6 +10,8 @@ podman\-machine\-set - Sets a virtual machine setting
Change a machine setting.
+Rootless only.
+
## OPTIONS
#### **--cpus**=*number*
diff --git a/docs/source/markdown/podman-machine-ssh.1.md b/docs/source/markdown/podman-machine-ssh.1.md
index db0350961..5432f0e9f 100644
--- a/docs/source/markdown/podman-machine-ssh.1.md
+++ b/docs/source/markdown/podman-machine-ssh.1.md
@@ -14,6 +14,9 @@ first argument must be the virtual machine name. The optional command to
execute can then follow. If no command is provided, an interactive session
with the virtual machine is established.
+The exit code from the ssh command will be forwarded to the podman machine ssh caller; see [Exit Codes](#Exit-Codes).
+
+Rootless only.
## OPTIONS
@@ -25,6 +28,35 @@ Print usage statement.
Username to use when SSH-ing into the VM.
+## Exit Codes
+
+The exit code from `podman machine ssh` gives information about why the command failed.
+When `podman machine ssh` commands exit with a non-zero code,
+the exit codes follow the `chroot` standard, see below:
+
+ **125** The error is with podman **_itself_**
+
+ $ podman machine ssh --foo; echo $?
+ Error: unknown flag: --foo
+ 125
+
+ **126** Executing a _contained command_ and the _command_ cannot be invoked
+
+ $ podman machine ssh /etc; echo $?
+ Error: fork/exec /etc: permission denied
+ 126
+
+ **127** Executing a _contained command_ and the _command_ cannot be found
+
+ $ podman machine ssh foo; echo $?
+ Error: fork/exec /usr/bin/bogus: no such file or directory
+ 127
+
+ **Exit code** _contained command_ exit code
+
+ $ podman machine ssh /bin/sh -c 'exit 3'; echo $?
+ 3
+
## EXAMPLES
To get an interactive session with the default virtual machine:
diff --git a/docs/source/markdown/podman-machine-start.1.md b/docs/source/markdown/podman-machine-start.1.md
index e55dcab13..b92494dda 100644
--- a/docs/source/markdown/podman-machine-start.1.md
+++ b/docs/source/markdown/podman-machine-start.1.md
@@ -10,9 +10,12 @@ podman\-machine\-start - Start a virtual machine
Starts a virtual machine for Podman.
-Podman on macOS requires a virtual machine. This is because containers are Linux -
+Rootless only.
+
+Podman on macOS and Windows requires a virtual machine. This is because containers are Linux -
containers do not run on any other OS because containers' core functionality are
-tied to the Linux kernel.
+tied to the Linux kernel. Podman machine is required on macOS and Windows, and can
+optionally be used on Linux.
Only one Podman managed VM can be active at a time. If a VM is already running,
`podman machine start` will return an error.
diff --git a/docs/source/markdown/podman-machine-stop.1.md b/docs/source/markdown/podman-machine-stop.1.md
index 9aa781561..29f3e81f4 100644
--- a/docs/source/markdown/podman-machine-stop.1.md
+++ b/docs/source/markdown/podman-machine-stop.1.md
@@ -10,9 +10,12 @@ podman\-machine\-stop - Stop a virtual machine
Stops a virtual machine.
-Podman on macOS requires a virtual machine. This is because containers are Linux -
+Rootless only.
+
+Podman on macOS and Windows requires a virtual machine. This is because containers are Linux -
containers do not run on any other OS because containers' core functionality are
-tied to the Linux kernel.
+tied to the Linux kernel. Podman machine is required on macOS and Windows, and can
+optionally be used on Linux.
**podman machine stop** stops a Linux virtual machine where containers are run.
diff --git a/docs/source/markdown/podman-machine.1.md b/docs/source/markdown/podman-machine.1.md
index e9f6c7d20..6197b8d4e 100644
--- a/docs/source/markdown/podman-machine.1.md
+++ b/docs/source/markdown/podman-machine.1.md
@@ -7,12 +7,20 @@ podman\-machine - Manage Podman's virtual machine
**podman machine** *subcommand*
## DESCRIPTION
-`podman machine` is a set of subcommands that manage Podman's virtual machine on macOS.
+`podman machine` is a set of subcommands that manage Podman's virtual machine.
+
+Podman on macOS and Windows requires a virtual machine. This is because containers are Linux -
+containers do not run on any other OS because containers' core functionality is
+tied to the Linux kernel. Podman machine is required on macOS and Windows, and can
+optionally be used on Linux.
+
+All `podman machine` commands are rootless only.
## SUBCOMMANDS
| Command | Man Page | Description |
|---------|------------------------------------------------------|-----------------------------------|
+| info | [podman-machine-info(1)](podman-machine-info.1.md) | Display machine host info |
| init | [podman-machine-init(1)](podman-machine-init.1.md) | Initialize a new virtual machine |
| inspect | [podman-machine-inspect(1)](podman-machine-inspect.1.md) | Inspect one or more virtual machines |
| list | [podman-machine-list(1)](podman-machine-list.1.md) | List virtual machines |
@@ -23,7 +31,7 @@ podman\-machine - Manage Podman's virtual machine
| stop | [podman-machine-stop(1)](podman-machine-stop.1.md) | Stop a virtual machine |
## SEE ALSO
-**[podman(1)](podman.1.md)**, **[podman-machine-init(1)](podman-machine-init.1.md)**, **[podman-machine-list(1)](podman-machine-list.1.md)**, **[podman-machine-rm(1)](podman-machine-rm.1.md)**, **[podman-machine-ssh(1)](podman-machine-ssh.1.md)**, **[podman-machine-start(1)](podman-machine-start.1.md)**, **[podman-machine-stop(1)](podman-machine-stop.1.md)**, **[podman-machine-inspect(1)](podman-machine-inspect.1.md)**
+**[podman(1)](podman.1.md)**, **[podman-machine-info(1)](podman-machine-info.1.md)**, **[podman-machine-init(1)](podman-machine-init.1.md)**, **[podman-machine-list(1)](podman-machine-list.1.md)**, **[podman-machine-rm(1)](podman-machine-rm.1.md)**, **[podman-machine-ssh(1)](podman-machine-ssh.1.md)**, **[podman-machine-start(1)](podman-machine-start.1.md)**, **[podman-machine-stop(1)](podman-machine-stop.1.md)**, **[podman-machine-inspect(1)](podman-machine-inspect.1.md)**
## HISTORY
March 2021, Originally compiled by Ashley Cui <acui@redhat.com>
diff --git a/docs/source/markdown/podman-network-create.1.md b/docs/source/markdown/podman-network-create.1.md
index 0cdb6fe88..1d89b12e3 100644
--- a/docs/source/markdown/podman-network-create.1.md
+++ b/docs/source/markdown/podman-network-create.1.md
@@ -4,7 +4,7 @@
podman\-network-create - Create a Podman network
## SYNOPSIS
-**podman network create** [*options*] name
+**podman network create** [*options*] [*name*]
## DESCRIPTION
Create a CNI-network configuration for use with Podman. By default, Podman creates a bridge connection.
diff --git a/docs/source/markdown/podman-network-ls.1.md b/docs/source/markdown/podman-network-ls.1.md
index b341083f9..3c696d404 100644
--- a/docs/source/markdown/podman-network-ls.1.md
+++ b/docs/source/markdown/podman-network-ls.1.md
@@ -25,6 +25,7 @@ Supported filters:
| label | Filter by network with (or without, in the case of label!=[...] is used) the specified labels. |
| name | Filter by network name (accepts `regex`). |
| until | Filter by networks created before given timestamp. |
+| dangling | Filter by networks with no containers attached. |
The `driver` filter accepts values: `bridge`, `macvlan`, `ipvlan`.
@@ -33,6 +34,8 @@ The `label` *filter* accepts two formats. One is the `label`=*key* or `label`=*k
The `until` *filter* can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 10m, 1h30m) computed relative to the machine’s time.
+The `dangling` *filter* accepts values `true` or `false`.
+
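+For example, to list only networks that have no containers attached (output depends on the local configuration and is omitted here):
+
+```
+$ podman network ls --filter dangling=true
+```
+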
#### **--format**=*format*
Change the default output format. This can be of a supported type like 'json'
diff --git a/docs/source/markdown/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index 5c4bdc8c4..92cb694b0 100644
--- a/docs/source/markdown/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
@@ -20,7 +20,7 @@ Currently, the supported Kubernetes kinds are:
`Kubernetes Pods or Deployments`
-Only two volume types are supported by play kube, the *hostPath* and *persistentVolumeClaim* volume types. For the *hostPath* volume type, only the *default (empty)*, *DirectoryOrCreate*, *Directory*, *FileOrCreate*, *File*, and *Socket* subtypes are supported. The *CharDevice* and *BlockDevice* subtypes are not supported. Podman interprets the value of *hostPath* *path* as a file path when it contains at least one forward slash, otherwise Podman treats the value as the name of a named volume. When using a *persistentVolumeClaim*, the value for *claimName* is the name for the Podman named volume.
+Only two volume types are supported by play kube, the *hostPath* and *persistentVolumeClaim* volume types. For the *hostPath* volume type, only the *default (empty)*, *DirectoryOrCreate*, *Directory*, *FileOrCreate*, *File*, *Socket*, *CharDevice* and *BlockDevice* subtypes are supported. Podman interprets the value of *hostPath* *path* as a file path when it contains at least one forward slash, otherwise Podman treats the value as the name of a named volume. When using a *persistentVolumeClaim*, the value for *claimName* is the name for the Podman named volume.
Note: When playing a kube YAML with init containers, the init container will be created with init type value `always`.
@@ -103,6 +103,19 @@ spec:
and as a result environment variable `FOO` will be set to `bar` for container `container-1`.
+### Systemd Integration
+
+A Kubernetes YAML can be executed in systemd via the `podman-kube@.service` systemd template. The template's argument is the path to the YAML file. Given a `workload.yaml` file in the home directory, it can be executed as follows:
+
+```
+$ escaped=$(systemd-escape ~/workload.yaml)
+$ systemctl --user start podman-kube@$escaped.service
+$ systemctl --user is-active podman-kube@$escaped.service
+active
+```
+
+Note that the path to the YAML file must be escaped via `systemd-escape`.
+
## OPTIONS
#### **--annotation**=*key=value*
@@ -276,9 +289,9 @@ Podman allocates unique ranges of UIDs and GIDs from the `containers` subordinat
**host**: create a new namespace for the container.
-**keep-id**: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is ignored for containers created by the root user.
+**keep-id**: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is not allowed for containers created by the root user.
-**nomap**: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is ignored for containers created by the root user.
+**nomap**: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is not allowed for containers created by the root user.
**ns:**_namespace_: run the pod in the given existing user namespace.
diff --git a/docs/source/markdown/podman-pod-clone.1.md b/docs/source/markdown/podman-pod-clone.1.md
new file mode 100644
index 000000000..d90d1efb9
--- /dev/null
+++ b/docs/source/markdown/podman-pod-clone.1.md
@@ -0,0 +1,443 @@
+% podman-pod-clone(1)
+
+## NAME
+podman\-pod\-clone - Creates a copy of an existing pod
+
+## SYNOPSIS
+**podman pod clone** [*options*] *pod* *name*
+
+## DESCRIPTION
+**podman pod clone** creates a copy of a pod, recreating the identical config for the pod and for all of its containers. Users can modify the new pod's name and select pod details within the infra container.
+
+## OPTIONS
+
+#### **--cgroup-parent**=*path*
+
+Path to cgroups under which the cgroup for the pod will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+
+#### **--cpus**
+
+Set a number of CPUs for the pod that overrides the original pod's CPU limits. If none are specified, the original pod's Nano CPUs are used.
+
+#### **--cpuset-cpus**
+
+CPUs in which to allow execution (0-3, 0,1). If none are specified, the original pod's CPUset is used.
+
+#### **--destroy**
+
+Remove the original pod that we are cloning once it has been used to mimic the configuration.
+
+#### **--device**=_host-device_[**:**_container-device_][**:**_permissions_]
+
+Add a host device to the pod. Optional *permissions* parameter
+can be used to specify device permissions. It is a combination of
+**r** for read, **w** for write, and **m** for **mknod**(2).
+
+Example: **--device=/dev/sdc:/dev/xvdc:rwm**.
+
+Note: if _host-device_ is a symbolic link, it will be resolved first.
+The pod will only store the major and minor numbers of the host device.
+
+Note: the pod implements devices by storing the initial configuration passed by the user and recreating the device on each container added to the pod.
+
+Podman may load kernel modules required for using the specified
+device. The devices that Podman will load modules for when necessary are:
+/dev/fuse.
+
+#### **--device-read-bps**=*path*
+
+Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb).
+
+#### **--gidmap**=*pod_gid:host_gid:amount*
+
+GID map for the user namespace. Using this flag will run all containers in the pod with user namespace enabled. It conflicts with the `--userns` and `--subgidname` flags.
+
+#### **--help**, **-h**
+
+Print usage statement.
+
+#### **--hostname**=*name*
+
+Set a hostname for the pod.
+
+#### **--infra-command**=*command*
+
+The command that will be run to start the infra container. Default: "/pause".
+
+#### **--infra-conmon-pidfile**=*file*
+
+Write the pid of the infra container's **conmon** process to a file. As **conmon** runs in a separate process from Podman, this is necessary when using systemd to manage Podman containers and pods.
+
+#### **--infra-name**=*name*
+
+The name that will be used for the pod's infra container.
+
+#### **--label**=*label*, **-l**
+
+Add metadata to a pod (e.g., --label com.example.key=value).
+
+#### **--label-file**=*label*
+
+Read in a line delimited file of labels.
+
+#### **--memory**, **-m**=*limit*
+
+Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
+
+Constrains the memory available to a container. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited. The actual limit may be rounded up to a multiple of the operating
+system's page size (the value would be very large, that's millions of trillions).
+
+#### **--name**, **-n**
+
+Set a custom name for the cloned pod. If no name is specified, the default is **<ORIGINAL_NAME>-clone**.
+
+#### **--pid**=*pid*
+
+Set the PID mode for the pod. The default is to create a private PID namespace for the pod. Requires the PID namespace to be shared via --share.
+
+ host: use the host’s PID namespace for the pod
+ ns: join the specified PID namespace
+ private: create a new namespace for the pod (default)
+
+#### **--security-opt**=*option*
+
+Security Options
+
+- `apparmor=unconfined` : Turn off apparmor confinement for the pod
+- `apparmor=your-profile` : Set the apparmor confinement profile for the pod
+
+- `label=user:USER` : Set the label user for the pod processes
+- `label=role:ROLE` : Set the label role for the pod processes
+- `label=type:TYPE` : Set the label process type for the pod processes
+- `label=level:LEVEL` : Set the label level for the pod processes
+- `label=filetype:TYPE` : Set the label file type for the pod files
+- `label=disable` : Turn off label separation for the pod
+
+Note: Labeling can be disabled for all pods/containers by setting label=false in the **containers.conf** (`/etc/containers/containers.conf` or `$HOME/.config/containers/containers.conf`) file.
+
+- `mask=/path/1:/path/2` : The paths to mask separated by a colon. A masked path
+ cannot be accessed inside the containers within the pod.
+
+- `no-new-privileges` : Disable container processes from gaining additional privileges.
+
+- `seccomp=unconfined` : Turn off seccomp confinement for the pod
+- `seccomp=profile.json` : Whitelisted syscalls seccomp JSON file to be used as a seccomp filter
+
+- `proc-opts=OPTIONS` : Comma-separated list of options to use for the /proc mount. More details for the
+ possible mount options are specified in the **proc(5)** man page.
+
+- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read-only by default.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read-only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
+
+Note: Labeling can be disabled for all containers by setting label=false in the **containers.conf** (`/etc/containers/containers.conf` or `$HOME/.config/containers/containers.conf`) file.
+
+#### **--shm-size**=*size*
+
+Size of `/dev/shm` (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
+If the unit is omitted, the system uses bytes. If the size is omitted, the system uses `64m`.
+When size is `0`, there is no limit on the amount of memory used for IPC by the pod. This option conflicts with **--ipc=host** when running containers.
+
+#### **--start**
+
+When set to true, this flag starts the newly created pod after the
+clone process has completed. All containers within the pod are started.
+
+#### **--subgidname**=*name*
+
+Name for GID map from the `/etc/subgid` file. Using this flag will run the container with user namespace enabled. This flag conflicts with `--userns` and `--gidmap`.
+
+#### **--subuidname**=*name*
+
+Name for UID map from the `/etc/subuid` file. Using this flag will run the container with user namespace enabled. This flag conflicts with `--userns` and `--uidmap`.
+
+#### **--sysctl**=_name_=_value_
+
+Configure namespace kernel parameters for all containers in the new pod.
+
+For the IPC namespace, the following sysctls are allowed:
+
+- kernel.msgmax
+- kernel.msgmnb
+- kernel.msgmni
+- kernel.sem
+- kernel.shmall
+- kernel.shmmax
+- kernel.shmmni
+- kernel.shm_rmid_forced
+- Sysctls beginning with fs.mqueue.\*
+
+Note: if the ipc namespace is not shared within the pod, these sysctls are not allowed.
+
+For the network namespace, only sysctls beginning with net.\* are allowed.
+
+Note: if the network namespace is not shared within the pod, these sysctls are not allowed.
+
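+For example (the pod names and value are illustrative, and the IPC namespace is assumed to be shared within the pod), setting an IPC sysctl while cloning:
+
+```
+# podman pod clone --sysctl kernel.msgmax=32768 mypod mypod-clone
+```
+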
+#### **--uidmap**=*container_uid*:*from_uid*:*amount*
+
+Run all containers in the pod in a new user namespace using the supplied mapping. This
+option conflicts with the **--userns** and **--subuidname** options. This
+option provides a way to map host UIDs to container UIDs. It can be passed
+several times to map different ranges.
+
+#### **--userns**=*mode*
+
+Set the user namespace mode for all the containers in a pod. It defaults to the **PODMAN_USERNS** environment variable. An empty value ("") means user namespaces are disabled.
+
+Rootless user --userns=Key mappings:
+
+Key | Host User | Container User
+----------|---------------|---------------------
+"" |$UID |0 (Default User account mapped to root user in container.)
+keep-id |$UID |$UID (Map user account to same UID within container.)
+auto |$UID | nil (Host User UID is not mapped into container.)
+nomap |$UID | nil (Host User UID is not mapped into container.)
+
+Valid _mode_ values are:
+
+ - *auto[:*_OPTIONS,..._*]*: automatically create a namespace. It is possible to specify these options to `auto`:
+
+ - *gidmapping=*_CONTAINER_GID:HOST_GID:SIZE_ to force a GID mapping to be present in the user namespace.
+
+ - *size=*_SIZE_: to specify an explicit size for the automatic user namespace. e.g. `--userns=auto:size=8192`. If `size` is not specified, `auto` will estimate a size for the user namespace.
+
+ - *uidmapping=*_CONTAINER_UID:HOST_UID:SIZE_ to force a UID mapping to be present in the user namespace.
+
+ - *host*: run in the user namespace of the caller. The processes running in the container will have the same privileges on the host as any other process launched by the calling user (default).
+
+ - *keep-id*: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is ignored for containers created by the root user.
+
+ - *nomap*: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is ignored for containers created by the root user.
+
+#### **--uts**=*mode*
+
+Set the UTS namespace mode for the pod. The following values are supported:
+
+- **host**: use the host's UTS namespace inside the pod.
+- **private**: create a new namespace for the pod (default).
+- **ns:[path]**: run the pod in the given existing UTS namespace.
+
+
+#### **--volume**, **-v**[=*[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]
+
+Create a bind mount. If `-v /HOST-DIR:/CONTAINER-DIR` is specified, Podman
+bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Podman
+container. Similarly, `-v SOURCE-VOLUME:/CONTAINER-DIR` will mount the volume
+in the host to the container. If no such named volume exists, Podman will
+create one. The `OPTIONS` are a comma-separated list and can be: <sup>[[1]](#Footnote1)</sup> (Note when using the remote client, including Mac and Windows (excluding WSL2) machines, the volumes will be mounted from the remote server, not necessarily the client machine.)
+
+The _options_ is a comma-separated list and can be:
+
+* **rw**|**ro**
+* **z**|**Z**
+* [**r**]**shared**|[**r**]**slave**|[**r**]**private**[**r**]**unbindable**
+* [**r**]**bind**
+* [**no**]**exec**
+* [**no**]**dev**
+* [**no**]**suid**
+* [**O**]
+* [**U**]
+
+The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The volume
+will be mounted into the container at this directory.
+
+Volumes may specify a source as well, as either a directory on the host
+or the name of a named volume. If no source is given, the volume will be created as an
+anonymously named volume with a randomly generated name, and will be removed when
+the pod is removed via the `--rm` flag or `podman rm --volumes` commands.
+
+If a volume source is specified, it must be a path on the host or the name of a
+named volume. Host paths are allowed to be absolute or relative; relative paths
+are resolved relative to the directory Podman is run in. If the source does not
+exist, Podman will return an error. Users must pre-create the source files or
+directories.
+
+Any source that does not begin with a `.` or `/` will be treated as the name of
+a named volume. If a volume with that name does not exist, it will be created.
+Volumes created with names are not anonymous, and they are not removed by the `--rm`
+option and the `podman rm --volumes` command.
+
+Specify multiple **-v** options to mount one or more volumes into a
+pod.
+
+ `Write Protected Volume Mounts`
+
+Add `:ro` or `:rw` suffix to a volume to mount it read-only or
+read-write mode, respectively. By default, the volumes are mounted read-write.
+See examples.
+
+ `Chowning Volume Mounts`
+
+By default, Podman does not change the owner and group of source volume
+directories mounted into containers. If a pod is created in a new user
+namespace, the UID and GID in the container may correspond to another UID and
+GID on the host.
+
+The `:U` suffix tells Podman to use the correct host UID and GID based on the
+UID and GID within the pod, to change recursively the owner and group of
+the source volume.
+
+**Warning** use with caution since this will modify the host filesystem.
+
+ `Labeling Volume Mounts`
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a pod. Without a label, the security system might
+prevent the processes running inside the pod from using the content. By
+default, Podman does not change the labels set by the OS.
+
+To change a label in the pod context, add either of two suffixes
+`:z` or `:Z` to the volume mount. These suffixes tell Podman to relabel file
+objects on the shared volumes. The `z` option tells Podman that two pods
+share the volume content. As a result, Podman labels the content with a shared
+content label. Shared volume labels allow all containers to read/write content.
+The `Z` option tells Podman to label the content with a private unshared label.
+Only the current pod can use a private volume.
+
+ `Overlay Volume Mounts`
+
+ The `:O` flag tells Podman to mount the directory from the host as a
+temporary storage using the `overlay file system`. The pod processes
+can modify content within the mountpoint which is stored in the
+container storage in a separate directory. In overlay terms, the source
+directory will be the lower, and the container storage directory will be the
+upper. Modifications to the mount point are destroyed when the pod
+finishes executing, similar to a tmpfs mount point being unmounted.
+
+ Subsequent executions of the container will see the original source directory
+content; any changes from previous pod executions no longer exist.
+
+ One use case of the overlay mount is sharing the package cache from the
+host into the container to allow speeding up builds.
+
+ Note:
+
+ - The `O` flag conflicts with other options listed above.
+Content mounted into the container is labeled with the private label.
+ On SELinux systems, labels in the source directory must be readable
+by the infra container label. Usually containers can read/execute `container_share_t`
+and can read/write `container_file_t`. If unable to change the labels on a
+source volume, SELinux container separation must be disabled for the infra container/pod
+to work.
+ - The source directory mounted into the pod with an overlay mount
+should not be modified, as it can cause unexpected failures. It is recommended
+to not modify the directory until the container finishes running.
+
+ `Mounts propagation`
+
+By default, bind mounted volumes are `private`. That means any mounts done
+inside the pod will not be visible on the host and vice versa. One can change
+this behavior by specifying a volume mount propagation property. Making a
+volume `shared` means that mounts done under that volume inside the pod will be
+visible on the host and vice versa. Making a volume `slave` enables one-way mount
+propagation: mounts done on the host under that volume
+will be visible inside the container but not the other way around. <sup>[[1]](#Footnote1)</sup>
+
+To control mount propagation property of a volume one can use the [**r**]**shared**,
+[**r**]**slave**, [**r**]**private** or the [**r**]**unbindable** propagation flag.
+Propagation property can be specified only for bind mounted volumes and not for
+internal volumes or named volumes. For mount propagation to work, the source mount
+point (the mount point where the source dir is mounted) has to have the right propagation
+properties. For shared volumes, the source mount point has to be shared. And for
+slave volumes, the source mount point has to be either shared or slave.
+<sup>[[1]](#Footnote1)</sup>
+
+To recursively mount a volume and all of its submounts into a
+pod, use the `rbind` option. By default the bind option is
+used, and submounts of the source directory will not be mounted into the
+pod.
+
+Mounting the volume with the `nosuid` option means that SUID applications on
+the volume will not be able to change their privilege. By default volumes
+are mounted with `nosuid`.
+
+Mounting the volume with the `noexec` option means that no executables on the
+volume can be executed within the pod.
+
+Mounting the volume with the `nodev` option means that no devices on the volume
+will be able to be used by processes within the pod. By default volumes
+are mounted with `nodev`.
+
+If the `<source-dir>` is a mount point, then "dev", "suid", and "exec" options are
+ignored by the kernel.
+
+Use `df <source-dir>` to figure out the source mount, then use
+`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out the propagation
+properties of the source mount. If the `findmnt` utility is not available, one
+can look at the mount entry for the source mount point in `/proc/self/mountinfo`. Look
+at the `optional fields` and see if any propagation properties are specified.
+`shared:X` means the mount is `shared`, `master:X` means the mount is `slave`, and if
+nothing is there, the mount is `private`. <sup>[[1]](#Footnote1)</sup>
+
+To change the propagation properties of a mount point, use the `mount` command. For
+example, to bind mount the source directory `/foo`, one can do
+`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This
+converts `/foo` into a `shared` mount point. Alternatively, one can directly
+change the propagation properties of the source mount. Say `/` is the source mount for
+`/foo`; then use `mount --make-shared /` to convert `/` into a `shared` mount.
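+
+For example, to check and then change the propagation of an illustrative source
+mount point `/mnt/data` (the path and output shown are placeholders and will differ per system):
+
+```
+$ findmnt -o TARGET,PROPAGATION /mnt/data
+TARGET     PROPAGATION
+/mnt/data  private
+$ sudo mount --make-shared /mnt/data
+$ findmnt -o TARGET,PROPAGATION /mnt/data
+TARGET     PROPAGATION
+/mnt/data  shared
+```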
+
+Note: if the user only has access rights via a group, accessing the volume
+from inside a rootless pod will fail.
+
+#### **--volumes-from**[=*CONTAINER*[:*OPTIONS*]]
+
+Mount volumes from the specified container(s). Used to share volumes between
+containers and pods. The *options* argument is a comma-separated list with the following available elements:
+
+* **rw**|**ro**
+* **z**
+
+Mounts already mounted volumes from a source container into another
+pod. Must supply the source's container-id or container-name.
+To share a volume, use the --volumes-from option when running
+the target container. Volumes can be shared even if the source container
+is not running.
+
+By default, Podman mounts the volumes in the same mode (read-write or
+read-only) as they are mounted in the source container.
+This can be changed by adding a `ro` or `rw` _option_.
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a pod. Without a label, the security system might
+prevent the processes running inside the container from using the content. By
+default, Podman does not change the labels set by the OS.
+
+To change a label in the pod context, add `z` to the volume mount.
+This suffix tells Podman to relabel file objects on the shared volumes. The `z`
+option tells Podman that two entities share the volume content. As a result,
+Podman labels the content with a shared content label. Shared volume labels allow
+all containers to read/write content.
+
+If the location of the volume from the source container overlaps with
+data residing on a target pod, then the volume hides
+that data on the target.
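+
+An illustrative sketch (the container, volume path, and pod names are placeholders):
+create a container with a volume, then clone a pod mounting that container's
+volumes read-only:
+
+```
+# podman create --name data -v /data alpine true
+# podman pod clone --volumes-from data:ro source-pod
+```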
+
+
+## EXAMPLES
+```
+# podman pod clone pod-name
+6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584
+```
+
+```
+# podman pod clone --name=cloned-pod
+d0cf1f782e2ed67e8c0050ff92df865a039186237a4df24d7acba5b1fa8cc6e7
+6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584
+```
+
+```
+# podman pod clone --destroy --cpus=5 d0cf1f782e2ed67e8c0050ff92df865a039186237a4df24d7acba5b1fa8cc6e7
+6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584
+```
+
+```
+# podman pod clone 2d4d4fca7219b4437e0d74fcdc272c4f031426a6eacd207372691207079551de new_name
+5a9b7851013d326aa4ac4565726765901b3ecc01fcbc0f237bc7fd95588a24f9
+```
+## SEE ALSO
+**[podman-pod-create(1)](podman-pod-create.1.md)**
+
+## HISTORY
+May 2022, Originally written by Charlie Doern <cdoern@redhat.com>
diff --git a/docs/source/markdown/podman-pod-create.1.md b/docs/source/markdown/podman-pod-create.1.md
index fa431b611..53d1e3327 100644
--- a/docs/source/markdown/podman-pod-create.1.md
+++ b/docs/source/markdown/podman-pod-create.1.md
@@ -4,14 +4,24 @@
podman\-pod\-create - Create a new pod
## SYNOPSIS
-**podman pod create** [*options*]
+**podman pod create** [*options*] [*name*]
## DESCRIPTION
Creates an empty pod, or unit of multiple containers, and prepares it to have
-containers added to it. The pod id is printed to STDOUT. You can then use
-**podman create --pod `<pod_id|pod_name>` ...** to add containers to the pod, and
-**podman pod start `<pod_id|pod_name>`** to start the pod.
+containers added to it. The pod can be created with a specific name. If a name
+is not given a random name is generated. The pod id is printed to STDOUT. You
+can then use **podman create --pod `<pod_id|pod_name>` ...** to add containers
+to the pod, and **podman pod start `<pod_id|pod_name>`** to start the pod.
+
+The operator can identify a pod in three ways:
+UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
+UUID short identifier (“f78375b1c487”)
+Name (“jonah”)
+
+Podman generates a UUID for each pod, and if a name is not assigned
+to the pod with **--name** then a random string name will be generated
+for it. The name is useful any place you need to identify a pod.
## OPTIONS
@@ -154,6 +164,16 @@ according to RFC4862.
To specify multiple static MAC addresses per pod, set multiple networks using the **--network** option with a static MAC address specified for each using the `mac` mode for that option.
+#### **--memory**, **-m**=*limit*
+
+Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
+
+Constrains the memory available to a container. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited. The actual limit may be rounded up to a multiple of the operating
+system's page size (when unlimited, the reported value is very large, on the order of millions of trillions).
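+
+For example, to create a pod with a 512 MiB memory limit (the pod name is
+illustrative):
+
+```
+$ podman pod create --memory=512m limited-pod
+```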
+
#### **--name**=*name*, **-n**
@@ -217,16 +237,30 @@ Set the PID mode for the pod. The default is to create a private PID namespace f
Write the pod ID to the file.
-#### **--publish**=*port*, **-p**
+#### **--publish**, **-p**=[[_ip_:][_hostPort_]:]_containerPort_[/_protocol_]
-Publish a port or range of ports from the pod to the host.
+Publish a container's port, or range of ports, within this pod to the host.
-Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort`
Both hostPort and containerPort can be specified as a range of ports.
-When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range.
-Use `podman port` to see the actual mapping: `podman port CONTAINER $CONTAINERPORT`.
+When specifying ranges for both, the number of container ports in the
+range must match the number of host ports in the range.
+
+If host IP is set to 0.0.0.0 or not set at all, the port will be bound on all IPs on the host.
+
+By default, Podman will publish TCP ports. To publish a UDP port instead, give
+`udp` as protocol. To publish both TCP and UDP ports, set `--publish` twice,
+with `tcp` and `udp` as protocols respectively. Rootful containers can also
+publish ports using the `sctp` protocol.
+
+Host port does not have to be specified (e.g. `podman run -p 127.0.0.1::80`).
+If it is not, the container port will be randomly assigned a port on the host.
+
+Use **podman port** to see the actual mapping: `podman port $CONTAINER $CONTAINERPORT`.
+
+**Note:** Ports of containers in the pod must not be published individually;
+they can only be published by the pod itself.
-NOTE: This cannot be modified once the pod is created.
+**Note:** This cannot be modified once the pod is created.
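+
+For example, to publish container port 80 of the pod on host port 8080 over both
+TCP and UDP (the port numbers and pod name are illustrative):
+
+```
+$ podman pod create -p 8080:80/tcp -p 8080:80/udp mypod
+```
+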
#### **--replace**
@@ -259,23 +293,14 @@ Note: Labeling can be disabled for all pods/containers by setting label=false in
- `proc-opts=OPTIONS` : Comma-separated list of options to use for the /proc mount. More details for the
possible mount options are specified in the **proc(5)** man page.
-- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
+- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read-only by default.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read-only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
Note: Labeling can be disabled for all containers by setting label=false in the **containers.conf** (`/etc/containers/containers.conf` or `$HOME/.config/containers/containers.conf`) file.
#### **--share**=*namespace*
-A comma-separated list of kernel namespaces to share. If none or "" is specified, no namespaces will be shared. The namespaces to choose from are cgroup, ipc, net, pid, uts.
-
-The operator can identify a pod in three ways:
-UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
-UUID short identifier (“f78375b1c487”)
-Name (“jonah”)
-
-podman generates a UUID for each pod, and if a name is not assigned
-to the container with **--name** then a random string name will be generated
-for it. The name is useful any place you need to identify a pod.
+A comma-separated list of kernel namespaces to share. If none or "" is specified, no namespaces will be shared. The namespaces to choose from are cgroup, ipc, net, pid, uts. If the option is prefixed with a "+", the namespace is appended to the default list; otherwise it replaces the default list. The default matches the Kubernetes default (ipc, net, uts).
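+
+For example (pod names are illustrative), append the pid namespace to the default
+list, or replace the defaults with only the net namespace:
+
+```
+$ podman pod create --share +pid pidshare-pod
+$ podman pod create --share net netonly-pod
+```
+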
#### **--share-parent**
@@ -283,6 +308,12 @@ This boolean determines whether or not all containers entering the pod will use
Note: This options conflict with **--share=cgroup** since that would set the pod as the cgroup parent but enter the container into the same cgroupNS as the infra container.
+#### **--shm-size**=*size*
+
+Size of `/dev/shm` (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes)).
+If the unit is omitted, the system uses bytes. If the size is omitted, the system uses `64m`.
+When size is `0`, there is no limit on the amount of memory used for IPC by the pod. This option conflicts with **--ipc=host** when running containers.
+
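+For example, to create a pod with a 1 GiB `/dev/shm` (the pod name is illustrative):
+
+```
+$ podman pod create --shm-size=1g bigshm-pod
+```
+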
#### **--subgidname**=*name*
Name for GID map from the `/etc/subgid` file. Using this flag will run the container with user namespace enabled. This flag conflicts with `--userns` and `--gidmap`.
@@ -291,6 +322,7 @@ Name for GID map from the `/etc/subgid` file. Using this flag will run the conta
Name for UID map from the `/etc/subuid` file. Using this flag will run the container with user namespace enabled. This flag conflicts with `--userns` and `--uidmap`.
+
#### **--sysctl**=_name_=_value_
Configure namespace kernel parameters for all containers in the pod.
@@ -345,9 +377,17 @@ Valid _mode_ values are:
- *host*: run in the user namespace of the caller. The processes running in the container will have the same privileges on the host as any other process launched by the calling user (default).
- - *keep-id*: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is ignored for containers created by the root user.
+ - *keep-id*: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is not allowed for containers created by the root user.
- - *nomap*: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is ignored for containers created by the root user.
+ - *nomap*: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is not allowed for containers created by the root user.
+
+#### **--uts**=*mode*
+
+Set the UTS namespace mode for the pod. The following values are supported:
+
+- **host**: use the host's UTS namespace inside the pod.
+- **private**: create a new namespace for the pod (default).
+- **ns:[path]**: run the pod in the given existing UTS namespace.
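+
+For example, to run a pod in the host's UTS namespace (the pod name is illustrative):
+
+```
+$ podman pod create --uts=host hostuts-pod
+```
+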
#### **--volume**, **-v**[=*[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]
@@ -466,12 +506,10 @@ will be visible inside container but not the other way around. <sup>[[1]](#Footn
To control mount propagation property of a volume one can use the [**r**]**shared**,
[**r**]**slave**, [**r**]**private** or the [**r**]**unbindable** propagation flag.
-Propagation property can be specified only for bind mounted volumes and not for
-internal volumes or named volumes. For mount propagation to work the source mount
-point (the mount point where source dir is mounted on) has to have the right propagation
-properties. For shared volumes, the source mount point has to be shared. And for
-slave volumes, the source mount point has to be either shared or slave.
-<sup>[[1]](#Footnote1)</sup>
+For mount propagation to work, the source mount point (the mount point where the source
+directory is mounted) has to have the right propagation properties. For shared volumes, the
+source mount point has to be shared. And for slave volumes, the source mount point
+has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
If you want to recursively mount a volume and all of its submounts into a
pod, then you can use the `rbind` option. By default the bind option is
@@ -549,9 +587,11 @@ that data on the target.
```
$ podman pod create --name test
+$ podman pod create mypod
+
$ podman pod create --infra=false
-$ podman pod create --infra-command /top
+$ podman pod create --infra-command /top toppod
$ podman pod create --publish 8443:443
diff --git a/docs/source/markdown/podman-pod.1.md b/docs/source/markdown/podman-pod.1.md
index 71e4dcb59..c38235e89 100644
--- a/docs/source/markdown/podman-pod.1.md
+++ b/docs/source/markdown/podman-pod.1.md
@@ -13,6 +13,7 @@ podman pod is a set of subcommands that manage pods, or groups of containers.
| Command | Man Page | Description |
| ------- | ------------------------------------------------- | --------------------------------------------------------------------------------- |
+| clone | [podman-pod-clone(1)](podman-pod-clone.1.md) | Creates a copy of an existing pod. |
| create | [podman-pod-create(1)](podman-pod-create.1.md) | Create a new pod. |
| exists | [podman-pod-exists(1)](podman-pod-exists.1.md) | Check if a pod exists in local storage. |
| inspect | [podman-pod-inspect(1)](podman-pod-inspect.1.md) | Displays information describing a pod. |
diff --git a/docs/source/markdown/podman-port.1.md b/docs/source/markdown/podman-port.1.md
index a72fc12bf..ebfeeccd7 100644
--- a/docs/source/markdown/podman-port.1.md
+++ b/docs/source/markdown/podman-port.1.md
@@ -9,7 +9,7 @@ podman\-port - List port mappings for a container
**podman container port** [*options*] *container* [*private-port*[/*proto*]]
## DESCRIPTION
-List port mappings for the *container* or lookup the public-facing port that is NAT-ed to the *private-port*.
+List port mappings for the *container* or look up the public-facing port that is NAT-ed to the *private-port*.
## OPTIONS
diff --git a/docs/source/markdown/podman-push.1.md b/docs/source/markdown/podman-push.1.md
index 74555c11b..25c1e024a 100644
--- a/docs/source/markdown/podman-push.1.md
+++ b/docs/source/markdown/podman-push.1.md
@@ -95,7 +95,7 @@ When writing the output image, suppress progress output
#### **--remove-signatures**
-Discard any pre-existing signatures in the image. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines)
+Discard any pre-existing signatures in the image.
#### **--sign-by**=*key*
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index a16ee9394..84e93efbe 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -385,7 +385,7 @@ You need to specify multi option commands in the form of a json string.
Set environment variables.
-This option allows arbitrary environment variables that are available for the process to be launched inside of the container. If an environment variable is specified without a value, Podman will check the host environment for a value and set the variable only if it is set on the host. If an environment variable ending in __*__ is specified, Podman will search the host environment for variables starting with the prefix and will add those variables to the container. If an environment variable with a trailing __*__ is specified, then a value must be supplied.
+This option allows arbitrary environment variables that are available for the process to be launched inside of the container. If an environment variable is specified without a value, Podman will check the host environment for a value and set the variable only if it is set on the host. As a special case, if an environment variable ending in __*__ is specified without a value, Podman will search the host environment for variables starting with the prefix and will add those variables to the container.
See [**Environment**](#environment) note below for precedence and examples.
@@ -597,7 +597,7 @@ To specify multiple static MAC addresses per container, set multiple networks us
#### **--memory**, **-m**=_number_[_unit_]
-Memory limit. A _unit_ can be **b** (bytes), **k** (kilobytes), **m** (megabytes), or **g** (gigabytes).
+Memory limit. A _unit_ can be **b** (bytes), **k** (kibibytes), **m** (mebibytes), or **g** (gibibytes).
Allows you to constrain the memory available to a container. If the host
supports swap memory, then the **-m** memory setting can be larger than physical
@@ -607,7 +607,7 @@ system's page size (the value would be very large, that's millions of trillions)
#### **--memory-reservation**=_number_[_unit_]
-Memory soft limit. A _unit_ can be **b** (bytes), **k** (kilobytes), **m** (megabytes), or **g** (gigabytes).
+Memory soft limit. A _unit_ can be **b** (bytes), **k** (kibibytes), **m** (mebibytes), or **g** (gibibytes).
After setting memory reservation, when the system detects memory contention
or low memory, containers are forced to restrict their consumption to their
@@ -618,7 +618,7 @@ as memory limit.
#### **--memory-swap**=_number_[_unit_]
A limit value equal to memory plus swap.
-A _unit_ can be **b** (bytes), **k** (kilobytes), **m** (megabytes), or **g** (gigabytes).
+A _unit_ can be **b** (bytes), **k** (kibibytes), **m** (mebibytes), or **g** (gibibytes).
Must be used with the **-m** (**--memory**) flag.
The argument value should always be larger than that of
@@ -679,7 +679,7 @@ Current supported mount TYPEs are **bind**, **volume**, **image**, **tmpfs** and
· bind-propagation: shared, slave, private, unbindable, rshared, rslave, runbindable, or rprivate(default). See also mount(2).
- . bind-nonrecursive: do not setup a recursive bind mount. By default it is recursive.
+ . bind-nonrecursive: do not set up a recursive bind mount. By default it is recursive.
. relabel: shared, private.
@@ -864,22 +864,27 @@ points, Apparmor/SELinux separation, and Seccomp filters are all disabled.
Rootless containers cannot have more privileges than the account that launched them.
-#### **--publish**, **-p**=_ip_:_hostPort_:_containerPort_ | _ip_::_containerPort_ | _hostPort_:_containerPort_ | _containerPort_
+#### **--publish**, **-p**=[[_ip_:][_hostPort_]:]_containerPort_[/_protocol_]
Publish a container's port, or range of ports, to the host.
Both hostPort and containerPort can be specified as a range of ports.
-
-When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range.
+When specifying ranges for both, the number of container ports in the
+range must match the number of host ports in the range.
If host IP is set to 0.0.0.0 or not set at all, the port will be bound on all IPs on the host.
+By default, Podman will publish TCP ports. To publish a UDP port instead, give
+`udp` as protocol. To publish both TCP and UDP ports, set `--publish` twice,
+with `tcp` and `udp` as protocols respectively. Rootful containers can also
+publish ports using the `sctp` protocol.
+
Host port does not have to be specified (e.g. `podman run -p 127.0.0.1::80`).
If it is not, the container port will be randomly assigned a port on the host.
-Use **podman port** to see the actual mapping: **podman port $CONTAINER $CONTAINERPORT**.
+Use **podman port** to see the actual mapping: `podman port $CONTAINER $CONTAINERPORT`.
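+
+For example (the image, container name, and assigned host port are illustrative;
+the output will differ), publish container UDP port 8080 on a random host port
+and then look up the mapping:
+
+```
+$ podman run -dt -p 8080/udp --name udp-demo alpine sleep 1000
+$ podman port udp-demo 8080/udp
+0.0.0.0:42649
+```
+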
-**Note:** if a container will be run within a pod, it is not necessary to publish the port for
+**Note:** If a container will be run within a pod, it is not necessary to publish the port for
the containers in the pod. The port must only be published by the pod itself. Pod network
stacks act like the network stack on the host - you have a variety of containers in the pod,
and programs in the container, all sharing a single interface and IP address, and
@@ -900,13 +905,14 @@ When using this option, Podman will bind any exposed port to a random port on th
within an ephemeral port range defined by */proc/sys/net/ipv4/ip_local_port_range*.
To find the mapping between the host ports and the exposed ports, use **podman port**.
-#### **--pull**=**always**|**missing**|**never**
+#### **--pull**=**always**|**missing**|**never**|**newer**
-Pull image before running. The default is **missing**.
+Pull image policy. The default is **missing**.
-- **missing**: attempt to pull the latest image from the registries listed in registries.conf if a local image does not exist.Raise an error if the image is not in any listed registry and is not present locally.
-- **always**: Pull the image from the first registry it is found in as listed in registries.conf. Raise an error if not found in the registries, even if the image is present locally.
-- **never**: do not pull the image from the registry, use only the local version. Raise an error if the image is not present locally.
+- **always**: Always pull the image and throw an error if the pull fails.
+- **missing**: Pull the image only if it could not be found in the local containers storage. Throw an error if no image could be found and the pull fails.
+- **never**: Never pull the image but use the one from the local containers storage. Throw an error if no image could be found.
+- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found.
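+
+For example (the image name is illustrative), pull only when the registry holds a
+newer digest than the local image, then run:
+
+```
+$ podman run --pull=newer alpine echo hello
+```
+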
#### **--quiet**, **-q**
@@ -914,11 +920,11 @@ Suppress output information when pulling images
#### **--read-only**
-Mount the container's root filesystem as read only.
+Mount the container's root filesystem as read-only.
By default a container will have its root filesystem writable allowing processes
to write files anywhere. By specifying the **--read-only** flag, the container will have
-its root filesystem mounted as read only prohibiting any writes.
+its root filesystem mounted as read-only prohibiting any writes.
#### **--read-only-tmpfs**
@@ -1046,14 +1052,14 @@ Note: Labeling can be disabled for all containers by setting label=false in the
- **proc-opts**=_OPTIONS_ : Comma-separated list of options to use for the /proc mount. More details
for the possible mount options are specified in the **proc(5)** man page.
-- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.**. The default paths that are read only are **/proc/asound**, **/proc/bus**, **/proc/fs**, **/proc/irq**, **/proc/sys**, **/proc/sysrq-trigger**, **/sys/fs/cgroup**.
+- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read-only by default.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux**. The default paths that are read-only are **/proc/asound**, **/proc/bus**, **/proc/fs**, **/proc/irq**, **/proc/sys**, **/proc/sysrq-trigger**, **/sys/fs/cgroup**.
Note: Labeling can be disabled for all containers by setting **label=false** in the **containers.conf**(5) file.
#### **--shm-size**=_number_[_unit_]
-Size of _/dev/shm_. A _unit_ can be **b** (bytes), **k** (kilobytes), **m** (megabytes), or **g** (gigabytes).
+Size of _/dev/shm_. A _unit_ can be **b** (bytes), **k** (kibibytes), **m** (mebibytes), or **g** (gibibytes).
If you omit the unit, the system uses bytes. If you omit the size entirely, the default is **64m**.
When _size_ is **0**, there is no limit on the amount of memory used for IPC by the container.
@@ -1324,9 +1330,9 @@ The rootless option `--userns=keep-id` uses all the subuids and subgids of the u
**host**: run in the user namespace of the caller. The processes running in the container will have the same privileges on the host as any other process launched by the calling user (default).
-**keep-id**: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is ignored for containers created by the root user.
+**keep-id**: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is not allowed for containers created by the root user.
-**nomap**: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is ignored for containers created by the root user.
+**nomap**: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is not allowed for containers created by the root user.
**ns:**_namespace_: run the container in the given existing user namespace.
@@ -1357,13 +1363,14 @@ The _options_ is a comma-separated list and can be: <sup>[[1]](#Footnote1)</sup>
* **rw**|**ro**
* **z**|**Z**
-* [**r**]**shared**|[**r**]**slave**|[**r**]**private**[**r**]**unbindable**
-* [**r**]**bind**
-* [**no**]**exec**
-* [**no**]**dev**
-* [**no**]**suid**
* [**O**]
* [**U**]
+* [**no**]**copy**
+* [**no**]**dev**
+* [**no**]**exec**
+* [**no**]**suid**
+* [**r**]**bind**
+* [**r**]**shared**|[**r**]**slave**|[**r**]**private**[**r**]**unbindable**
The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The volume
will be mounted into the container at this directory.
@@ -1473,14 +1480,12 @@ visible on host and vice versa. Making a volume **slave** enables only one
way mount propagation and that is mounts done on host under that volume
will be visible inside container but not the other way around. <sup>[[1]](#Footnote1)</sup>
-To control mount propagation property of volume one can use [**r**]**shared**,
-[**r**]**slave**, [**r**]**private** or [**r**]**unbindable** propagation flag.
-Propagation property can be specified only for bind mounted volumes and not for
-internal volumes or named volumes. For mount propagation to work source mount
-point (mount point where source dir is mounted on) has to have right propagation
-properties. For shared volumes, source mount point has to be shared. And for
-slave volumes, source mount has to be either shared or slave.
-<sup>[[1]](#Footnote1)</sup>
+To control the mount propagation property of a volume, one can use the [**r**]**shared**,
+[**r**]**slave**, [**r**]**private** or the [**r**]**unbindable** propagation flag.
+For mount propagation to work, the source mount point (the mount point where the source
+directory is mounted) has to have the right propagation properties. For shared volumes, the
+source mount point has to be shared. And for slave volumes, the source mount point
+has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
If you want to recursively mount a volume and all of its submounts into a
container, then you can use the **rbind** option. By default the bind option is
@@ -1599,7 +1604,7 @@ content. Installing packages into _/usr_, for example. In production,
applications seldom need to write to the image. Container applications write
to volumes if they need to write to file systems at all. Applications can be
made more secure by running them in read-only mode using the **--read-only** switch.
-This protects the containers image from modification. Read only containers may
+This protects the containers image from modification. Read-only containers may
still need to write temporary data. The best way to handle this is to mount
tmpfs directories on _/run_ and _/tmp_.
@@ -1880,7 +1885,7 @@ $ podman run --uidmap 0:30000:7000 --gidmap 0:30000:7000 fedora echo hello
Podman allows for the configuration of storage by changing the values
in the _/etc/container/storage.conf_ or by using global options. This
-shows how to setup and use fuse-overlayfs for a one time run of busybox
+shows how to set up and use fuse-overlayfs for a one time run of busybox
using global options.
```
@@ -1979,15 +1984,15 @@ in the following order of precedence (later entries override earlier entries):
- **--env-file**: Any environment variables specified via env-files. If multiple files specified, then they override each other in order of entry.
- **--env**: Any environment variables specified will override previous settings.
-Run containers and set the environment ending with a __*__ and a __*****__:
+Run containers and set an environment variable whose name ends with a __*__.
+The trailing __*__ glob functionality is only active when no value is specified:
```
$ export ENV1=a
-$ podman run --env ENV* alpine printenv ENV1
-a
-
-$ podman run --env ENV*****=b alpine printenv ENV*****
-b
+$ podman run --env 'ENV*' alpine env | grep ENV
+ENV1=a
+$ podman run --env 'ENV*=b' alpine env | grep ENV
+ENV*=b
```
## CONMON
diff --git a/docs/source/markdown/podman-system-prune.1.md b/docs/source/markdown/podman-system-prune.1.md
index fb9ed44d6..c4c17fbe5 100644
--- a/docs/source/markdown/podman-system-prune.1.md
+++ b/docs/source/markdown/podman-system-prune.1.md
@@ -1,13 +1,13 @@
% podman-system-prune(1)
## NAME
-podman\-system\-prune - Remove all unused pod, container, image and volume data
+podman\-system\-prune - Remove all unused pods, containers, images, networks, and volume data
## SYNOPSIS
**podman system prune** [*options*]
## DESCRIPTION
-**podman system prune** removes all unused containers (both dangling and unreferenced), pods and optionally, volumes from local storage.
+**podman system prune** removes all unused containers (both dangling and unreferenced), pods, networks, and optionally, volumes from local storage.
With the **--all** option, you can delete all unused images. Unused images are dangling images as well as any image that does not have any containers based on it.
@@ -16,7 +16,7 @@ By default, volumes are not removed to prevent important data from being deleted
## OPTIONS
#### **--all**, **-a**
-Recursively remove all unused pod, container, image and volume data (Maximum 50 iterations.)
+Recursively remove all unused pods, containers, images, networks, and volume data. (Maximum 50 iterations.)
#### **--filter**=*filters*
diff --git a/docs/source/markdown/podman-system-service.1.md b/docs/source/markdown/podman-system-service.1.md
index 176d73eda..99fde8ce4 100644
--- a/docs/source/markdown/podman-system-service.1.md
+++ b/docs/source/markdown/podman-system-service.1.md
@@ -21,6 +21,10 @@ The REST API provided by **podman system service** is split into two parts: a co
Documentation for the latter is available at *https://docs.podman.io/en/latest/_static/api.html*.
Both APIs are versioned, but the server will not reject requests with an unsupported version set.
+Please note that the API grants full access to Podman's capabilities, and as such should be treated as allowing arbitrary code execution as the user running the API.
+For this reason, we strongly recommend against making the API socket available via the network.
+The default configuration (a Unix socket with permissions set to only allow the user running Podman) is the most secure way of running the API.
+
Note: The default systemd unit files (system and user) change the log-level option to *info* from *error*. This change provides additional information on each API call.
## OPTIONS
diff --git a/docs/source/markdown/podman-system.1.md b/docs/source/markdown/podman-system.1.md
index ae18aca88..7469eb79d 100644
--- a/docs/source/markdown/podman-system.1.md
+++ b/docs/source/markdown/podman-system.1.md
@@ -11,16 +11,16 @@ The system command allows you to manage the podman systems
## COMMANDS
-| Command | Man Page | Description |
-| ------- | ------------------------------------------------------------ | -------------------------------------------------------------------- |
-| connection | [podman-system-connection(1)](podman-system-connection.1.md) | Manage the destination(s) for Podman service(s) |
-| df | [podman-system-df(1)](podman-system-df.1.md) | Show podman disk usage. |
-| info | [podman-system-info(1)](podman-info.1.md) | Displays Podman related system information. |
-| migrate | [podman-system-migrate(1)](podman-system-migrate.1.md) | Migrate existing containers to a new podman version. |
-| prune | [podman-system-prune(1)](podman-system-prune.1.md) | Remove all unused pod, container, image and volume data. |
-| renumber | [podman-system-renumber(1)](podman-system-renumber.1.md) | Migrate lock numbers to handle a change in maximum number of locks. |
-| reset | [podman-system-reset(1)](podman-system-reset.1.md) | Reset storage back to initial state. |
-| service | [podman-system-service(1)](podman-system-service.1.md) | Run an API service |
+| Command | Man Page | Description |
+| ------- | ------------------------------------------------------------ | ------------------------------------------------------------------------ |
+| connection | [podman-system-connection(1)](podman-system-connection.1.md) | Manage the destination(s) for Podman service(s) |
+| df | [podman-system-df(1)](podman-system-df.1.md) | Show podman disk usage. |
+| info | [podman-system-info(1)](podman-info.1.md) | Displays Podman related system information. |
+| migrate | [podman-system-migrate(1)](podman-system-migrate.1.md) | Migrate existing containers to a new podman version. |
+| prune | [podman-system-prune(1)](podman-system-prune.1.md) | Remove all unused pods, containers, images, networks, and volume data. |
+| renumber | [podman-system-renumber(1)](podman-system-renumber.1.md) | Migrate lock numbers to handle a change in maximum number of locks. |
+| reset | [podman-system-reset(1)](podman-system-reset.1.md) | Reset storage back to initial state. |
+| service | [podman-system-service(1)](podman-system-service.1.md) | Run an API service |
## SEE ALSO
**[podman(1)](podman.1.md)**
diff --git a/docs/source/markdown/podman-volume-create.1.md b/docs/source/markdown/podman-volume-create.1.md
index 06fadcaa1..f43e647bf 100644
--- a/docs/source/markdown/podman-volume-create.1.md
+++ b/docs/source/markdown/podman-volume-create.1.md
@@ -4,7 +4,7 @@
podman\-volume\-create - Create a new volume
## SYNOPSIS
-**podman volume create** [*options*]
+**podman volume create** [*options*] [*name*]
## DESCRIPTION
@@ -31,16 +31,17 @@ Set metadata for a volume (e.g., --label mykey=value).
Set driver specific options.
For the default driver, **local**, this allows a volume to be configured to mount a filesystem on the host.
-For the `local` driver the following options are supported: `type`, `device`, and `o`.
+For the `local` driver the following options are supported: `type`, `device`, `o`, and `[no]copy`.
The `type` option sets the type of the filesystem to be mounted, and is equivalent to the `-t` flag to **mount(8)**.
The `device` option sets the device to be mounted, and is equivalent to the `device` argument to **mount(8)**.
+The `copy` option enables copying files from the container image path where the mount is created to the newly created volume on the first run. `copy` is the default.
The `o` option sets options for the mount, and is equivalent to the `-o` flag to **mount(8)** with these exceptions:
- The `o` option supports `uid` and `gid` options to set the UID and GID of the created volume that are not normally supported by **mount(8)**.
- The `o` option supports the `size` option to set the maximum size of the created volume, the `inodes` option to set the maximum number of inodes for the volume and `noquota` to completely disable quota support even for tracking of disk usage. Currently these flags are only supported on "xfs" file system mounted with the `prjquota` flag described in the **xfs_quota(8)** man page.
- - The `o` option supports .
- - Using volume options other then the UID/GID options with the **local** driver requires root privileges.
+ - Using the `o` option to set volume options other than the UID/GID options with the **local** driver requires root privileges.
+ - The `o` option supports the `timeout` option, which allows users to set a driver-specific timeout in seconds before volume creation fails. For example, **--opt=o=timeout=10** sets a driver timeout of 10 seconds.
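+
+For example (the volume name and values are illustrative), create a tmpfs-backed
+volume owned by UID/GID 1000:
+
+```
+$ podman volume create --opt type=tmpfs --opt device=tmpfs --opt o=uid=1000,gid=1000 testvol
+```
+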
When not using the **local** driver, the given options are passed directly to the volume plugin. In this case, supported options are dictated by the plugin in question, not Podman.
diff --git a/docs/source/markdown/podman-volume-import.1.md b/docs/source/markdown/podman-volume-import.1.md
index 71956f43a..4ae9ae1e2 100644
--- a/docs/source/markdown/podman-volume-import.1.md
+++ b/docs/source/markdown/podman-volume-import.1.md
@@ -1,7 +1,7 @@
% podman-volume-import(1)
## NAME
-podman\-volume\-import - Import tarball contents into a podman volume
+podman\-volume\-import - Import tarball contents into an existing podman volume
## SYNOPSIS
**podman volume import** *volume* [*source*]
@@ -11,9 +11,9 @@ podman\-volume\-import - Import tarball contents into a podman volume
**podman volume import** imports the contents of a tarball into the podman volume's mount point.
**podman volume import** can consume piped input when using `-` as source path.
-Note: Following command is not supported by podman-remote.
+The given volume must already exist and will not be created by podman volume import.
-**podman volume import VOLUME [SOURCE]**
+Note: This command is not supported by podman-remote.
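+
+For example (volume names are illustrative), copy the contents of one existing
+volume into another by piping an export into an import:
+
+```
+$ podman volume export srcvol | podman volume import destvol -
+```
+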
#### **--help**
diff --git a/docs/source/markdown/podman-volume-reload.1.md b/docs/source/markdown/podman-volume-reload.1.md
new file mode 100644
index 000000000..5b9e9b9ac
--- /dev/null
+++ b/docs/source/markdown/podman-volume-reload.1.md
@@ -0,0 +1,29 @@
+% podman-volume-reload(1)
+
+## NAME
+podman\-volume\-reload - Reload all volumes from volume plugins
+
+## SYNOPSIS
+**podman volume reload**
+
+## DESCRIPTION
+
+**podman volume reload** checks all configured volume plugins and updates the libpod database with all available volumes.
+Existing volumes are also removed from the database when they are no longer present in the plugin.
+
+This command is best effort and cannot guarantee a perfect state because plugins can be modified from the outside at any time.
+
+Note: This command is not supported with podman-remote.
+
+## EXAMPLES
+
+```
+$ podman volume reload
+Added:
+vol6
+Removed:
+t3
+```
+
+## SEE ALSO
+**[podman(1)](podman.1.md)**, **[podman-volume(1)](podman-volume.1.md)**
diff --git a/docs/source/markdown/podman-volume.1.md b/docs/source/markdown/podman-volume.1.md
index d05f007c8..a437590b3 100644
--- a/docs/source/markdown/podman-volume.1.md
+++ b/docs/source/markdown/podman-volume.1.md
@@ -16,11 +16,12 @@ podman volume is a set of subcommands that manage volumes.
| create | [podman-volume-create(1)](podman-volume-create.1.md) | Create a new volume. |
| exists | [podman-volume-exists(1)](podman-volume-exists.1.md) | Check if the given volume exists. |
| export | [podman-volume-export(1)](podman-volume-export.1.md) | Exports volume to external tar. |
-| import | [podman-volume-import(1)](podman-volume-import.1.md) | Import tarball contents into a podman volume. |
+| import | [podman-volume-import(1)](podman-volume-import.1.md) | Import tarball contents into an existing podman volume. |
| inspect | [podman-volume-inspect(1)](podman-volume-inspect.1.md) | Get detailed information on one or more volumes. |
| ls | [podman-volume-ls(1)](podman-volume-ls.1.md) | List all the available volumes. |
| mount | [podman-volume-mount(1)](podman-volume-mount.1.md) | Mount a volume filesystem. |
| prune | [podman-volume-prune(1)](podman-volume-prune.1.md) | Remove all unused volumes. |
+| reload | [podman-volume-reload(1)](podman-volume-reload.1.md) | Reload all volumes from volume plugins.  |
| rm | [podman-volume-rm(1)](podman-volume-rm.1.md) | Remove one or more volumes. |
| unmount | [podman-volume-unmount(1)](podman-volume-unmount.1.md) | Unmount a volume. |
diff --git a/docs/standalone-styling.css b/docs/standalone-styling.css
new file mode 100644
index 000000000..37721829c
--- /dev/null
+++ b/docs/standalone-styling.css
@@ -0,0 +1,603 @@
+/* github.com/n1hility/standalone-styling (modified variant of github.com/bgw/pan-am) */
+/*! normalize.css v3.0.0 | MIT License | git.io/normalize */
+/**
+ * 1. Set default font family to sans-serif.
+ * 2. Prevent iOS text size adjust after orientation change, without disabling
+ * user zoom.
+ */
+html {
+ font-family: sans-serif;
+ /* 1 */
+ -ms-text-size-adjust: 100%;
+ /* 2 */
+ -webkit-text-size-adjust: 100%;
+ /* 2 */ }
+
+/**
+ * Remove default margin.
+ */
+body {
+ margin: 0; }
+
+/* HTML5 display definitions
+ ========================================================================== */
+/**
+ * Correct `block` display not defined in IE 8/9.
+ */
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+main,
+nav,
+section,
+summary {
+ display: block; }
+
+/**
+ * 1. Correct `inline-block` display not defined in IE 8/9.
+ * 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.
+ */
+audio,
+canvas,
+progress,
+video {
+ display: inline-block;
+ /* 1 */
+ vertical-align: baseline;
+ /* 2 */ }
+
+/**
+ * Prevent modern browsers from displaying `audio` without controls.
+ * Remove excess height in iOS 5 devices.
+ */
+audio:not([controls]) {
+ display: none;
+ height: 0; }
+
+/**
+ * Address `[hidden]` styling not present in IE 8/9.
+ * Hide the `template` element in IE, Safari, and Firefox < 22.
+ */
+[hidden],
+template {
+ display: none; }
+
+/* Links
+ ========================================================================== */
+/**
+ * Remove the gray background color from active links in IE 10.
+ */
+a {
+ background: transparent; }
+
+/**
+ * Improve readability when focused and also mouse hovered in all browsers.
+ */
+a:active,
+a:hover {
+ outline: 0; }
+
+/* Text-level semantics
+ ========================================================================== */
+/**
+ * Address styling not present in IE 8/9, Safari 5, and Chrome.
+ */
+abbr[title] {
+ border-bottom: 1px dotted; }
+
+/**
+ * Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome.
+ */
+b,
+strong {
+ font-weight: bold; }
+
+/**
+ * Address styling not present in Safari 5 and Chrome.
+ */
+dfn {
+ font-style: italic; }
+
+/**
+ * Address variable `h1` font-size and margin within `section` and `article`
+ * contexts in Firefox 4+, Safari 5, and Chrome.
+ */
+h1 {
+ font-size: 2em;
+ margin: 0.67em 0; }
+
+/**
+ * Address styling not present in IE 8/9.
+ */
+mark {
+ background: #ff0;
+ color: #000; }
+
+/**
+ * Address inconsistent and variable font size in all browsers.
+ */
+small {
+ font-size: 80%; }
+
+/**
+ * Prevent `sub` and `sup` affecting `line-height` in all browsers.
+ */
+sub,
+sup {
+ font-size: 75%;
+ line-height: 0;
+ position: relative;
+ vertical-align: baseline; }
+
+sup {
+ top: -0.5em; }
+
+sub {
+ bottom: -0.25em; }
+
+/* Embedded content
+ ========================================================================== */
+/**
+ * Remove border when inside `a` element in IE 8/9.
+ */
+img {
+ border: 0; }
+
+/**
+ * Correct overflow displayed oddly in IE 9.
+ */
+svg:not(:root) {
+ overflow: hidden; }
+
+/* Grouping content
+ ========================================================================== */
+/**
+ * Address margin not present in IE 8/9 and Safari 5.
+ */
+figure {
+ margin: 1em 40px; }
+
+/**
+ * Address differences between Firefox and other browsers.
+ */
+hr {
+ -moz-box-sizing: content-box;
+ box-sizing: content-box;
+ height: 0; }
+
+/**
+ * Contain overflow in all browsers.
+ */
+pre {
+ overflow: auto; }
+
+/**
+ * Address odd `em`-unit font size rendering in all browsers.
+ */
+code,
+kbd,
+pre,
+samp {
+ font-family: monospace, monospace;
+ font-size: 1em; }
+
+/* Forms
+ ========================================================================== */
+/**
+ * Known limitation: by default, Chrome and Safari on OS X allow very limited
+ * styling of `select`, unless a `border` property is set.
+ */
+/**
+ * 1. Correct color not being inherited.
+ * Known issue: affects color of disabled elements.
+ * 2. Correct font properties not being inherited.
+ * 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome.
+ */
+button,
+input,
+optgroup,
+select,
+textarea {
+ color: inherit;
+ /* 1 */
+ font: inherit;
+ /* 2 */
+ margin: 0;
+ /* 3 */ }
+
+/**
+ * Address `overflow` set to `hidden` in IE 8/9/10.
+ */
+button {
+ overflow: visible; }
+
+/**
+ * Address inconsistent `text-transform` inheritance for `button` and `select`.
+ * All other form control elements do not inherit `text-transform` values.
+ * Correct `button` style inheritance in Firefox, IE 8+, and Opera
+ * Correct `select` style inheritance in Firefox.
+ */
+button,
+select {
+ text-transform: none; }
+
+/**
+ * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`
+ * and `video` controls.
+ * 2. Correct inability to style clickable `input` types in iOS.
+ * 3. Improve usability and consistency of cursor style between image-type
+ * `input` and others.
+ */
+button,
+html input[type="button"],
+input[type="reset"],
+input[type="submit"] {
+ -webkit-appearance: button;
+ /* 2 */
+ cursor: pointer;
+ /* 3 */ }
+
+/**
+ * Re-set default cursor for disabled elements.
+ */
+button[disabled],
+html input[disabled] {
+ cursor: default; }
+
+/**
+ * Remove inner padding and border in Firefox 4+.
+ */
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+ border: 0;
+ padding: 0; }
+
+/**
+ * Address Firefox 4+ setting `line-height` on `input` using `!important` in
+ * the UA stylesheet.
+ */
+input {
+ line-height: normal; }
+
+/**
+ * It's recommended that you don't attempt to style these elements.
+ * Firefox's implementation doesn't respect box-sizing, padding, or width.
+ *
+ * 1. Address box sizing set to `content-box` in IE 8/9/10.
+ * 2. Remove excess padding in IE 8/9/10.
+ */
+input[type="checkbox"],
+input[type="radio"] {
+ box-sizing: border-box;
+ /* 1 */
+ padding: 0;
+ /* 2 */ }
+
+/**
+ * Fix the cursor style for Chrome's increment/decrement buttons. For certain
+ * `font-size` values of the `input`, it causes the cursor style of the
+ * decrement button to change from `default` to `text`.
+ */
+input[type="number"]::-webkit-inner-spin-button,
+input[type="number"]::-webkit-outer-spin-button {
+ height: auto; }
+
+/**
+ * 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome.
+ * 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome
+ * (include `-moz` to future-proof).
+ */
+input[type="search"] {
+ -webkit-appearance: textfield;
+ /* 1 */
+ -moz-box-sizing: content-box;
+ -webkit-box-sizing: content-box;
+ /* 2 */
+ box-sizing: content-box; }
+
+/**
+ * Remove inner padding and search cancel button in Safari and Chrome on OS X.
+ * Safari (but not Chrome) clips the cancel button when the search input has
+ * padding (and `textfield` appearance).
+ */
+input[type="search"]::-webkit-search-cancel-button,
+input[type="search"]::-webkit-search-decoration {
+ -webkit-appearance: none; }
+
+/**
+ * Define consistent border, margin, and padding.
+ */
+fieldset {
+ border: 1px solid #c0c0c0;
+ margin: 0 2px;
+ padding: 0.35em 0.625em 0.75em; }
+
+/**
+ * 1. Correct `color` not being inherited in IE 8/9.
+ * 2. Remove padding so people aren't caught out if they zero out fieldsets.
+ */
+legend {
+ border: 0;
+ /* 1 */
+ padding: 0;
+ /* 2 */ }
+
+/**
+ * Remove default vertical scrollbar in IE 8/9.
+ */
+textarea {
+ overflow: auto; }
+
+/**
+ * Don't inherit the `font-weight` (applied by a rule above).
+ * NOTE: the default cannot safely be changed in Chrome and Safari on OS X.
+ */
+optgroup {
+ font-weight: bold; }
+
+/* Tables
+ ========================================================================== */
+/**
+ * Remove most spacing between table cells.
+ */
+table {
+ border-collapse: collapse;
+ border-spacing: 0; }
+
+td,
+th {
+ padding: 0; }
+
+body {
+  font-family: sans-serif;
+ background-color: #F8F8F8;
+ color: #111;
+ line-height: 1.3;
+ text-align: justify;
+ -moz-hyphens: auto;
+ -ms-hyphens: auto;
+ -webkit-hyphens: auto;
+ hyphens: auto; }
+ @media (max-width: 400px) {
+ body {
+ font-size: 12px;
+ margin-left: 10px;
+ margin-right: 10px;
+ margin-top: 10px;
+ margin-bottom: 15px; } }
+ @media (min-width: 401px) and (max-width: 600px) {
+ body {
+ font-size: 14px;
+ margin-left: 10px;
+ margin-right: 10px;
+ margin-top: 10px;
+ margin-bottom: 15px; } }
+ @media (min-width: 601px) and (max-width: 900px) {
+ body {
+ font-size: 15px;
+ margin-left: 100px;
+ margin-right: 100px;
+ margin-top: 20px;
+ margin-bottom: 25px; } }
+ @media (min-width: 901px) and (max-width: 1800px) {
+ body {
+ font-size: 17px;
+ margin-left: 200px;
+ margin-right: 200px;
+ margin-top: 30px;
+ margin-bottom: 25px;
+ max-width: 800px; } }
+ @media (min-width: 1801px) {
+ body {
+ font-size: 18px;
+ margin-left: 20%;
+ margin-right: 20%;
+ margin-top: 30px;
+ margin-bottom: 25px;
+ max-width: 1000px; } }
+
+p {
+ margin-top: 10px;
+ margin-bottom: 18px; }
+
+em {
+ font-style: italic; }
+
+strong {
+ font-weight: bold; }
+
+h1, h2, h3, h4, h5, h6 {
+ font-weight: bold;
+ padding-top: 0.25em;
+ margin-bottom: 0.15em; }
+
+header {
+ line-height: 2.475em;
+ padding-bottom: 0.7em;
+ border-bottom: 1px solid #BBB;
+ margin-bottom: 1.2em; }
+ header > h1 {
+ border: none;
+ padding: 0;
+ margin: 0;
+ font-size: 225%; }
+ header > h2 {
+ border: none;
+ padding: 0;
+ margin: 0;
+ font-style: normal;
+ font-size: 175%; }
+ header > h3 {
+ padding: 0;
+ margin: 0;
+ font-size: 125%;
+ font-style: italic; }
+ header + h1 {
+ border-top: none;
+ padding-top: 0px; }
+
+h1 {
+ border-top: 1px solid #BBB;
+ padding-top: 15px;
+ font-size: 150%;
+ margin-bottom: 10px; }
+ h1:first-of-type {
+ border: none; }
+
+h2 {
+ font-size: 125%; }
+
+h3 {
+ font-size: 105%; }
+
+hr {
+ border: 0px;
+ border-top: 1px solid #BBB;
+ width: 100%;
+ height: 0px; }
+ hr + h1 {
+ border-top: none;
+ padding-top: 0px; }
+
+ul, ol {
+ font-size: 90%;
+ margin-top: 10px;
+ margin-bottom: 15px;
+ padding-left: 30px; }
+
+ul {
+ list-style: circle; }
+
+ol {
+ list-style: decimal; }
+
+ul ul, ol ol, ul ol, ol ul {
+ font-size: inherit; }
+
+li {
+ margin-top: 5px;
+ margin-bottom: 7px; }
+
+q, blockquote, dd {
+ font-style: italic;
+ font-size: 90%; }
+
+blockquote, dd {
+ quotes: none;
+ border-left: 0.35em #BBB solid;
+ padding-left: 1.15em;
+ margin: 0 1.5em 0 0; }
+
+blockquote blockquote, dd blockquote, blockquote dd, dd dd, ol blockquote, ol dd, ul blockquote, ul dd, blockquote ol, dd ol,
+blockquote ul,
+dd ul {
+ font-size: inherit; }
+
+a, a:link, a:visited, a:hover {
+ color: inherit;
+ text-decoration: none;
+ border-bottom: 1px dashed #111; }
+ a:hover, a:link:hover, a:visited:hover, a:hover:hover {
+ border-bottom-style: solid; }
+ a.footnoteRef, a:link.footnoteRef, a:visited.footnoteRef, a:hover.footnoteRef {
+ border-bottom: none;
+ color: #666; }
+
+code {
+ font-family: "Consolas", "Monaco", monospace;
+ font-size: 85%;
+ background-color: #DDD;
+ border: 1px solid #BBB;
+ padding: 0px 0.15em 0px 0.15em;
+ -webkit-border-radius: 3px;
+ -moz-border-radius: 3px;
+ border-radius: 3px; }
+
+pre {
+ margin-right: 1.5em;
+ display: block; }
+ pre > code {
+ display: block;
+ font-size: 70%;
+ padding: 10px;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+ overflow-x: auto; }
+
+blockquote pre, dd pre, ul pre, ol pre {
+ margin-left: 0;
+ margin-right: 0; }
+ blockquote pre > code, dd pre > code, ul pre > code, ol pre > code {
+ font-size: 77.7777777778%; }
+
+caption, figcaption {
+ font-size: 80%;
+ font-style: italic;
+ text-align: right;
+ margin-bottom: 5px; }
+ caption:empty, figcaption:empty {
+ display: none; }
+
+table {
+ width: 100%;
+ margin-top: 1em;
+ margin-bottom: 1em; }
+ table + h1 {
+ border-top: none; }
+
+tr td, tr th {
+ padding: 0.2em 0.7em; }
+tr.header {
+ border-top: 1px solid #222;
+ border-bottom: 1px solid #222;
+ font-weight: 700; }
+tr.odd {
+ background-color: #EEEEEE; }
+tr.even {
+ background-color: #CCCCCC; }
+
+tbody:last-child {
+ border-bottom: 1px solid #222; }
+
+dt {
+ font-weight: 700; }
+ dt:after {
+ font-weight: normal;
+ content: ":"; }
+
+dd {
+ margin-bottom: 10px; }
+
+figure {
+ margin: 1.3em 0 1.3em 0;
+ text-align: center;
+ padding: 0px;
+ width: 100%;
+ background-color: #DDD;
+ border: 1px solid #BBB;
+ -webkit-border-radius: 8px;
+ -moz-border-radius: 8px;
+ border-radius: 8px;
+ overflow: hidden; }
+
+img {
+ display: block;
+ margin: 0px auto;
+ padding: 0px;
+ max-width: 100%; }
+
+figcaption {
+ margin: 5px 10px 5px 30px; }
+
+.footnotes {
+ color: #666;
+ font-size: 70%;
+ font-style: italic; }
+ .footnotes li p:last-child a:last-child {
+ border-bottom: none; }
diff --git a/docs/tutorials/README.md b/docs/tutorials/README.md
index 2a3c85c55..c7c1a3616 100644
--- a/docs/tutorials/README.md
+++ b/docs/tutorials/README.md
@@ -6,11 +6,11 @@
**[Introduction Tutorial](podman_tutorial.md)**
-Learn how to setup Podman and perform some basic commands with the utility.
+Learn how to set up Podman and perform some basic commands with the utility.
**[Basic Setup and Use of Podman in a Rootless environment](rootless_tutorial.md)**
-The steps required to setup rootless Podman are enumerated.
+The steps required to set up rootless Podman are enumerated.
**[Setup Mac/Windows](mac_win_client.md)**
@@ -26,7 +26,7 @@ How the libpod API can be used within your own project.
**[Image Signing](image_signing.md)**
-Learn how to setup and use image signing with Podman.
+Learn how to set up and use image signing with Podman.
**[Basic Networking](basic_networking.md)**
diff --git a/docs/tutorials/basic_networking.md b/docs/tutorials/basic_networking.md
index 396994596..0a6034e7a 100644
--- a/docs/tutorials/basic_networking.md
+++ b/docs/tutorials/basic_networking.md
@@ -13,13 +13,14 @@ Each setup is supported with an example.
## Differences between rootful and rootless container networking
-One of the guiding factors on networking for containers with Podman is going to be
-whether or not the container is run by a root user or not. This is because unprivileged
-users cannot create networking interfaces on the host. Therefore, with rootful
-containers, the default networking mode is to use netavark.
-For rootless, the default network
-mode is slirp4netns. Because of the limited privileges, slirp4netns lacks some of
-the features of networking; for example, slirp4netns cannot give containers a
+One of the guiding factors on networking for containers with Podman is going to
+be whether or not the container is run by a root user. This is because
+unprivileged users cannot create networking interfaces on the host. Therefore,
+for rootless containers, the default network mode is slirp4netns. Because of the
+limited privileges, slirp4netns lacks some of the features of networking
+compared to rootful Podman's networking; for example, slirp4netns cannot give
+containers a routable IP address. The default networking mode for rootful
+containers, on the other hand, is netavark, which allows a container to have a
routable IP address.
## Firewalls
@@ -93,6 +94,22 @@ When rootless containers are run, network operations
will be executed inside an extra network namespace. To join this namespace, use
`podman unshare --rootless-netns`.
+#### Default Network
+
+The default network `podman` with netavark is memory-only. It does not support DNS resolution, for backwards compatibility with Docker. To change settings, export the in-memory network and edit the resulting file.
+
+For the default rootful network, use:
+```
+podman network inspect podman | jq .[] > /etc/containers/networks/podman.json
+```
+
+And for the rootless network, use:
+
+```
+podman network inspect podman | jq .[] > ~/.local/share/containers/storage/networks/podman.json
+```
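+
+As an illustrative follow-up (a sketch, not a required step), you can edit the
+exported file and re-run the inspect command to confirm that the file-backed
+settings are picked up. `$EDITOR` stands in for your editor of choice, and the
+path assumes the rootful export from above:
+
+```
+$EDITOR /etc/containers/networks/podman.json
+podman network inspect podman
+```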
+
+
#### Example
By default, rootful containers use the netavark for its default network if
diff --git a/docs/tutorials/mac_experimental.md b/docs/tutorials/mac_experimental.md
index b5b815fe5..1e75d01b8 100644
--- a/docs/tutorials/mac_experimental.md
+++ b/docs/tutorials/mac_experimental.md
@@ -1,4 +1,4 @@
-# Using podman-machine on MacOS (x86_64 and Apple silicon)
+# Using podman-machine on macOS (Apple silicon and x86_64)
## Setup
diff --git a/docs/tutorials/podman-for-windows.md b/docs/tutorials/podman-for-windows.md
index 4e929a14a..48f9c1ab5 100644
--- a/docs/tutorials/podman-for-windows.md
+++ b/docs/tutorials/podman-for-windows.md
@@ -1,4 +1,4 @@
-![PODMAN logo](../../logo/podman-logo-source.svg)
+![](../../logo/podman-logo-source.svg)
Podman for Windows
==================
diff --git a/docs/tutorials/podman_tutorial.md b/docs/tutorials/podman_tutorial.md
index 83f1e5e1e..a371189e9 100644
--- a/docs/tutorials/podman_tutorial.md
+++ b/docs/tutorials/podman_tutorial.md
@@ -142,7 +142,7 @@ podman rm --latest
You can verify the deletion of the container by running *podman ps -a*.
## Integration Tests
-For more information on how to setup and run the integration tests in your environment, checkout the Integration Tests [README.md](../../test/README.md)
+For more information on how to set up and run the integration tests in your environment, check out the Integration Tests [README.md](../../test/README.md)
## More information
diff --git a/docs/tutorials/remote_client.md b/docs/tutorials/remote_client.md
index ef7160f6c..5cd679193 100644
--- a/docs/tutorials/remote_client.md
+++ b/docs/tutorials/remote_client.md
@@ -54,15 +54,18 @@ host:
In order for the Podman client to communicate with the server you need to enable and start the SSH daemon on your Linux machine, if it is not currently enabled.
```
-sudo systemctl enable --now -s sshd
+sudo systemctl enable --now sshd
```
#### Setting up SSH
Remote Podman uses SSH to communicate between the client and server. The remote client works considerably more smoothly using SSH keys. To set up your ssh connection, you need to generate an ssh key pair from your client machine. *NOTE:* in some instances, using an `rsa` key will cause connection issues; be sure to create an `ed25519` key.
```
-ssh-keygen -t ed25519
+ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519
+```
+By default, your public key should be in your home directory as `~/.ssh/id_ed25519.pub`. You then need to copy the contents of `id_ed25519.pub` and append them to `~/.ssh/authorized_keys` on the Linux server. You can automate this using `ssh-copy-id`:
+```
+ssh-copy-id -i ~/.ssh/id_ed25519.pub 192.168.122.1
```
-Your public key by default should be in your home directory under ~/.ssh/id_ed25519.pub. You then need to copy the contents of id_ed25519.pub and append it into ~/.ssh/authorized_keys on the Linux server. You can automate this using ssh-copy-id.
If you do not wish to use SSH keys, you will be prompted with each Podman command for your login password.
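+
+As an optional check, you can register the server as a named connection and
+run a remote command to confirm that key-based login works. This is only a
+sketch: the connection name `myserver` and the rootless socket path (assuming
+UID 1000) are placeholders, and `$USER` assumes your local and remote
+usernames match.
+
+```
+podman system connection add myserver --identity ~/.ssh/id_ed25519 ssh://$USER@192.168.122.1/run/user/1000/podman/podman.sock
+podman --remote info
+```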
diff --git a/go.mod b/go.mod
index 78562b4a7..bc39c9cf3 100644
--- a/go.mod
+++ b/go.mod
@@ -9,22 +9,22 @@ require (
github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0
github.com/checkpoint-restore/go-criu/v5 v5.3.0
github.com/container-orchestrated-devices/container-device-interface v0.4.0
- github.com/containernetworking/cni v1.1.0
+ github.com/containernetworking/cni v1.1.1
github.com/containernetworking/plugins v1.1.1
- github.com/containers/buildah v1.26.1
- github.com/containers/common v0.48.1-0.20220512112240-7536bf6ff9b1
+ github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c
+ github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9
github.com/containers/conmon v2.0.20+incompatible
- github.com/containers/image/v5 v5.21.2-0.20220519193817-1e26896b8059
- github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
+ github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
+ github.com/containers/ocicrypt v1.1.5
github.com/containers/psgo v1.7.2
- github.com/containers/storage v1.41.1-0.20220511210719-cacc3325a9c8
+ github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3
github.com/davecgh/go-spew v1.1.1
github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001
github.com/docker/distribution v2.8.1+incompatible
- github.com/docker/docker v20.10.16+incompatible
+ github.com/docker/docker v20.10.17+incompatible
github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11
github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651
github.com/docker/go-units v0.4.0
@@ -47,29 +47,32 @@ require (
github.com/onsi/gomega v1.19.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
- github.com/opencontainers/runc v1.1.2
+ github.com/opencontainers/runc v1.1.3
github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab
github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f
github.com/opencontainers/selinux v1.10.1
- github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
github.com/rootless-containers/rootlesskit v1.0.1
github.com/sirupsen/logrus v1.8.1
- github.com/spf13/cobra v1.4.0
+ github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.7.1
+ github.com/stretchr/testify v1.8.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/ulikunitz/xz v0.5.10
- github.com/vbauerster/mpb/v7 v7.4.1
+ github.com/vbauerster/mpb/v7 v7.4.2
github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
+ golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
golang.org/x/text v0.3.7
google.golang.org/protobuf v1.28.0
gopkg.in/inf.v0 v0.9.1
- gopkg.in/yaml.v2 v2.4.0
+ gopkg.in/yaml.v3 v3.0.1
)
+
+require github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 // indirect
+
+replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
diff --git a/go.sum b/go.sum
index 19cf48e30..13056f853 100644
--- a/go.sum
+++ b/go.sum
@@ -109,8 +109,9 @@ github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo=
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -118,6 +119,7 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -196,8 +198,6 @@ github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6pr
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0 h1:txB5jvhzUCSiiQmqmMWpo5CEB7Gj/Hq5Xqi7eaPl8ko=
github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0/go.mod h1:67kWC1PXQLR3lM/mmNnu3Kzn7K4TSWZAGUuQP1JSngk=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/checkpoint-restore/go-criu/v5 v5.2.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
@@ -211,8 +211,8 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -268,9 +268,8 @@ github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
-github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
-github.com/containerd/containerd v1.6.4 h1:SEDZBp10mhCp+hkO3Njz/YhGrI7ah3edNcUlRdUPOgg=
-github.com/containerd/containerd v1.6.4/go.mod h1:oWOqbuJUZmOVafhA0lj2NAXbiO1u7F0K5l1bUgdyo94=
+github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0=
+github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -290,8 +289,7 @@ github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZH
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.5/go.mod h1:Rf2ZrMycr1El589IyuRzn7RkfdRZVKaFGaxSDHVAjj0=
+github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -330,23 +328,25 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
-github.com/containernetworking/cni v1.1.0 h1:T00oIz4hef+/p9gpRZa57SnIN+QnbmAHBjbxaOSFo9U=
-github.com/containernetworking/cni v1.1.0/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k=
+github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/buildah v1.26.1 h1:D65Vuo+orsI14WWtJhSX6KrpgBBa7+hveVWevzG8p8E=
-github.com/containers/buildah v1.26.1/go.mod h1:CsWSG8OpJd8v3mlLREJzVAOBgC93DjRNALUVHoi8QsY=
-github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
-github.com/containers/common v0.48.1-0.20220512112240-7536bf6ff9b1 h1:U+2rYjzRCvI3WRSFf+Rohtu7jRgk/VhJjjFHbU6j0Sk=
-github.com/containers/common v0.48.1-0.20220512112240-7536bf6ff9b1/go.mod h1:h8YZVXePE7UViJQ3fPWpYAaeDNYBCzGtL5dA3N8yfT8=
+github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c h1:/fKyiLFFuceBPZGJ0Lig7ElURhfsslAOw1BOcItD+X8=
+github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c/go.mod h1:b0L+u2Dam7soWGn5sVTK31L++Xrf80AbGvK5z9D2+lw=
+github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9/go.mod h1:WBLwq+i7bicCpH54V70HM6s7jqDAESTlYnd05XXp0ac=
+github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9 h1:KeGIf6Z1R+16Sq+5/fhkoCCKa7wjQ6Ksnmo0beU1E2U=
+github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9/go.mod h1:Zt3D/IhgFyG1oaBrqsbn9NdH/4fkjsO2Y0ahP12ieu4=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
-github.com/containers/image/v5 v5.21.2-0.20220519193817-1e26896b8059 h1:/FzsjrQ2nJtMom9IXEGieORlwUk/NyDuuz5SWcNo324=
-github.com/containers/image/v5 v5.21.2-0.20220519193817-1e26896b8059/go.mod h1:KntCBNQn3qOuZmQuJ38ORyTozmWXiuo05Vef2S0Sm5M=
+github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4/go.mod h1:OsX9sFexyGF0FCNAjfcVFv3IwMqDyLyV/WQY/roLPcE=
+github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471/go.mod h1:KntCBNQn3qOuZmQuJ38ORyTozmWXiuo05Vef2S0Sm5M=
+github.com/containers/image/v5 v5.21.2-0.20220615100411-a78a00792916/go.mod h1:hEf8L08Hrh/3fK4vLf5l7988MJmij2swfCBUzqgnhF4=
+github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c h1:U2F/FaMt8gPP8sIpBfvXMCP5gAZfyxoYZ7lmu0dwsXc=
+github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c/go.mod h1:hEf8L08Hrh/3fK4vLf5l7988MJmij2swfCBUzqgnhF4=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -354,17 +354,19 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
-github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f h1:hffElEaoDQfREHltc2wtFPd68BqDmzW6KkEDpuSRBjs=
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ=
+github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc=
github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0=
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
-github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
github.com/containers/storage v1.41.0/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s=
-github.com/containers/storage v1.41.1-0.20220511210719-cacc3325a9c8 h1:4XdTbn3iVIr1+kN5srZND2G3/Q3hJiZSZZtKdL6r9jg=
-github.com/containers/storage v1.41.1-0.20220511210719-cacc3325a9c8/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s=
+github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6/go.mod h1:6XQ68cEG8ojfP/m3HIupFV1rZsnqeFmaE8N1ctBP94Y=
+github.com/containers/storage v1.41.1-0.20220614214904-388efef4bf7e/go.mod h1:6XQ68cEG8ojfP/m3HIupFV1rZsnqeFmaE8N1ctBP94Y=
+github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35 h1:6o+kw2z0BrdJ1wNYUbwLVzlb/65KPmZSy+32GNfppzM=
+github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35/go.mod h1:6XQ68cEG8ojfP/m3HIupFV1rZsnqeFmaE8N1ctBP94Y=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -392,11 +394,11 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
@@ -428,12 +430,11 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.15+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.16+incompatible h1:2Db6ZR/+FUR3hqPMwnogOPHFn405crbpxvWzKovETOQ=
github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
@@ -489,14 +490,15 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y=
-github.com/fsouza/go-dockerclient v1.7.11 h1:pRmGMANAl+tmr+IYNYq8IWWcSbiKQMSRumYLv8H5sfk=
-github.com/fsouza/go-dockerclient v1.7.11/go.mod h1:zvYxutUNOK853i1s7VywZxQgxSHbm7A6en/q9MHBN6k=
+github.com/fsouza/go-dockerclient v1.8.1 h1:a27vHYqNSZz88nUAurI1o6W5PgEt63nAWilOI+j63RE=
+github.com/fsouza/go-dockerclient v1.8.1/go.mod h1:zmA2ogSxRnXmbZcy0Aq7yhRoCdP/bDns/qghCK9SWtM=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
@@ -777,14 +779,13 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/insomniacslk/dhcp v0.0.0-20220119180841-3c283ff8b7dd/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E=
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
@@ -837,8 +838,9 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ=
github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -848,8 +850,9 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -951,13 +954,15 @@ github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
+github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
+github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc=
github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
@@ -1047,17 +1052,8 @@ github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4=
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg=
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
-github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
-github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw=
-github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc h1:qjkUzmFsOFbQyjObybk40mRida83j5IHRaKzLGdBbEU=
+github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc/go.mod h1:wUOQGsiKae6VzA/UvlCK3cO+pHk8F2VQHlIoITEfMM8=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -1167,6 +1163,8 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0=
github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rootless-containers/rootlesskit v1.0.1 h1:jepqW1txFSowKSMAEkVhWH3Oa1TCY9S400MVYe/6Iro=
github.com/rootless-containers/rootlesskit v1.0.1/go.mod h1:t2UAiYagxrJ+wmpFAUIZPcqsm4k2B7ve6g7lILKbloc=
@@ -1188,9 +1186,10 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y=
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
+github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -1232,8 +1231,9 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -1254,8 +1254,9 @@ github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -1265,11 +1266,14 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk=
github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
+github.com/sylabs/sif/v2 v2.7.1 h1:XXt9AP39sQfsMCGOGQ/XP9H47yqZOvAonalkaCaNIYM=
+github.com/sylabs/sif/v2 v2.7.1/go.mod h1:bBse2nEFd3yHkmq6KmAOFEWQg5LdFYiQUdVcgamxlc8=
github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1308,6 +1312,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.5.1/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs=
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -1316,8 +1321,9 @@ github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/V
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vbauerster/mpb/v7 v7.4.1 h1:NhLMWQ3gNg2KJR8oeA9lO8Xvq+eNPmixDmB6JEQOUdA=
github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo=
+github.com/vbauerster/mpb/v7 v7.4.2 h1:n917F4d8EWdUKc9c81wFkksyG6P6Mg7IETfKCE1Xqng=
+github.com/vbauerster/mpb/v7 v7.4.2/go.mod h1:UmOiIUI8aPqWXIps0ciik3RKMdzx7+ooQpq+fBcXwBA=
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -1708,14 +1714,16 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2049,8 +2057,10 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
diff --git a/hack/golangci-lint.sh b/hack/golangci-lint.sh
index 8b80bd9c9..2eaf206d7 100755
--- a/hack/golangci-lint.sh
+++ b/hack/golangci-lint.sh
@@ -4,10 +4,9 @@
set -e
declare -A BUILD_TAGS
-# TODO: add systemd tag
BUILD_TAGS[default]="apparmor,seccomp,selinux,linter"
-BUILD_TAGS[abi]="${BUILD_TAGS[default]},!remoteclient"
-BUILD_TAGS[tunnel]="${BUILD_TAGS[default]},remote,remoteclient"
+BUILD_TAGS[abi]="${BUILD_TAGS[default]},systemd"
+BUILD_TAGS[tunnel]="${BUILD_TAGS[default]},remote"
declare -A SKIP_DIRS
SKIP_DIRS[abi]="pkg/machine/e2e"
diff --git a/hack/install_golangci.sh b/hack/install_golangci.sh
index 4ef6bc83b..896d59901 100755
--- a/hack/install_golangci.sh
+++ b/hack/install_golangci.sh
@@ -9,14 +9,17 @@ function install() {
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v$VERSION
}
-BIN="./bin/golangci-lint"
+# Undocumented behavior: the golangci-lint installer honors $BINDIR from the
+# environment; it defaults to ./bin, but we can't rely on that.
+export BINDIR="./bin"
+BIN="$BINDIR/golangci-lint"
if [ ! -x "$BIN" ]; then
install
else
# Prints its own file name as part of --version output
$BIN --version | grep "$VERSION"
if [ $? -eq 0 ]; then
- echo "Using existing $(dirname $BIN)/$($BIN --version)"
+ echo "Using existing $BINDIR/$($BIN --version)"
else
install
fi
diff --git a/hack/podman-registry b/hack/podman-registry
index 3f0aa2aea..f6a266883 100755
--- a/hack/podman-registry
+++ b/hack/podman-registry
@@ -122,6 +122,25 @@ function must_pass() {
fi
}
+###################
+# wait_for_port # Returns once port is available on localhost
+###################
+function wait_for_port() {
+ local port=$1 # Numeric port
+
+ local host=127.0.0.1
+ local _timeout=5
+
+ # Probe the port once per second until the timeout expires; {unused_fd}<>
+ # asks bash to allocate a spare file descriptor for the /dev/tcp test.
+ while [ $_timeout -gt 0 ]; do
+ { exec {unused_fd}<> /dev/tcp/$host/$port; } &>/dev/null && return
+ sleep 1
+ _timeout=$(( $_timeout - 1 ))
+ done
+
+ die "Timed out waiting for port $port"
+}
+
# END helper functions
###############################################################################
# BEGIN action processing
@@ -130,7 +149,7 @@ function do_start() {
# If called without a port, assign a random one in the 5xxx range
if [ -z "${PODMAN_REGISTRY_PORT}" ]; then
for port in $(shuf -i 5000-5999);do
- if ! { exec 3<> /dev/tcp/127.0.0.1/$port; } &>/dev/null; then
+ if ! { exec {unused_fd}<> /dev/tcp/127.0.0.1/$port; } &>/dev/null; then
PODMAN_REGISTRY_PORT=$port
break
fi
@@ -203,6 +222,9 @@ function do_start() {
-e "REGISTRY_HTTP_TLS_KEY=/auth/domain.key" \
registry:2.6
+ # Confirm that registry started and port is active
+ wait_for_port $PODMAN_REGISTRY_PORT
+
# Dump settings. Our caller will use these to access the registry.
for v in IMAGE PORT USER PASS; do
echo "PODMAN_REGISTRY_${v}=\"$(eval echo \$PODMAN_REGISTRY_${v})\""
diff --git a/hack/podman-registry-go/registry.go b/hack/podman-registry-go/registry.go
index af8f3117c..d66d092b6 100644
--- a/hack/podman-registry-go/registry.go
+++ b/hack/podman-registry-go/registry.go
@@ -1,10 +1,10 @@
package registry
import (
+ "fmt"
"strings"
"github.com/containers/podman/v4/utils"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -57,7 +57,7 @@ func StartWithOptions(options *Options) (*Registry, error) {
// Start a registry.
out, err := utils.ExecCmd(binary, args...)
if err != nil {
- return nil, errors.Wrapf(err, "error running %q: %s", binary, out)
+ return nil, fmt.Errorf("error running %q: %s: %w", binary, out, err)
}
// Parse the output.
@@ -68,7 +68,7 @@ func StartWithOptions(options *Options) (*Registry, error) {
}
spl := strings.Split(s, "=")
if len(spl) != 2 {
- return nil, errors.Errorf("unexpected output format %q: want 'PODMAN_...=...'", s)
+ return nil, fmt.Errorf("unexpected output format %q: want 'PODMAN_...=...'", s)
}
key := spl[0]
val := strings.TrimSuffix(strings.TrimPrefix(spl[1], "\""), "\"")
@@ -88,16 +88,16 @@ func StartWithOptions(options *Options) (*Registry, error) {
// Extra sanity check.
if registry.Image == "" {
- return nil, errors.Errorf("unexpected output %q: %q missing", out, ImageKey)
+ return nil, fmt.Errorf("unexpected output %q: %q missing", out, ImageKey)
}
if registry.User == "" {
- return nil, errors.Errorf("unexpected output %q: %q missing", out, UserKey)
+ return nil, fmt.Errorf("unexpected output %q: %q missing", out, UserKey)
}
if registry.Password == "" {
- return nil, errors.Errorf("unexpected output %q: %q missing", out, PassKey)
+ return nil, fmt.Errorf("unexpected output %q: %q missing", out, PassKey)
}
if registry.Port == "" {
- return nil, errors.Errorf("unexpected output %q: %q missing", out, PortKey)
+ return nil, fmt.Errorf("unexpected output %q: %q missing", out, PortKey)
}
registry.running = true
@@ -112,7 +112,7 @@ func (r *Registry) Stop() error {
return nil
}
if _, err := utils.ExecCmd(binary, "-P", r.Port, "stop"); err != nil {
- return errors.Wrapf(err, "error stopping registry (%v) with %q", *r, binary)
+ return fmt.Errorf("error stopping registry (%v) with %q: %w", *r, binary, err)
}
r.running = false
return nil
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 9745121c7..81f11410b 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -2,16 +2,18 @@ package libpod
import (
"bytes"
+ "errors"
"fmt"
"net"
"os"
+ "strconv"
"strings"
"sync"
+ "time"
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/libpod/define"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
@@ -63,6 +65,13 @@ type BoltState struct {
// initially created the database. This must match for any further instances
// that access the database, to ensure that state mismatches with
// containers/storage do not occur.
+// - exitCodeBucket/exitCodeTimeStampBucket: (#14559) exit codes must be part
+// of the database to resolve a previous race condition when one process waits
+// for the exit file to be written and another process removes it along with
+// the container during auto-removal. The same race would happen trying to
+// read the exit code from the containers bucket. Hence, exit codes go into
+// their own bucket. To avoid the rather expensive JSON (un)marshaling, we
+// have two buckets: one for the exit codes, the other for the timestamps.
// NewBoltState creates a new bolt-backed state database
func NewBoltState(path string, runtime *Runtime) (State, error) {
@@ -76,7 +85,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
db, err := bolt.Open(path, 0600, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error opening database %s", path)
+ return nil, fmt.Errorf("error opening database %s: %w", path, err)
}
// Everywhere else, we use s.deferredCloseDBCon(db) to ensure the state's DB
// mutex is also unlocked.
@@ -98,6 +107,8 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
allVolsBkt,
execBkt,
runtimeConfigBkt,
+ exitCodeBkt,
+ exitCodeTimeStampBkt,
}
// Does the DB need an update?
@@ -112,7 +123,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
return nil
})
if err != nil {
- return nil, errors.Wrapf(err, "error checking DB schema")
+ return nil, fmt.Errorf("error checking DB schema: %w", err)
}
if !needsUpdate {
@@ -124,13 +135,13 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
err = db.Update(func(tx *bolt.Tx) error {
for _, bkt := range createBuckets {
if _, err := tx.CreateBucketIfNotExists(bkt); err != nil {
- return errors.Wrapf(err, "error creating bucket %s", string(bkt))
+ return fmt.Errorf("error creating bucket %s: %w", string(bkt), err)
}
}
return nil
})
if err != nil {
- return nil, errors.Wrapf(err, "error creating buckets for DB")
+ return nil, fmt.Errorf("error creating buckets for DB: %w", err)
}
state.valid = true
@@ -162,6 +173,11 @@ func (s *BoltState) Refresh() error {
return err
}
+ namesBucket, err := getNamesBucket(tx)
+ if err != nil {
+ return err
+ }
+
ctrsBucket, err := getCtrBucket(tx)
if err != nil {
return err
@@ -187,11 +203,51 @@ func (s *BoltState) Refresh() error {
return err
}
+ exitCodeBucket, err := getExitCodeBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ // Clear all stored exit codes and their timestamps
+ toRemoveExitCodes := []string{}
+ err = exitCodeBucket.ForEach(func(id, _ []byte) error {
+ toRemoveExitCodes = append(toRemoveExitCodes, string(id))
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error reading exit codes bucket: %w", err)
+ }
+ for _, id := range toRemoveExitCodes {
+ if err := exitCodeBucket.Delete([]byte(id)); err != nil {
+ return fmt.Errorf("error removing exit code for ID %s: %w", id, err)
+ }
+ }
+
+ toRemoveTimeStamps := []string{}
+ err = timeStampBucket.ForEach(func(id, _ []byte) error {
+ toRemoveTimeStamps = append(toRemoveTimeStamps, string(id))
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("reading timestamps bucket: %w", err)
+ }
+ for _, id := range toRemoveTimeStamps {
+ if err := timeStampBucket.Delete([]byte(id)); err != nil {
+ return fmt.Errorf("removing timestamp for ID %s: %w", id, err)
+ }
+ }
+
// Iterate through all IDs. Check if they are containers.
// If they are, unmarshal their state, and then clear
// PID, mountpoint, and state for all of them
// Then save the modified state
// Also clear all network namespaces
+ toRemoveIDs := []string{}
err = idBucket.ForEach(func(id, name []byte) error {
ctrBkt := ctrsBucket.Bucket(id)
if ctrBkt == nil {
@@ -199,20 +255,28 @@ func (s *BoltState) Refresh() error {
podBkt := podsBucket.Bucket(id)
if podBkt == nil {
// This is neither a pod nor a container
- // Error out on the dangling ID
- return errors.Wrapf(define.ErrInternal, "id %s is not a pod or a container", string(id))
+ // Something is seriously wrong, but
+ // continue on and try to clean up the
+ // state and become consistent.
+ // Just note what needs to be removed
+ // for now; bbolt's ForEach does not
+ // allow modifying the bucket while
+ // iterating over it.
+ logrus.Errorf("Database issue: dangling ID %s found (not a pod or container) - removing", string(id))
+ toRemoveIDs = append(toRemoveIDs, string(id))
+ return nil
}
// Get the state
stateBytes := podBkt.Get(stateKey)
if stateBytes == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s missing state key", string(id))
+ return fmt.Errorf("pod %s missing state key: %w", string(id), define.ErrInternal)
}
state := new(podState)
if err := json.Unmarshal(stateBytes, state); err != nil {
- return errors.Wrapf(err, "error unmarshalling state for pod %s", string(id))
+ return fmt.Errorf("error unmarshalling state for pod %s: %w", string(id), err)
}
// Clear the Cgroup path
@@ -220,11 +284,11 @@ func (s *BoltState) Refresh() error {
newStateBytes, err := json.Marshal(state)
if err != nil {
- return errors.Wrapf(err, "error marshalling modified state for pod %s", string(id))
+ return fmt.Errorf("error marshalling modified state for pod %s: %w", string(id), err)
}
if err := podBkt.Put(stateKey, newStateBytes); err != nil {
- return errors.Wrapf(err, "error updating state for pod %s in DB", string(id))
+ return fmt.Errorf("error updating state for pod %s in DB: %w", string(id), err)
}
// It's not a container, nothing to do
@@ -233,30 +297,30 @@ func (s *BoltState) Refresh() error {
// First, delete the network namespace
if err := ctrBkt.Delete(netNSKey); err != nil {
- return errors.Wrapf(err, "error removing network namespace for container %s", string(id))
+ return fmt.Errorf("error removing network namespace for container %s: %w", string(id), err)
}
stateBytes := ctrBkt.Get(stateKey)
if stateBytes == nil {
// Badly formatted container bucket
- return errors.Wrapf(define.ErrInternal, "container %s missing state in DB", string(id))
+ return fmt.Errorf("container %s missing state in DB: %w", string(id), define.ErrInternal)
}
state := new(ContainerState)
if err := json.Unmarshal(stateBytes, state); err != nil {
- return errors.Wrapf(err, "error unmarshalling state for container %s", string(id))
+ return fmt.Errorf("error unmarshalling state for container %s: %w", string(id), err)
}
resetState(state)
newStateBytes, err := json.Marshal(state)
if err != nil {
- return errors.Wrapf(err, "error marshalling modified state for container %s", string(id))
+ return fmt.Errorf("error marshalling modified state for container %s: %w", string(id), err)
}
if err := ctrBkt.Put(stateKey, newStateBytes); err != nil {
- return errors.Wrapf(err, "error updating state for container %s in DB", string(id))
+ return fmt.Errorf("error updating state for container %s in DB: %w", string(id), err)
}
// Delete all exec sessions, if there are any
@@ -274,7 +338,7 @@ func (s *BoltState) Refresh() error {
}
for _, execID := range toRemove {
if err := ctrExecBkt.Delete([]byte(execID)); err != nil {
- return errors.Wrapf(err, "error removing exec session %s from container %s", execID, string(id))
+ return fmt.Errorf("error removing exec session %s from container %s: %w", execID, string(id), err)
}
}
}
@@ -285,11 +349,29 @@ func (s *BoltState) Refresh() error {
return err
}
+ // Remove dangling IDs.
+ for _, id := range toRemoveIDs {
+ // Look up the ID to see if we also have a dangling name
+ // in the DB.
+ name := idBucket.Get([]byte(id))
+ if name != nil {
+ if testID := namesBucket.Get(name); testID != nil {
+ logrus.Infof("Found dangling name %s (ID %s) in database", string(name), id)
+ if err := namesBucket.Delete(name); err != nil {
+ return fmt.Errorf("error removing dangling name %s (ID %s) from database: %w", string(name), id, err)
+ }
+ }
+ }
+ if err := idBucket.Delete([]byte(id)); err != nil {
+ return fmt.Errorf("error removing dangling ID %s from database: %w", id, err)
+ }
+ }
+
// Now refresh volumes
err = allVolsBucket.ForEach(func(id, name []byte) error {
dbVol := volBucket.Bucket(id)
if dbVol == nil {
- return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
+ return fmt.Errorf("inconsistency in state - volume %s is in all volumes bucket but volume not found: %w", string(id), define.ErrInternal)
}
// Get the state
@@ -302,7 +384,7 @@ func (s *BoltState) Refresh() error {
oldState := new(VolumeState)
if err := json.Unmarshal(volStateBytes, oldState); err != nil {
- return errors.Wrapf(err, "error unmarshalling state for volume %s", string(id))
+ return fmt.Errorf("error unmarshalling state for volume %s: %w", string(id), err)
}
// Reset mount count to 0
@@ -311,11 +393,11 @@ func (s *BoltState) Refresh() error {
newState, err := json.Marshal(oldState)
if err != nil {
- return errors.Wrapf(err, "error marshalling state for volume %s", string(id))
+ return fmt.Errorf("error marshalling state for volume %s: %w", string(id), err)
}
if err := dbVol.Put(stateKey, newState); err != nil {
- return errors.Wrapf(err, "error storing new state for volume %s", string(id))
+ return fmt.Errorf("error storing new state for volume %s: %w", string(id), err)
}
return nil
@@ -339,7 +421,7 @@ func (s *BoltState) Refresh() error {
for _, execSession := range toRemoveExec {
if err := execBucket.Delete([]byte(execSession)); err != nil {
- return errors.Wrapf(err, "error deleting exec session %s registry from database", execSession)
+ return fmt.Errorf("error deleting exec session %s registry from database: %w", execSession, err)
}
}
@@ -561,7 +643,7 @@ func (s *BoltState) LookupContainerID(idOrName string) (string, error) {
if s.namespaceBytes != nil {
ns := nsBucket.Get(fullID)
if !bytes.Equal(ns, s.namespaceBytes) {
- return errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
+ return fmt.Errorf("no container found with name or ID %s: %w", idOrName, define.ErrNoSuchCtr)
}
}
id = fullID
@@ -686,7 +768,7 @@ func (s *BoltState) AddContainer(ctr *Container) error {
}
if ctr.config.Pod != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot add a container that belongs to a pod with AddContainer - use AddContainerToPod")
+ return fmt.Errorf("cannot add a container that belongs to a pod with AddContainer - use AddContainerToPod: %w", define.ErrInvalidArg)
}
return s.addContainer(ctr, nil)
@@ -701,7 +783,7 @@ func (s *BoltState) RemoveContainer(ctr *Container) error {
}
if ctr.config.Pod != "" {
- return errors.Wrapf(define.ErrPodExists, "container %s is part of a pod, use RemoveContainerFromPod instead", ctr.ID())
+ return fmt.Errorf("container %s is part of a pod, use RemoveContainerFromPod instead: %w", ctr.ID(), define.ErrPodExists)
}
db, err := s.getDBCon()
@@ -727,7 +809,7 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
newState := new(ContainerState)
@@ -750,16 +832,16 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
ctrToUpdate := ctrBucket.Bucket(ctrID)
if ctrToUpdate == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
newStateBytes := ctrToUpdate.Get(stateKey)
if newStateBytes == nil {
- return errors.Wrapf(define.ErrInternal, "container %s does not have a state key in DB", ctr.ID())
+ return fmt.Errorf("container %s does not have a state key in DB: %w", ctr.ID(), define.ErrInternal)
}
if err := json.Unmarshal(newStateBytes, newState); err != nil {
- return errors.Wrapf(err, "error unmarshalling container %s state", ctr.ID())
+ return fmt.Errorf("error unmarshalling container %s state: %w", ctr.ID(), err)
}
netNSBytes := ctrToUpdate.Get(netNSKey)
@@ -799,12 +881,12 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
stateJSON, err := json.Marshal(ctr.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling container %s state to JSON", ctr.ID())
+ return fmt.Errorf("error marshalling container %s state to JSON: %w", ctr.ID(), err)
}
netNSPath := getNetNSPath(ctr)
@@ -825,22 +907,22 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
ctrToSave := ctrBucket.Bucket(ctrID)
if ctrToSave == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in DB", ctr.ID())
+ return fmt.Errorf("container %s does not exist in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
// Update the state
if err := ctrToSave.Put(stateKey, stateJSON); err != nil {
- return errors.Wrapf(err, "error updating container %s state in DB", ctr.ID())
+ return fmt.Errorf("error updating container %s state in DB: %w", ctr.ID(), err)
}
if netNSPath != "" {
if err := ctrToSave.Put(netNSKey, []byte(netNSPath)); err != nil {
- return errors.Wrapf(err, "error updating network namespace path for container %s in DB", ctr.ID())
+ return fmt.Errorf("error updating network namespace path for container %s in DB: %w", ctr.ID(), err)
}
} else {
// Delete the existing network namespace
if err := ctrToSave.Delete(netNSKey); err != nil {
- return errors.Wrapf(err, "error removing network namespace path for container %s in DB", ctr.ID())
+ return fmt.Errorf("error removing network namespace path for container %s in DB: %w", ctr.ID(), err)
}
}
@@ -862,7 +944,7 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return nil, fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
depCtrs := []string{}
@@ -882,12 +964,12 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
ctrDB := ctrBucket.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
+ return fmt.Errorf("no container with ID %q found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
dependsBkt := ctrDB.Bucket(dependenciesBkt)
if dependsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "container %s has no dependencies bucket", ctr.ID())
+ return fmt.Errorf("container %s has no dependencies bucket: %w", ctr.ID(), define.ErrInternal)
}
// Iterate through and add dependencies
@@ -940,7 +1022,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// be much less helpful.
ctrExists := ctrBucket.Bucket(id)
if ctrExists == nil {
- return errors.Wrapf(define.ErrInternal, "state is inconsistent - container ID %s in all containers, but container not found", string(id))
+ return fmt.Errorf("state is inconsistent - container ID %s in all containers, but container not found: %w", string(id), define.ErrInternal)
}
ctr := new(Container)
@@ -952,7 +1034,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// ignore it safely.
// We just won't include the container in the
// results.
- if errors.Cause(err) != define.ErrNSMismatch {
+ if !errors.Is(err, define.ErrNSMismatch) {
// Even if it's not an NS mismatch, it's
// not worth erroring over.
// If we do, a single bad container JSON
@@ -984,7 +1066,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return nil, fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
// if the network mode is not bridge return no networks
@@ -1013,7 +1095,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
ctrNetworkBkt := dbCtr.Bucket(networksBkt)
@@ -1051,7 +1133,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
var networkList []string
@@ -1060,7 +1142,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
if ctrNetworkBkt == nil {
ctrNetworkBkt, err = dbCtr.CreateBucket(networksBkt)
if err != nil {
- return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID())
+ return fmt.Errorf("error creating networks bucket for container %s: %w", ctr.ID(), err)
}
// the container has no networks in the db; look up the config and write to the db
networkList = ctr.config.NetworksDeprecated
@@ -1084,7 +1166,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
if ctr.state.NetInterfaceDescriptions != nil {
eth, exists := ctr.state.NetInterfaceDescriptions.getInterfaceByName(network)
if !exists {
- return errors.Errorf("no network interface name for container %s on network %s", ctr.config.ID, network)
+ return fmt.Errorf("no network interface name for container %s on network %s", ctr.config.ID, network)
}
intName = eth
} else {
@@ -1158,16 +1240,16 @@ func (s *BoltState) NetworkConnect(ctr *Container, network string, opts types.Pe
}
if network == "" {
- return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ return fmt.Errorf("network names must not be empty: %w", define.ErrInvalidArg)
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
optBytes, err := json.Marshal(opts)
if err != nil {
- return errors.Wrapf(err, "error marshalling network options JSON for container %s", ctr.ID())
+ return fmt.Errorf("error marshalling network options JSON for container %s: %w", ctr.ID(), err)
}
ctrID := []byte(ctr.ID())
@@ -1187,21 +1269,21 @@ func (s *BoltState) NetworkConnect(ctr *Container, network string, opts types.Pe
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
ctrNetworksBkt := dbCtr.Bucket(networksBkt)
if ctrNetworksBkt == nil {
- return errors.Wrapf(define.ErrNoSuchNetwork, "container %s does not have a network bucket", ctr.ID())
+ return fmt.Errorf("container %s does not have a network bucket: %w", ctr.ID(), define.ErrNoSuchNetwork)
}
netConnected := ctrNetworksBkt.Get([]byte(network))
if netConnected != nil {
- return errors.Wrapf(define.ErrNetworkExists, "container %s is already connected to network %q", ctr.ID(), network)
+ return fmt.Errorf("container %s is already connected to network %q: %w", ctr.ID(), network, define.ErrNetworkExists)
}
// Add the network
if err := ctrNetworksBkt.Put([]byte(network), optBytes); err != nil {
- return errors.Wrapf(err, "error adding container %s to network %s in DB", ctr.ID(), network)
+ return fmt.Errorf("error adding container %s to network %s in DB: %w", ctr.ID(), network, err)
}
return nil
@@ -1220,11 +1302,11 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
}
if network == "" {
- return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ return fmt.Errorf("network names must not be empty: %w", define.ErrInvalidArg)
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
ctrID := []byte(ctr.ID())
@@ -1244,21 +1326,21 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
ctrNetworksBkt := dbCtr.Bucket(networksBkt)
if ctrNetworksBkt == nil {
- return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not connected to any CNI networks, so cannot disconnect", ctr.ID())
+ return fmt.Errorf("container %s is not connected to any CNI networks, so cannot disconnect: %w", ctr.ID(), define.ErrNoSuchNetwork)
}
netConnected := ctrNetworksBkt.Get([]byte(network))
if netConnected == nil {
- return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not connected to CNI network %q", ctr.ID(), network)
+ return fmt.Errorf("container %s is not connected to CNI network %q: %w", ctr.ID(), network, define.ErrNoSuchNetwork)
}
if err := ctrNetworksBkt.Delete([]byte(network)); err != nil {
- return errors.Wrapf(err, "error removing container %s from network %s", ctr.ID(), network)
+ return fmt.Errorf("error removing container %s from network %s: %w", ctr.ID(), network, err)
}
if ctrAliasesBkt != nil {
@@ -1268,7 +1350,7 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
}
if err := ctrAliasesBkt.DeleteBucket([]byte(network)); err != nil {
- return errors.Wrapf(err, "error removing container %s network aliases for network %s", ctr.ID(), network)
+ return fmt.Errorf("error removing container %s network aliases for network %s: %w", ctr.ID(), network, err)
}
}
@@ -1309,6 +1391,204 @@ func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) {
return config, nil
}
+// AddContainerExitCode adds the exit code for the specified container to the database.
+func (s *BoltState) AddContainerExitCode(id string, exitCode int32) error {
+ if len(id) == 0 {
+ return define.ErrEmptyID
+ }
+
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ rawID := []byte(id)
+ rawExitCode := []byte(strconv.Itoa(int(exitCode)))
+ rawTimeStamp, err := time.Now().MarshalText()
+ if err != nil {
+ return fmt.Errorf("marshaling exit-code time stamp: %w", err)
+ }
+
+ return db.Update(func(tx *bolt.Tx) error {
+ exitCodeBucket, err := getExitCodeBucket(tx)
+ if err != nil {
+ return err
+ }
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ if err := exitCodeBucket.Put(rawID, rawExitCode); err != nil {
+ return fmt.Errorf("adding exit code of container %s to DB: %w", id, err)
+ }
+ if err := timeStampBucket.Put(rawID, rawTimeStamp); err != nil {
+ if rmErr := exitCodeBucket.Delete(rawID); rmErr != nil {
+ logrus.Errorf("Removing exit code of container %s from DB: %v", id, rmErr)
+ }
+ return fmt.Errorf("adding exit-code time stamp of container %s to DB: %w", id, err)
+ }
+
+ return nil
+ })
+}
+
+// GetContainerExitCode returns the exit code for the specified container.
+func (s *BoltState) GetContainerExitCode(id string) (int32, error) {
+ if len(id) == 0 {
+ return -1, define.ErrEmptyID
+ }
+
+ if !s.valid {
+ return -1, define.ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return -1, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ rawID := []byte(id)
+ result := int32(-1)
+ return result, db.View(func(tx *bolt.Tx) error {
+ exitCodeBucket, err := getExitCodeBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ rawExitCode := exitCodeBucket.Get(rawID)
+ if rawExitCode == nil {
+ return fmt.Errorf("getting exit code of container %s from DB: %w", id, define.ErrNoSuchExitCode)
+ }
+
+ exitCode, err := strconv.Atoi(string(rawExitCode))
+ if err != nil {
+ return fmt.Errorf("converting raw exit code %v of container %s: %w", rawExitCode, id, err)
+ }
+
+ result = int32(exitCode)
+ return nil
+ })
+}
+
+// GetContainerExitCodeTimeStamp returns the time stamp when the exit code of
+// the specified container was added to the database.
+func (s *BoltState) GetContainerExitCodeTimeStamp(id string) (*time.Time, error) {
+ if len(id) == 0 {
+ return nil, define.ErrEmptyID
+ }
+
+ if !s.valid {
+ return nil, define.ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ rawID := []byte(id)
+ var result time.Time
+ return &result, db.View(func(tx *bolt.Tx) error {
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ rawTimeStamp := timeStampBucket.Get(rawID)
+ if rawTimeStamp == nil {
+ return fmt.Errorf("getting exit-code time stamp of container %s from DB: %w", id, define.ErrNoSuchExitCode)
+ }
+
+ if err := result.UnmarshalText(rawTimeStamp); err != nil {
+ return fmt.Errorf("converting raw time stamp %v of container %s from DB: %w", rawTimeStamp, id, err)
+ }
+
+ return nil
+ })
+}
+
+// PruneContainerExitCodes removes exit codes older than 5 minutes.
+func (s *BoltState) PruneContainerExitCodes() error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ toRemoveIDs := []string{}
+
+ threshold := time.Minute * 5
+ err = db.View(func(tx *bolt.Tx) error {
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ return timeStampBucket.ForEach(func(rawID, rawTimeStamp []byte) error {
+ var timeStamp time.Time
+ if err := timeStamp.UnmarshalText(rawTimeStamp); err != nil {
+ return fmt.Errorf("converting raw time stamp %v of container %s from DB: %w", rawTimeStamp, string(rawID), err)
+ }
+ if time.Since(timeStamp) > threshold {
+ toRemoveIDs = append(toRemoveIDs, string(rawID))
+ }
+ return nil
+ })
+ })
+ if err != nil {
+ return fmt.Errorf("reading exit codes to prune: %w", err)
+ }
+
+ if len(toRemoveIDs) > 0 {
+ err = db.Update(func(tx *bolt.Tx) error {
+ exitCodeBucket, err := getExitCodeBucket(tx)
+ if err != nil {
+ return err
+ }
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ var finalErr error
+ for _, id := range toRemoveIDs {
+ rawID := []byte(id)
+ if err := exitCodeBucket.Delete(rawID); err != nil {
+ if finalErr != nil {
+ logrus.Error(finalErr)
+ }
+ finalErr = fmt.Errorf("removing exit code of container %s from DB: %w", id, err)
+ }
+ if err := timeStampBucket.Delete(rawID); err != nil {
+ if finalErr != nil {
+ logrus.Error(finalErr)
+ }
+ finalErr = fmt.Errorf("removing exit code timestamp of container %s from DB: %w", id, err)
+ }
+ }
+
+ return finalErr
+ })
+ if err != nil {
+ return fmt.Errorf("pruning exit codes: %w", err)
+ }
+ }
+
+ return nil
+}
+
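The four methods above are the whole exit-code API added to BoltState. A minimal usage sketch follows; the harness function and the container ID are illustrative assumptions, not code from this patch — only the method names, signatures, and the 5-minute prune threshold come from the additions above.

// demoExitCodes is a hypothetical harness; only the BoltState methods it calls
// are taken from this patch.
func demoExitCodes(state *BoltState) error {
	const ctrID = "0123456789abcdef" // illustrative container ID

	// Store the exit code; the method also records a time stamp in a second bucket.
	if err := state.AddContainerExitCode(ctrID, 137); err != nil {
		return err
	}

	// Read both back; a missing entry surfaces define.ErrNoSuchExitCode via %w,
	// so callers can test for it with errors.Is.
	code, err := state.GetContainerExitCode(ctrID)
	if err != nil {
		return err
	}
	ts, err := state.GetContainerExitCodeTimeStamp(ctrID)
	if err != nil {
		return err
	}
	logrus.Infof("container %s exited %d at %s", ctrID, code, ts.Format(time.RFC3339))

	// Entries whose time stamp is older than the 5-minute threshold are dropped.
	return state.PruneContainerExitCodes()
}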
// AddExecSession adds an exec session to the state.
func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error {
if !s.valid {
@@ -1341,25 +1621,25 @@ func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error {
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not present in the database", ctr.ID())
+ return fmt.Errorf("container %s is not present in the database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
ctrExecSessionBucket, err := dbCtr.CreateBucketIfNotExists(execBkt)
if err != nil {
- return errors.Wrapf(err, "error creating exec sessions bucket for container %s", ctr.ID())
+ return fmt.Errorf("error creating exec sessions bucket for container %s: %w", ctr.ID(), err)
}
execExists := execBucket.Get(sessionID)
if execExists != nil {
- return errors.Wrapf(define.ErrExecSessionExists, "an exec session with ID %s already exists", session.ID())
+ return fmt.Errorf("an exec session with ID %s already exists: %w", session.ID(), define.ErrExecSessionExists)
}
if err := execBucket.Put(sessionID, ctrID); err != nil {
- return errors.Wrapf(err, "error adding exec session %s to DB", session.ID())
+ return fmt.Errorf("error adding exec session %s to DB: %w", session.ID(), err)
}
if err := ctrExecSessionBucket.Put(sessionID, ctrID); err != nil {
- return errors.Wrapf(err, "error adding exec session %s to container %s in DB", session.ID(), ctr.ID())
+ return fmt.Errorf("error adding exec session %s to container %s in DB: %w", session.ID(), ctr.ID(), err)
}
return nil
@@ -1393,7 +1673,7 @@ func (s *BoltState) GetExecSession(id string) (string, error) {
ctr := execBucket.Get([]byte(id))
if ctr == nil {
- return errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found", id)
+ return fmt.Errorf("no exec session with ID %s found: %w", id, define.ErrNoSuchExecSession)
}
ctrID = string(ctr)
return nil
@@ -1432,11 +1712,11 @@ func (s *BoltState) RemoveExecSession(session *ExecSession) error {
}
// Check that container ID matches
if string(sessionExists) != session.ContainerID() {
- return errors.Wrapf(define.ErrInternal, "database inconsistency: exec session %s points to container %s in state but %s in database", session.ID(), session.ContainerID(), string(sessionExists))
+ return fmt.Errorf("database inconsistency: exec session %s points to container %s in state but %s in database: %w", session.ID(), session.ContainerID(), string(sessionExists), define.ErrInternal)
}
if err := execBucket.Delete(sessionID); err != nil {
- return errors.Wrapf(err, "error removing exec session %s from database", session.ID())
+ return fmt.Errorf("error removing exec session %s from database: %w", session.ID(), err)
}
dbCtr := ctrBucket.Bucket(containerID)
@@ -1459,7 +1739,7 @@ func (s *BoltState) RemoveExecSession(session *ExecSession) error {
ctrSessionExists := ctrExecBucket.Get(sessionID)
if ctrSessionExists != nil {
if err := ctrExecBucket.Delete(sessionID); err != nil {
- return errors.Wrapf(err, "error removing exec session %s from container %s in database", session.ID(), session.ContainerID())
+ return fmt.Errorf("error removing exec session %s from container %s in database: %w", session.ID(), session.ContainerID(), err)
}
}
@@ -1567,7 +1847,7 @@ func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error {
for _, session := range sessions {
if err := ctrExecSessions.Delete([]byte(session)); err != nil {
- return errors.Wrapf(err, "error removing container %s exec session %s from database", ctr.ID(), session)
+ return fmt.Errorf("error removing container %s exec session %s from database: %w", ctr.ID(), session, err)
}
// Check if the session exists in the global table
// before removing. It should, but in cases where the DB
@@ -1578,10 +1858,10 @@ func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error {
continue
}
if string(sessionExists) != ctr.ID() {
- return errors.Wrapf(define.ErrInternal, "database mismatch: exec session %s is associated with containers %s and %s", session, ctr.ID(), string(sessionExists))
+ return fmt.Errorf("database mismatch: exec session %s is associated with containers %s and %s: %w", session, ctr.ID(), string(sessionExists), define.ErrInternal)
}
if err := execBucket.Delete([]byte(session)); err != nil {
- return errors.Wrapf(err, "error removing container %s exec session %s from exec sessions", ctr.ID(), session)
+ return fmt.Errorf("error removing container %s exec session %s from exec sessions: %w", ctr.ID(), session, err)
}
}
@@ -1604,7 +1884,7 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ return fmt.Errorf("error marshalling new configuration JSON for container %s: %w", ctr.ID(), err)
}
db, err := s.getDBCon()
@@ -1622,11 +1902,11 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
+ return fmt.Errorf("no container with ID %q found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
- return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ return fmt.Errorf("error updating container %s config JSON: %w", ctr.ID(), err)
}
return nil
@@ -1649,15 +1929,15 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
}
if newName != "" && newCfg.Name != newName {
- return errors.Wrapf(define.ErrInvalidArg, "new name %s for container %s must match name in given container config", newName, ctr.ID())
+ return fmt.Errorf("new name %s for container %s must match name in given container config: %w", newName, ctr.ID(), define.ErrInvalidArg)
}
if newName != "" && oldName == "" {
- return errors.Wrapf(define.ErrInvalidArg, "must provide old name for container if a new name is given")
+ return fmt.Errorf("must provide old name for container if a new name is given: %w", define.ErrInvalidArg)
}
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ return fmt.Errorf("error marshalling new configuration JSON for container %s: %w", ctr.ID(), err)
}
db, err := s.getDBCon()
@@ -1689,7 +1969,7 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
// rename.
needsRename = false
} else {
- return errors.Wrapf(define.ErrCtrExists, "name %s already in use, cannot rename container %s", newName, ctr.ID())
+ return fmt.Errorf("name %s already in use, cannot rename container %s: %w", newName, ctr.ID(), define.ErrCtrExists)
}
}
@@ -1698,16 +1978,16 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
// buckets are ID-indexed so we just need to
// overwrite the values there.
if err := namesBkt.Delete([]byte(oldName)); err != nil {
- return errors.Wrapf(err, "error deleting container %s old name from DB for rename", ctr.ID())
+ return fmt.Errorf("error deleting container %s old name from DB for rename: %w", ctr.ID(), err)
}
if err := idBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
- return errors.Wrapf(err, "error renaming container %s in ID bucket in DB", ctr.ID())
+ return fmt.Errorf("error renaming container %s in ID bucket in DB: %w", ctr.ID(), err)
}
if err := namesBkt.Put([]byte(newName), []byte(ctr.ID())); err != nil {
- return errors.Wrapf(err, "error adding new name %s for container %s in DB", newName, ctr.ID())
+ return fmt.Errorf("error adding new name %s for container %s in DB: %w", newName, ctr.ID(), err)
}
if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
- return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID())
+ return fmt.Errorf("error renaming container %s in all containers bucket in DB: %w", ctr.ID(), err)
}
if ctr.config.Pod != "" {
podsBkt, err := getPodBucket(tx)
@@ -1716,14 +1996,14 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
}
podBkt := podsBkt.Bucket([]byte(ctr.config.Pod))
if podBkt == nil {
- return errors.Wrapf(define.ErrInternal, "bucket for pod %s does not exist", ctr.config.Pod)
+ return fmt.Errorf("bucket for pod %s does not exist: %w", ctr.config.Pod, define.ErrInternal)
}
podCtrBkt := podBkt.Bucket(containersBkt)
if podCtrBkt == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", ctr.config.Pod)
+ return fmt.Errorf("pod %s does not have a containers bucket: %w", ctr.config.Pod, define.ErrInternal)
}
if err := podCtrBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
- return errors.Wrapf(err, "error renaming container %s in pod %s members bucket", ctr.ID(), ctr.config.Pod)
+ return fmt.Errorf("error renaming container %s in pod %s members bucket: %w", ctr.ID(), ctr.config.Pod, err)
}
}
}
@@ -1737,11 +2017,11 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
+ return fmt.Errorf("no container with ID %q found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
- return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ return fmt.Errorf("error updating container %s config JSON: %w", ctr.ID(), err)
}
return nil
@@ -1763,7 +2043,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID())
+ return fmt.Errorf("error marshalling new configuration JSON for pod %s: %w", pod.ID(), err)
}
db, err := s.getDBCon()
@@ -1781,11 +2061,11 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
podDB := podBkt.Bucket([]byte(pod.ID()))
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ return fmt.Errorf("no pod with ID %s found in DB: %w", pod.ID(), define.ErrNoSuchPod)
}
if err := podDB.Put(configKey, newCfgJSON); err != nil {
- return errors.Wrapf(err, "error updating pod %s config JSON", pod.ID())
+ return fmt.Errorf("error updating pod %s config JSON: %w", pod.ID(), err)
}
return nil
@@ -1807,7 +2087,7 @@ func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) er
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for volume %q", volume.Name())
+ return fmt.Errorf("error marshalling new configuration JSON for volume %q: %w", volume.Name(), err)
}
db, err := s.getDBCon()
@@ -1825,11 +2105,11 @@ func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) er
volDB := volBkt.Bucket([]byte(volume.Name()))
if volDB == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found in DB", volume.Name())
+ return fmt.Errorf("no volume with name %q found in DB: %w", volume.Name(), define.ErrNoSuchVolume)
}
if err := volDB.Put(configKey, newCfgJSON); err != nil {
- return errors.Wrapf(err, "error updating volume %q config JSON", volume.Name())
+ return fmt.Errorf("error updating volume %q config JSON: %w", volume.Name(), err)
}
return nil
@@ -1953,7 +2233,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
}
if strings.HasPrefix(string(checkID), idOrName) {
if exists {
- return errors.Wrapf(define.ErrPodExists, "more than one result for ID or name %s", idOrName)
+ return fmt.Errorf("more than one result for ID or name %s: %w", idOrName, define.ErrPodExists)
}
id = checkID
exists = true
@@ -1965,9 +2245,9 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
return err
} else if !exists {
if isCtr {
- return errors.Wrapf(define.ErrNoSuchPod, "%s is a container, not a pod", idOrName)
+ return fmt.Errorf("%s is a container, not a pod: %w", idOrName, define.ErrNoSuchPod)
}
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with name or ID %s found", idOrName)
+ return fmt.Errorf("no pod with name or ID %s found: %w", idOrName, define.ErrNoSuchPod)
}
// We might have found a container ID, but it's OK
@@ -2043,7 +2323,7 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return false, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return false, fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
ctrID := []byte(id)
@@ -2067,13 +2347,13 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return fmt.Errorf("pod %s not found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return fmt.Errorf("pod %s missing containers bucket in DB: %w", pod.ID(), define.ErrInternal)
}
// Don't bother with a namespace check on the container -
@@ -2106,7 +2386,7 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return nil, fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -2129,13 +2409,13 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return fmt.Errorf("pod %s not found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return fmt.Errorf("pod %s missing containers bucket in DB: %w", pod.ID(), define.ErrInternal)
}
// Iterate through all containers in the pod
@@ -2168,7 +2448,7 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return nil, fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -2196,13 +2476,13 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return fmt.Errorf("pod %s not found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return fmt.Errorf("pod %s missing containers bucket in DB: %w", pod.ID(), define.ErrInternal)
}
// Iterate through all containers in the pod
@@ -2242,7 +2522,7 @@ func (s *BoltState) AddVolume(volume *Volume) error {
volConfigJSON, err := json.Marshal(volume.config)
if err != nil {
- return errors.Wrapf(err, "error marshalling volume %s config to JSON", volume.Name())
+ return fmt.Errorf("error marshalling volume %s config to JSON: %w", volume.Name(), err)
}
// Volume state is allowed to not exist
@@ -2250,7 +2530,7 @@ func (s *BoltState) AddVolume(volume *Volume) error {
if volume.state != nil {
volStateJSON, err = json.Marshal(volume.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name())
+ return fmt.Errorf("error marshalling volume %s state to JSON: %w", volume.Name(), err)
}
}
@@ -2274,34 +2554,34 @@ func (s *BoltState) AddVolume(volume *Volume) error {
// Check if we already have a volume with the given name
volExists := allVolsBkt.Get(volName)
if volExists != nil {
- return errors.Wrapf(define.ErrVolumeExists, "name %s is in use", volume.Name())
+ return fmt.Errorf("name %s is in use: %w", volume.Name(), define.ErrVolumeExists)
}
// We are good to add the volume
// Make a bucket for it
newVol, err := volBkt.CreateBucket(volName)
if err != nil {
- return errors.Wrapf(err, "error creating bucket for volume %s", volume.Name())
+ return fmt.Errorf("error creating bucket for volume %s: %w", volume.Name(), err)
}
// Make a subbucket for the containers using the volume. Dependent container IDs will be added/removed to
// this bucket in addContainer/removeContainer
if _, err := newVol.CreateBucket(volDependenciesBkt); err != nil {
- return errors.Wrapf(err, "error creating bucket for containers using volume %s", volume.Name())
+ return fmt.Errorf("error creating bucket for containers using volume %s: %w", volume.Name(), err)
}
if err := newVol.Put(configKey, volConfigJSON); err != nil {
- return errors.Wrapf(err, "error storing volume %s configuration in DB", volume.Name())
+ return fmt.Errorf("error storing volume %s configuration in DB: %w", volume.Name(), err)
}
if volStateJSON != nil {
if err := newVol.Put(stateKey, volStateJSON); err != nil {
- return errors.Wrapf(err, "error storing volume %s state in DB", volume.Name())
+ return fmt.Errorf("error storing volume %s state in DB: %w", volume.Name(), err)
}
}
if err := allVolsBkt.Put(volName, volName); err != nil {
- return errors.Wrapf(err, "error storing volume %s in all volumes bucket in DB", volume.Name())
+ return fmt.Errorf("error storing volume %s in all volumes bucket in DB: %w", volume.Name(), err)
}
return nil
@@ -2343,7 +2623,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
volDB := volBkt.Bucket(volName)
if volDB == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "volume %s does not exist in DB", volume.Name())
+ return fmt.Errorf("volume %s does not exist in DB: %w", volume.Name(), define.ErrNoSuchVolume)
}
// Check if volume is not being used by any container
@@ -2370,20 +2650,20 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return nil
})
if err != nil {
- return errors.Wrapf(err, "error getting list of dependencies from dependencies bucket for volumes %q", volume.Name())
+ return fmt.Errorf("error getting list of dependencies from dependencies bucket for volumes %q: %w", volume.Name(), err)
}
if len(deps) > 0 {
- return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by container(s) %s", volume.Name(), strings.Join(deps, ","))
+ return fmt.Errorf("volume %s is being used by container(s) %s: %w", volume.Name(), strings.Join(deps, ","), define.ErrVolumeBeingUsed)
}
}
// volume is ready for removal
// Let's kick it out
if err := allVolsBkt.Delete(volName); err != nil {
- return errors.Wrapf(err, "error removing volume %s from all volumes bucket in DB", volume.Name())
+ return fmt.Errorf("error removing volume %s from all volumes bucket in DB: %w", volume.Name(), err)
}
if err := volBkt.DeleteBucket(volName); err != nil {
- return errors.Wrapf(err, "error removing volume %s from DB", volume.Name())
+ return fmt.Errorf("error removing volume %s from DB: %w", volume.Name(), err)
}
return nil
@@ -2419,7 +2699,7 @@ func (s *BoltState) UpdateVolume(volume *Volume) error {
volToUpdate := volBucket.Bucket(volumeName)
if volToUpdate == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
+ return fmt.Errorf("no volume with name %s found in database: %w", volume.Name(), define.ErrNoSuchVolume)
}
stateBytes := volToUpdate.Get(stateKey)
@@ -2430,7 +2710,7 @@ func (s *BoltState) UpdateVolume(volume *Volume) error {
}
if err := json.Unmarshal(stateBytes, newState); err != nil {
- return errors.Wrapf(err, "error unmarshalling volume %s state", volume.Name())
+ return fmt.Errorf("error unmarshalling volume %s state: %w", volume.Name(), err)
}
return nil
@@ -2460,7 +2740,7 @@ func (s *BoltState) SaveVolume(volume *Volume) error {
if volume.state != nil {
stateJSON, err := json.Marshal(volume.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name())
+ return fmt.Errorf("error marshalling volume %s state to JSON: %w", volume.Name(), err)
}
newStateJSON = stateJSON
}
@@ -2480,7 +2760,7 @@ func (s *BoltState) SaveVolume(volume *Volume) error {
volToUpdate := volBucket.Bucket(volumeName)
if volToUpdate == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
+ return fmt.Errorf("no volume with name %s found in database: %w", volume.Name(), define.ErrNoSuchVolume)
}
return volToUpdate.Put(stateKey, newStateJSON)
@@ -2517,7 +2797,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
// This check can be removed if performance becomes an
// issue, but much less helpful errors will be produced
if volExists == nil {
- return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
+ return fmt.Errorf("inconsistency in state - volume %s is in all volumes bucket but volume not found: %w", string(id), define.ErrInternal)
}
volume := new(Volume)
@@ -2525,7 +2805,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
volume.state = new(VolumeState)
if err := s.getVolumeFromDB(id, volume, volBucket); err != nil {
- if errors.Cause(err) != define.ErrNSMismatch {
+ if !errors.Is(err, define.ErrNSMismatch) {
logrus.Errorf("Retrieving volume %s from the database: %v", string(id), err)
}
} else {
@@ -2624,7 +2904,7 @@ func (s *BoltState) LookupVolume(name string) (*Volume, error) {
err = allVolsBkt.ForEach(func(checkName, checkName2 []byte) error {
if strings.HasPrefix(string(checkName), name) {
if foundMatch {
- return errors.Wrapf(define.ErrVolumeExists, "more than one result for volume name %q", name)
+ return fmt.Errorf("more than one result for volume name %q: %w", name, define.ErrVolumeExists)
}
foundMatch = true
volName = checkName
@@ -2636,7 +2916,7 @@ func (s *BoltState) LookupVolume(name string) (*Volume, error) {
}
if !foundMatch {
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found", name)
+ return fmt.Errorf("no volume with name %q found: %w", name, define.ErrNoSuchVolume)
}
return s.getVolumeFromDB(volName, volume, volBkt)
@@ -2722,12 +3002,12 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
volDB := volBucket.Bucket([]byte(volume.Name()))
if volDB == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in DB", volume.Name())
+ return fmt.Errorf("no volume with name %s found in DB: %w", volume.Name(), define.ErrNoSuchVolume)
}
dependsBkt := volDB.Bucket(volDependenciesBkt)
if dependsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "volume %s has no dependencies bucket", volume.Name())
+ return fmt.Errorf("volume %s has no dependencies bucket: %w", volume.Name(), define.ErrInternal)
}
// Iterate through and add dependencies
@@ -2767,7 +3047,7 @@ func (s *BoltState) AddPod(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -2780,12 +3060,12 @@ func (s *BoltState) AddPod(pod *Pod) error {
podConfigJSON, err := json.Marshal(pod.config)
if err != nil {
- return errors.Wrapf(err, "error marshalling pod %s config to JSON", pod.ID())
+ return fmt.Errorf("error marshalling pod %s config to JSON: %w", pod.ID(), err)
}
podStateJSON, err := json.Marshal(pod.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling pod %s state to JSON", pod.ID())
+ return fmt.Errorf("error marshalling pod %s state to JSON: %w", pod.ID(), err)
}
db, err := s.getDBCon()
@@ -2827,7 +3107,7 @@ func (s *BoltState) AddPod(pod *Pod) error {
if allPodsBkt.Get(idExist) == nil {
err = define.ErrCtrExists
}
- return errors.Wrapf(err, "ID \"%s\" is in use", pod.ID())
+ return fmt.Errorf("ID \"%s\" is in use: %w", pod.ID(), err)
}
nameExist := namesBkt.Get(podName)
if nameExist != nil {
@@ -2835,47 +3115,47 @@ func (s *BoltState) AddPod(pod *Pod) error {
if allPodsBkt.Get(nameExist) == nil {
err = define.ErrCtrExists
}
- return errors.Wrapf(err, "name \"%s\" is in use", pod.Name())
+ return fmt.Errorf("name \"%s\" is in use: %w", pod.Name(), err)
}
// We are good to add the pod
// Make a bucket for it
newPod, err := podBkt.CreateBucket(podID)
if err != nil {
- return errors.Wrapf(err, "error creating bucket for pod %s", pod.ID())
+ return fmt.Errorf("error creating bucket for pod %s: %w", pod.ID(), err)
}
// Make a subbucket for pod containers
if _, err := newPod.CreateBucket(containersBkt); err != nil {
- return errors.Wrapf(err, "error creating bucket for pod %s containers", pod.ID())
+ return fmt.Errorf("error creating bucket for pod %s containers: %w", pod.ID(), err)
}
if err := newPod.Put(configKey, podConfigJSON); err != nil {
- return errors.Wrapf(err, "error storing pod %s configuration in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s configuration in DB: %w", pod.ID(), err)
}
if err := newPod.Put(stateKey, podStateJSON); err != nil {
- return errors.Wrapf(err, "error storing pod %s state JSON in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s state JSON in DB: %w", pod.ID(), err)
}
if podNamespace != nil {
if err := newPod.Put(namespaceKey, podNamespace); err != nil {
- return errors.Wrapf(err, "error storing pod %s namespace in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s namespace in DB: %w", pod.ID(), err)
}
if err := nsBkt.Put(podID, podNamespace); err != nil {
- return errors.Wrapf(err, "error storing pod %s namespace in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s namespace in DB: %w", pod.ID(), err)
}
}
// Add us to the ID and names buckets
if err := idsBkt.Put(podID, podName); err != nil {
- return errors.Wrapf(err, "error storing pod %s ID in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s ID in DB: %w", pod.ID(), err)
}
if err := namesBkt.Put(podName, podID); err != nil {
- return errors.Wrapf(err, "error storing pod %s name in DB", pod.Name())
+ return fmt.Errorf("error storing pod %s name in DB: %w", pod.Name(), err)
}
if err := allPodsBkt.Put(podID, podName); err != nil {
- return errors.Wrapf(err, "error storing pod %s in all pods bucket in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s in all pods bucket in DB: %w", pod.ID(), err)
}
return nil
@@ -2899,7 +3179,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -2941,7 +3221,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
+ return fmt.Errorf("pod %s does not exist in DB: %w", pod.ID(), define.ErrNoSuchPod)
}
// Check if pod is empty
@@ -2953,26 +3233,26 @@ func (s *BoltState) RemovePod(pod *Pod) error {
if podCtrsBkt != nil {
cursor := podCtrsBkt.Cursor()
if id, _ := cursor.First(); id != nil {
- return errors.Wrapf(define.ErrCtrExists, "pod %s is not empty", pod.ID())
+ return fmt.Errorf("pod %s is not empty: %w", pod.ID(), define.ErrCtrExists)
}
}
// Pod is empty, and ready for removal
// Let's kick it out
if err := idsBkt.Delete(podID); err != nil {
- return errors.Wrapf(err, "error removing pod %s ID from DB", pod.ID())
+ return fmt.Errorf("error removing pod %s ID from DB: %w", pod.ID(), err)
}
if err := namesBkt.Delete(podName); err != nil {
- return errors.Wrapf(err, "error removing pod %s name (%s) from DB", pod.ID(), pod.Name())
+ return fmt.Errorf("error removing pod %s name (%s) from DB: %w", pod.ID(), pod.Name(), err)
}
if err := nsBkt.Delete(podID); err != nil {
- return errors.Wrapf(err, "error removing pod %s namespace from DB", pod.ID())
+ return fmt.Errorf("error removing pod %s namespace from DB: %w", pod.ID(), err)
}
if err := allPodsBkt.Delete(podID); err != nil {
- return errors.Wrapf(err, "error removing pod %s ID from all pods bucket in DB", pod.ID())
+ return fmt.Errorf("error removing pod %s ID from all pods bucket in DB: %w", pod.ID(), err)
}
if err := podBkt.DeleteBucket(podID); err != nil {
- return errors.Wrapf(err, "error removing pod %s from DB", pod.ID())
+ return fmt.Errorf("error removing pod %s from DB: %w", pod.ID(), err)
}
return nil
@@ -2995,7 +3275,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -3036,12 +3316,12 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
+ return fmt.Errorf("pod %s does not exist in DB: %w", pod.ID(), define.ErrNoSuchPod)
}
podCtrsBkt := podDB.Bucket(containersBkt)
if podCtrsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
+ return fmt.Errorf("pod %s does not have a containers bucket: %w", pod.ID(), define.ErrInternal)
}
// Traverse all containers in the pod with a cursor
@@ -3052,7 +3332,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
if ctr == nil {
// This should never happen
// State is inconsistent
- return errors.Wrapf(define.ErrNoSuchCtr, "pod %s referenced nonexistent container %s", pod.ID(), string(id))
+ return fmt.Errorf("pod %s referenced nonexistent container %s: %w", pod.ID(), string(id), define.ErrNoSuchCtr)
}
ctrDeps := ctr.Bucket(dependenciesBkt)
// This should never be nil, but if it is, we're
@@ -3061,7 +3341,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
err = ctrDeps.ForEach(func(depID, name []byte) error {
exists := podCtrsBkt.Get(depID)
if exists == nil {
- return errors.Wrapf(define.ErrCtrExists, "container %s has dependency %s outside of pod %s", string(id), string(depID), pod.ID())
+ return fmt.Errorf("container %s has dependency %s outside of pod %s: %w", string(id), string(depID), pod.ID(), define.ErrCtrExists)
}
return nil
})
@@ -3073,19 +3353,19 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
// Dependencies are set, we're clear to remove
if err := ctrBkt.DeleteBucket(id); err != nil {
- return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", string(id))
+ return fmt.Errorf("error deleting container %s from DB: %w", string(id), define.ErrInternal)
}
if err := idsBkt.Delete(id); err != nil {
- return errors.Wrapf(err, "error deleting container %s ID in DB", string(id))
+ return fmt.Errorf("error deleting container %s ID in DB: %w", string(id), err)
}
if err := namesBkt.Delete(name); err != nil {
- return errors.Wrapf(err, "error deleting container %s name in DB", string(id))
+ return fmt.Errorf("error deleting container %s name in DB: %w", string(id), err)
}
if err := allCtrsBkt.Delete(id); err != nil {
- return errors.Wrapf(err, "error deleting container %s ID from all containers bucket in DB", string(id))
+ return fmt.Errorf("error deleting container %s ID from all containers bucket in DB: %w", string(id), err)
}
return nil
@@ -3096,10 +3376,10 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
// Delete and recreate the bucket to empty it
if err := podDB.DeleteBucket(containersBkt); err != nil {
- return errors.Wrapf(err, "error removing pod %s containers bucket", pod.ID())
+ return fmt.Errorf("error removing pod %s containers bucket: %w", pod.ID(), err)
}
if _, err := podDB.CreateBucket(containersBkt); err != nil {
- return errors.Wrapf(err, "error recreating pod %s containers bucket", pod.ID())
+ return fmt.Errorf("error recreating pod %s containers bucket: %w", pod.ID(), err)
}
return nil
@@ -3127,7 +3407,7 @@ func (s *BoltState) AddContainerToPod(pod *Pod, ctr *Container) error {
}
if ctr.config.Pod != pod.ID() {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not part of pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("container %s is not part of pod %s: %w", ctr.ID(), pod.ID(), define.ErrNoSuchCtr)
}
return s.addContainer(ctr, pod)
@@ -3146,19 +3426,19 @@ func (s *BoltState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
if s.namespace != "" {
if s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
if s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s in in namespace %q but we are in namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s in in namespace %q but we are in namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
}
if ctr.config.Pod == "" {
- return errors.Wrapf(define.ErrNoSuchPod, "container %s is not part of a pod, use RemoveContainer instead", ctr.ID())
+ return fmt.Errorf("container %s is not part of a pod, use RemoveContainer instead: %w", ctr.ID(), define.ErrNoSuchPod)
}
if ctr.config.Pod != pod.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "container %s is not part of pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("container %s is not part of pod %s: %w", ctr.ID(), pod.ID(), define.ErrInvalidArg)
}
db, err := s.getDBCon()
@@ -3184,7 +3464,7 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
newState := new(podState)
@@ -3206,17 +3486,17 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
+ return fmt.Errorf("no pod with ID %s found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Get the pod state JSON
podStateBytes := podDB.Get(stateKey)
if podStateBytes == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s is missing state key in DB", pod.ID())
+ return fmt.Errorf("pod %s is missing state key in DB: %w", pod.ID(), define.ErrInternal)
}
if err := json.Unmarshal(podStateBytes, newState); err != nil {
- return errors.Wrapf(err, "error unmarshalling pod %s state JSON", pod.ID())
+ return fmt.Errorf("error unmarshalling pod %s state JSON: %w", pod.ID(), err)
}
return nil
@@ -3241,12 +3521,12 @@ func (s *BoltState) SavePod(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
stateJSON, err := json.Marshal(pod.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling pod %s state to JSON", pod.ID())
+ return fmt.Errorf("error marshalling pod %s state to JSON: %w", pod.ID(), err)
}
db, err := s.getDBCon()
@@ -3266,12 +3546,12 @@ func (s *BoltState) SavePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
+ return fmt.Errorf("no pod with ID %s found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Set the pod state JSON
if err := podDB.Put(stateKey, stateJSON); err != nil {
- return errors.Wrapf(err, "error updating pod %s state in database", pod.ID())
+ return fmt.Errorf("error updating pod %s state in database: %w", pod.ID(), err)
}
return nil
@@ -3313,7 +3593,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
// This check can be removed if performance becomes an
// issue, but much less helpful errors will be produced
if podExists == nil {
- return errors.Wrapf(define.ErrInternal, "inconsistency in state - pod %s is in all pods bucket but pod not found", string(id))
+ return fmt.Errorf("inconsistency in state - pod %s is in all pods bucket but pod not found: %w", string(id), define.ErrInternal)
}
pod := new(Pod)
@@ -3321,7 +3601,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
pod.state = new(podState)
if err := s.getPodFromDB(id, pod, podBucket); err != nil {
- if errors.Cause(err) != define.ErrNSMismatch {
+ if !errors.Is(err, define.ErrNSMismatch) {
logrus.Errorf("Retrieving pod %s from the database: %v", string(id), err)
}
} else {
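The hunks above all apply the same migration: `errors.Wrapf(sentinel, msg, args...)` from github.com/pkg/errors becomes `fmt.Errorf(msg+": %w", args..., sentinel)`, and `errors.Cause(err) == sentinel` checks become `errors.Is(err, sentinel)`. A minimal, self-contained sketch of that pattern (using a stand-in sentinel instead of Podman's define package) looks like this:

```go
package main

import (
	"errors"
	"fmt"
)

// errNoSuchPod stands in for define.ErrNoSuchPod.
var errNoSuchPod = errors.New("no such pod")

// lookupPod wraps the sentinel with %w, in the style used throughout the patch.
func lookupPod(id string) error {
	return fmt.Errorf("no pod with ID %s found in database: %w", id, errNoSuchPod)
}

func main() {
	err := lookupPod("abc123")
	// errors.Is walks the %w chain, so the sentinel is still detectable
	// without github.com/pkg/errors' Cause().
	fmt.Println(errors.Is(err, errNoSuchPod)) // true
}
```

Because %w records the wrapped error in the chain, errors.Is keeps matching the sentinel after any number of wrappings, which is what lets the rest of the patch drop errors.Cause.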
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index d6f035af9..f28fadfa9 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -2,6 +2,7 @@ package libpod
import (
"bytes"
+ "fmt"
"os"
"path/filepath"
"runtime"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
@@ -29,6 +29,9 @@ const (
aliasesName = "aliases"
runtimeConfigName = "runtime-config"
+ exitCodeName = "exit-code"
+ exitCodeTimeStampName = "exit-code-time-stamp"
+
configName = "config"
stateName = "state"
dependenciesName = "dependencies"
@@ -65,6 +68,9 @@ var (
volDependenciesBkt = []byte(volCtrDependencies)
networksBkt = []byte(networksName)
+ exitCodeBkt = []byte(exitCodeName)
+ exitCodeTimeStampBkt = []byte(exitCodeTimeStampName)
+
configKey = []byte(configName)
stateKey = []byte(stateName)
netNSKey = []byte(netNSName)
@@ -189,7 +195,7 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
}
if err := configBkt.Put(missing.key, dbValue); err != nil {
- return errors.Wrapf(err, "error updating %s in DB runtime config", missing.name)
+ return fmt.Errorf("error updating %s in DB runtime config: %w", missing.name, err)
}
}
@@ -230,8 +236,8 @@ func readOnlyValidateConfig(bucket *bolt.Bucket, toCheck dbConfigValidation) (bo
return true, nil
}
- return true, errors.Wrapf(define.ErrDBBadConfig, "database %s %q does not match our %s %q",
- toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue)
+ return true, fmt.Errorf("database %s %q does not match our %s %q: %w",
+ toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue, define.ErrDBBadConfig)
}
return true, nil
@@ -248,7 +254,7 @@ func (s *BoltState) getDBCon() (*bolt.DB, error) {
db, err := bolt.Open(s.dbPath, 0600, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error opening database %s", s.dbPath)
+ return nil, fmt.Errorf("error opening database %s: %w", s.dbPath, err)
}
return db, nil
@@ -277,7 +283,7 @@ func (s *BoltState) closeDBCon(db *bolt.DB) error {
func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(idRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "id registry bucket not found in DB")
+ return nil, fmt.Errorf("id registry bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -285,7 +291,7 @@ func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(nameRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "name registry bucket not found in DB")
+ return nil, fmt.Errorf("name registry bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -293,7 +299,7 @@ func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(nsRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "namespace registry bucket not found in DB")
+ return nil, fmt.Errorf("namespace registry bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -301,7 +307,7 @@ func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(ctrBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "containers bucket not found in DB")
+ return nil, fmt.Errorf("containers bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -309,7 +315,7 @@ func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allCtrsBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "all containers bucket not found in DB")
+ return nil, fmt.Errorf("all containers bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -317,7 +323,7 @@ func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(podBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "pods bucket not found in DB")
+ return nil, fmt.Errorf("pods bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -325,7 +331,7 @@ func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allPodsBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "all pods bucket not found in DB")
+ return nil, fmt.Errorf("all pods bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -333,7 +339,7 @@ func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(volBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "volumes bucket not found in DB")
+ return nil, fmt.Errorf("volumes bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -341,7 +347,7 @@ func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allVolsBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "all volumes bucket not found in DB")
+ return nil, fmt.Errorf("all volumes bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -349,7 +355,7 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(execBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "exec bucket not found in DB")
+ return nil, fmt.Errorf("exec bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -357,7 +363,23 @@ func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(runtimeConfigBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "runtime configuration bucket not found in DB")
+ return nil, fmt.Errorf("runtime configuration bucket not found in DB: %w", define.ErrDBBadConfig)
+ }
+ return bkt, nil
+}
+
+func getExitCodeBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(exitCodeBkt)
+ if bkt == nil {
+ return nil, fmt.Errorf("exit-code container bucket not found in DB: %w", define.ErrDBBadConfig)
+ }
+ return bkt, nil
+}
+
+func getExitCodeTimeStampBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(exitCodeTimeStampBkt)
+ if bkt == nil {
+ return nil, fmt.Errorf("exit-code time stamp bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -365,23 +387,23 @@ func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func (s *BoltState) getContainerConfigFromDB(id []byte, config *ContainerConfig, ctrsBkt *bolt.Bucket) error {
ctrBkt := ctrsBkt.Bucket(id)
if ctrBkt == nil {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id))
+ return fmt.Errorf("container %s not found in DB: %w", string(id), define.ErrNoSuchCtr)
}
if s.namespaceBytes != nil {
ctrNamespaceBytes := ctrBkt.Get(namespaceKey)
if !bytes.Equal(s.namespaceBytes, ctrNamespaceBytes) {
- return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve container %s as it is part of namespace %q and we are in namespace %q", string(id), string(ctrNamespaceBytes), s.namespace)
+ return fmt.Errorf("cannot retrieve container %s as it is part of namespace %q and we are in namespace %q: %w", string(id), string(ctrNamespaceBytes), s.namespace, define.ErrNSMismatch)
}
}
configBytes := ctrBkt.Get(configKey)
if configBytes == nil {
- return errors.Wrapf(define.ErrInternal, "container %s missing config key in DB", string(id))
+ return fmt.Errorf("container %s missing config key in DB: %w", string(id), define.ErrInternal)
}
if err := json.Unmarshal(configBytes, config); err != nil {
- return errors.Wrapf(err, "error unmarshalling container %s config", string(id))
+ return fmt.Errorf("error unmarshalling container %s config: %w", string(id), err)
}
// convert ports to the new format if needed
@@ -404,7 +426,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(ctr.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock for container %s", string(id))
+ return fmt.Errorf("error retrieving lock for container %s: %w", string(id), err)
}
ctr.lock = lock
@@ -451,29 +473,29 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error {
podDB := podBkt.Bucket(id)
if podDB == nil {
- return errors.Wrapf(define.ErrNoSuchPod, "pod with ID %s not found", string(id))
+ return fmt.Errorf("pod with ID %s not found: %w", string(id), define.ErrNoSuchPod)
}
if s.namespaceBytes != nil {
podNamespaceBytes := podDB.Get(namespaceKey)
if !bytes.Equal(s.namespaceBytes, podNamespaceBytes) {
- return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q", string(id), string(podNamespaceBytes), s.namespace)
+ return fmt.Errorf("cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q: %w", string(id), string(podNamespaceBytes), s.namespace, define.ErrNSMismatch)
}
}
podConfigBytes := podDB.Get(configKey)
if podConfigBytes == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s is missing configuration key in DB", string(id))
+ return fmt.Errorf("pod %s is missing configuration key in DB: %w", string(id), define.ErrInternal)
}
if err := json.Unmarshal(podConfigBytes, pod.config); err != nil {
- return errors.Wrapf(err, "error unmarshalling pod %s config from DB", string(id))
+ return fmt.Errorf("error unmarshalling pod %s config from DB: %w", string(id), err)
}
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(pod.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock for pod %s", string(id))
+ return fmt.Errorf("error retrieving lock for pod %s: %w", string(id), err)
}
pod.lock = lock
@@ -486,29 +508,29 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
volDB := volBkt.Bucket(name)
if volDB == nil {
- return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %s not found", string(name))
+ return fmt.Errorf("volume with name %s not found: %w", string(name), define.ErrNoSuchVolume)
}
volConfigBytes := volDB.Get(configKey)
if volConfigBytes == nil {
- return errors.Wrapf(define.ErrInternal, "volume %s is missing configuration key in DB", string(name))
+ return fmt.Errorf("volume %s is missing configuration key in DB: %w", string(name), define.ErrInternal)
}
if err := json.Unmarshal(volConfigBytes, volume.config); err != nil {
- return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
+ return fmt.Errorf("error unmarshalling volume %s config from DB: %w", string(name), err)
}
// Volume state is allowed to be nil for legacy compatibility
volStateBytes := volDB.Get(stateKey)
if volStateBytes != nil {
if err := json.Unmarshal(volStateBytes, volume.state); err != nil {
- return errors.Wrapf(err, "error unmarshalling volume %s state from DB", string(name))
+ return fmt.Errorf("error unmarshalling volume %s state from DB: %w", string(name), err)
}
}
// Retrieve volume driver
if volume.UsesVolumeDriver() {
- plugin, err := s.runtime.getVolumePlugin(volume.config.Driver)
+ plugin, err := s.runtime.getVolumePlugin(volume.config)
if err != nil {
// We want to fail gracefully here, to ensure that we
// can still remove volumes even if their plugin is
@@ -524,7 +546,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock for volume %q", string(name))
+ return fmt.Errorf("error retrieving lock for volume %q: %w", string(name), err)
}
volume.lock = lock
@@ -538,8 +560,8 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
// If pod is not nil, the container is added to the pod as well
func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "cannot add container %s as it is in namespace %q and we are in namespace %q",
- ctr.ID(), s.namespace, ctr.config.Namespace)
+ return fmt.Errorf("cannot add container %s as it is in namespace %q and we are in namespace %q: %w",
+ ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
// Set the original networks to nil. We can save some space by not storing it in the config
@@ -550,11 +572,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
// JSON container structs to insert into DB
configJSON, err := json.Marshal(ctr.config)
if err != nil {
- return errors.Wrapf(err, "error marshalling container %s config to JSON", ctr.ID())
+ return fmt.Errorf("error marshalling container %s config to JSON: %w", ctr.ID(), err)
}
stateJSON, err := json.Marshal(ctr.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling container %s state to JSON", ctr.ID())
+ return fmt.Errorf("error marshalling container %s state to JSON: %w", ctr.ID(), err)
}
netNSPath := getNetNSPath(ctr)
dependsCtrs := ctr.Dependencies()
@@ -572,16 +594,16 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
for net, opts := range configNetworks {
// Check that we don't have any empty network names
if net == "" {
- return errors.Wrapf(define.ErrInvalidArg, "network names cannot be an empty string")
+ return fmt.Errorf("network names cannot be an empty string: %w", define.ErrInvalidArg)
}
if opts.InterfaceName == "" {
- return errors.Wrapf(define.ErrInvalidArg, "network interface name cannot be an empty string")
+ return fmt.Errorf("network interface name cannot be an empty string: %w", define.ErrInvalidArg)
}
// always add the short id as alias for docker compat
opts.Aliases = append(opts.Aliases, ctr.config.ID[:12])
optBytes, err := json.Marshal(opts)
if err != nil {
- return errors.Wrapf(err, "error marshalling network options JSON for container %s", ctr.ID())
+ return fmt.Errorf("error marshalling network options JSON for container %s: %w", ctr.ID(), err)
}
networks[net] = optBytes
}
@@ -637,17 +659,17 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
podDB = podBucket.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in database", pod.ID())
+ return fmt.Errorf("pod %s does not exist in database: %w", pod.ID(), define.ErrNoSuchPod)
}
podCtrs = podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
+ return fmt.Errorf("pod %s does not have a containers bucket: %w", pod.ID(), define.ErrInternal)
}
podNS := podDB.Get(namespaceKey)
if !bytes.Equal(podNS, ctrNamespace) {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
- ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace)
+ return fmt.Errorf("container %s is in namespace %s and pod %s is in namespace %s: %w",
+ ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace, define.ErrNSMismatch)
}
}
@@ -658,7 +680,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if allCtrsBucket.Get(idExist) == nil {
err = define.ErrPodExists
}
- return errors.Wrapf(err, "ID \"%s\" is in use", ctr.ID())
+ return fmt.Errorf("ID \"%s\" is in use: %w", ctr.ID(), err)
}
nameExist := namesBucket.Get(ctrName)
if nameExist != nil {
@@ -666,66 +688,66 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if allCtrsBucket.Get(nameExist) == nil {
err = define.ErrPodExists
}
- return errors.Wrapf(err, "name \"%s\" is in use", ctr.Name())
+ return fmt.Errorf("name \"%s\" is in use: %w", ctr.Name(), err)
}
// No overlapping containers
// Add the new container to the DB
if err := idsBucket.Put(ctrID, ctrName); err != nil {
- return errors.Wrapf(err, "error adding container %s ID to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s ID to DB: %w", ctr.ID(), err)
}
if err := namesBucket.Put(ctrName, ctrID); err != nil {
- return errors.Wrapf(err, "error adding container %s name (%s) to DB", ctr.ID(), ctr.Name())
+ return fmt.Errorf("error adding container %s name (%s) to DB: %w", ctr.ID(), ctr.Name(), err)
}
if ctrNamespace != nil {
if err := nsBucket.Put(ctrID, ctrNamespace); err != nil {
- return errors.Wrapf(err, "error adding container %s namespace (%q) to DB", ctr.ID(), ctr.Namespace())
+ return fmt.Errorf("error adding container %s namespace (%q) to DB: %w", ctr.ID(), ctr.Namespace(), err)
}
}
if err := allCtrsBucket.Put(ctrID, ctrName); err != nil {
- return errors.Wrapf(err, "error adding container %s to all containers bucket in DB", ctr.ID())
+ return fmt.Errorf("error adding container %s to all containers bucket in DB: %w", ctr.ID(), err)
}
newCtrBkt, err := ctrBucket.CreateBucket(ctrID)
if err != nil {
- return errors.Wrapf(err, "error adding container %s bucket to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s bucket to DB: %w", ctr.ID(), err)
}
if err := newCtrBkt.Put(configKey, configJSON); err != nil {
- return errors.Wrapf(err, "error adding container %s config to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s config to DB: %w", ctr.ID(), err)
}
if err := newCtrBkt.Put(stateKey, stateJSON); err != nil {
- return errors.Wrapf(err, "error adding container %s state to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s state to DB: %w", ctr.ID(), err)
}
if ctrNamespace != nil {
if err := newCtrBkt.Put(namespaceKey, ctrNamespace); err != nil {
- return errors.Wrapf(err, "error adding container %s namespace to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s namespace to DB: %w", ctr.ID(), err)
}
}
if pod != nil {
if err := newCtrBkt.Put(podIDKey, []byte(pod.ID())); err != nil {
- return errors.Wrapf(err, "error adding container %s pod to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s pod to DB: %w", ctr.ID(), err)
}
}
if netNSPath != "" {
if err := newCtrBkt.Put(netNSKey, []byte(netNSPath)); err != nil {
- return errors.Wrapf(err, "error adding container %s netns path to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s netns path to DB: %w", ctr.ID(), err)
}
}
if len(networks) > 0 {
ctrNetworksBkt, err := newCtrBkt.CreateBucket(networksBkt)
if err != nil {
- return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID())
+ return fmt.Errorf("error creating networks bucket for container %s: %w", ctr.ID(), err)
}
for network, opts := range networks {
if err := ctrNetworksBkt.Put([]byte(network), opts); err != nil {
- return errors.Wrapf(err, "error adding network %q to networks bucket for container %s", network, ctr.ID())
+ return fmt.Errorf("error adding network %q to networks bucket for container %s: %w", network, ctr.ID(), err)
}
}
}
if _, err := newCtrBkt.CreateBucket(dependenciesBkt); err != nil {
- return errors.Wrapf(err, "error creating dependencies bucket for container %s", ctr.ID())
+ return fmt.Errorf("error creating dependencies bucket for container %s: %w", ctr.ID(), err)
}
// Add dependencies for the container
@@ -734,42 +756,42 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
depCtrBkt := ctrBucket.Bucket(depCtrID)
if depCtrBkt == nil {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s, but it does not exist in the DB", ctr.ID(), dependsCtr)
+ return fmt.Errorf("container %s depends on container %s, but it does not exist in the DB: %w", ctr.ID(), dependsCtr, define.ErrNoSuchCtr)
}
depCtrPod := depCtrBkt.Get(podIDKey)
if pod != nil {
// If we're part of a pod, make sure the dependency is part of the same pod
if depCtrPod == nil {
- return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is not in pod %s", ctr.ID(), dependsCtr, pod.ID())
+ return fmt.Errorf("container %s depends on container %s which is not in pod %s: %w", ctr.ID(), dependsCtr, pod.ID(), define.ErrInvalidArg)
}
if string(depCtrPod) != pod.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a different pod (%s)", ctr.ID(), dependsCtr, string(depCtrPod))
+ return fmt.Errorf("container %s depends on container %s which is in a different pod (%s): %w", ctr.ID(), dependsCtr, string(depCtrPod), define.ErrInvalidArg)
}
} else if depCtrPod != nil {
// If we're not part of a pod, we cannot depend on containers in a pod
- return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods", ctr.ID(), dependsCtr)
+ return fmt.Errorf("container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods: %w", ctr.ID(), dependsCtr, define.ErrInvalidArg)
}
depNamespace := depCtrBkt.Get(namespaceKey)
if !bytes.Equal(ctrNamespace, depNamespace) {
- return errors.Wrapf(define.ErrNSMismatch, "container %s in namespace %q depends on container %s in namespace %q - namespaces must match", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace))
+ return fmt.Errorf("container %s in namespace %q depends on container %s in namespace %q - namespaces must match: %w", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace), define.ErrNSMismatch)
}
depCtrDependsBkt := depCtrBkt.Bucket(dependenciesBkt)
if depCtrDependsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", dependsCtr)
+ return fmt.Errorf("container %s does not have a dependencies bucket: %w", dependsCtr, define.ErrInternal)
}
if err := depCtrDependsBkt.Put(ctrID, ctrName); err != nil {
- return errors.Wrapf(err, "error adding ctr %s as dependency of container %s", ctr.ID(), dependsCtr)
+ return fmt.Errorf("error adding ctr %s as dependency of container %s: %w", ctr.ID(), dependsCtr, err)
}
}
// Add ctr to pod
if pod != nil && podCtrs != nil {
if err := podCtrs.Put(ctrID, ctrName); err != nil {
- return errors.Wrapf(err, "error adding container %s to pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("error adding container %s to pod %s: %w", ctr.ID(), pod.ID(), err)
}
}
@@ -777,16 +799,16 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
for _, vol := range ctr.config.NamedVolumes {
volDB := volBkt.Bucket([]byte(vol.Name))
if volDB == nil {
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
+ return fmt.Errorf("no volume with name %s found in database when adding container %s: %w", vol.Name, ctr.ID(), define.ErrNoSuchVolume)
}
ctrDepsBkt, err := volDB.CreateBucketIfNotExists(volDependenciesBkt)
if err != nil {
- return errors.Wrapf(err, "error creating volume %s dependencies bucket to add container %s", vol.Name, ctr.ID())
+ return fmt.Errorf("error creating volume %s dependencies bucket to add container %s: %w", vol.Name, ctr.ID(), err)
}
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
- return errors.Wrapf(err, "error adding container %s to volume %s dependencies", ctr.ID(), vol.Name)
+ return fmt.Errorf("error adding container %s to volume %s dependencies: %w", ctr.ID(), vol.Name, err)
}
}
}
@@ -846,7 +868,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
podDB = podBucket.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ return fmt.Errorf("no pod with ID %s found in DB: %w", pod.ID(), define.ErrNoSuchPod)
}
}
@@ -854,17 +876,17 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
ctrExists := ctrBucket.Bucket(ctrID)
if ctrExists == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ return fmt.Errorf("no container with ID %s found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
// Compare namespace
// We can't remove containers not in our namespace
if s.namespace != "" {
if s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
if pod != nil && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q, does not match out namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q, does not match out namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
}
@@ -877,10 +899,10 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
} else {
ctrInPod := podCtrs.Get(ctrID)
if ctrInPod == nil {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not in pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("container %s is not in pod %s: %w", ctr.ID(), pod.ID(), define.ErrNoSuchCtr)
}
if err := podCtrs.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error removing container %s from pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("error removing container %s from pod %s: %w", ctr.ID(), pod.ID(), err)
}
}
}
@@ -898,14 +920,14 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
return err
}
if len(sessions) > 0 {
- return errors.Wrapf(define.ErrExecSessionExists, "container %s has active exec sessions: %s", ctr.ID(), strings.Join(sessions, ", "))
+ return fmt.Errorf("container %s has active exec sessions: %s: %w", ctr.ID(), strings.Join(sessions, ", "), define.ErrExecSessionExists)
}
}
// Does the container have dependencies?
ctrDepsBkt := ctrExists.Bucket(dependenciesBkt)
if ctrDepsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", ctr.ID())
+ return fmt.Errorf("container %s does not have a dependencies bucket: %w", ctr.ID(), define.ErrInternal)
}
deps := []string{}
err = ctrDepsBkt.ForEach(func(id, value []byte) error {
@@ -917,25 +939,25 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
return err
}
if len(deps) != 0 {
- return errors.Wrapf(define.ErrDepExists, "container %s is a dependency of the following containers: %s", ctr.ID(), strings.Join(deps, ", "))
+ return fmt.Errorf("container %s is a dependency of the following containers: %s: %w", ctr.ID(), strings.Join(deps, ", "), define.ErrDepExists)
}
if err := ctrBucket.DeleteBucket(ctrID); err != nil {
- return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s from DB: %w", ctr.ID(), define.ErrInternal)
}
if err := idsBucket.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container %s ID in DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s ID in DB: %w", ctr.ID(), err)
}
if err := namesBucket.Delete(ctrName); err != nil {
- return errors.Wrapf(err, "error deleting container %s name in DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s name in DB: %w", ctr.ID(), err)
}
if err := nsBucket.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container %s namespace in DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s namespace in DB: %w", ctr.ID(), err)
}
if err := allCtrsBucket.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container %s from all containers bucket in DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s from all containers bucket in DB: %w", ctr.ID(), err)
}
depCtrs := ctr.Dependencies()
@@ -964,7 +986,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
if err := depCtrDependsBkt.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error removing container %s as a dependency of container %s", ctr.ID(), depCtr)
+ return fmt.Errorf("error removing container %s as a dependency of container %s: %w", ctr.ID(), depCtr, err)
}
}
@@ -979,11 +1001,11 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
if ctrDepsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "volume %s is missing container dependencies bucket, cannot remove container %s from dependencies", vol.Name, ctr.ID())
+ return fmt.Errorf("volume %s is missing container dependencies bucket, cannot remove container %s from dependencies: %w", vol.Name, ctr.ID(), define.ErrInternal)
}
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container %s dependency on volume %s", ctr.ID(), vol.Name)
+ return fmt.Errorf("error deleting container %s dependency on volume %s: %w", ctr.ID(), vol.Name, err)
}
}
}
@@ -1040,7 +1062,7 @@ func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, n
}
if strings.HasPrefix(string(checkID), idOrName) {
if exists {
- return errors.Wrapf(define.ErrCtrExists, "more than one result for container ID %s", idOrName)
+ return fmt.Errorf("more than one result for container ID %s: %w", idOrName, define.ErrCtrExists)
}
id = checkID
exists = true
@@ -1053,9 +1075,9 @@ func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, n
return nil, err
} else if !exists {
if isPod {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "%q is a pod, not a container", idOrName)
+ return nil, fmt.Errorf("%q is a pod, not a container: %w", idOrName, define.ErrNoSuchCtr)
}
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %q found", idOrName)
+ return nil, fmt.Errorf("no container with name or ID %q found: %w", idOrName, define.ErrNoSuchCtr)
}
return id, nil
}
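The new exit-code and exit-code-time-stamp buckets above follow the bucket-getter convention already used in this file: fetch a top-level bucket by name and wrap a sentinel error when it is missing. A minimal sketch of that pattern against bbolt (assuming go.etcd.io/bbolt; the bucket name mirrors the one added by the patch, everything else is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

// errDBBadConfig stands in for define.ErrDBBadConfig.
var errDBBadConfig = errors.New("database configuration mismatch")

var exitCodeBkt = []byte("exit-code")

// getExitCodeBucket mirrors the getters in boltdb_state_internal.go:
// look up the bucket and wrap a sentinel if it does not exist.
func getExitCodeBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
	bkt := tx.Bucket(exitCodeBkt)
	if bkt == nil {
		return nil, fmt.Errorf("exit-code container bucket not found in DB: %w", errDBBadConfig)
	}
	return bkt, nil
}

func main() {
	db, err := bolt.Open("/tmp/state-example.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Create the bucket, then store an exit code keyed by container ID.
	err = db.Update(func(tx *bolt.Tx) error {
		if _, err := tx.CreateBucketIfNotExists(exitCodeBkt); err != nil {
			return err
		}
		bkt, err := getExitCodeBucket(tx)
		if err != nil {
			return err
		}
		return bkt.Put([]byte("ctr-id"), []byte{0})
	})
	if err != nil {
		log.Fatal(err)
	}
}
```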
diff --git a/libpod/boltdb_state_linux.go b/libpod/boltdb_state_linux.go
index 8bb10fb63..813afd8bf 100644
--- a/libpod/boltdb_state_linux.go
+++ b/libpod/boltdb_state_linux.go
@@ -4,8 +4,9 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -29,7 +30,7 @@ func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) er
newState.NetNS = ns
} else {
if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
- return errors.Wrapf(err, "error joining network namespace of container %s", ctr.ID())
+ return fmt.Errorf("error joining network namespace of container %s: %w", ctr.ID(), err)
}
logrus.Errorf("Joining network namespace for container %s: %v", ctr.ID(), err)
diff --git a/libpod/container.go b/libpod/container.go
index 64b4453fb..4e2d93860 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -19,7 +19,6 @@ import (
"github.com/containers/podman/v4/libpod/lock"
"github.com/containers/storage"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -355,14 +354,14 @@ func (c *Container) specFromState() (*spec.Spec, error) {
returnSpec = new(spec.Spec)
content, err := ioutil.ReadAll(f)
if err != nil {
- return nil, errors.Wrapf(err, "error reading container config")
+ return nil, fmt.Errorf("error reading container config: %w", err)
}
if err := json.Unmarshal(content, &returnSpec); err != nil {
- return nil, errors.Wrapf(err, "error unmarshalling container config")
+ return nil, fmt.Errorf("error unmarshalling container config: %w", err)
}
} else if !os.IsNotExist(err) {
// ignore when the file does not exist
- return nil, errors.Wrapf(err, "error opening container config")
+ return nil, fmt.Errorf("error opening container config: %w", err)
}
return returnSpec, nil
@@ -518,7 +517,7 @@ func (c *Container) PortMappings() ([]types.PortMapping, error) {
if len(c.config.NetNsCtr) > 0 {
netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr)
if err != nil {
- return nil, errors.Wrapf(err, "unable to lookup network namespace for container %s", c.ID())
+ return nil, fmt.Errorf("unable to look up network namespace for container %s: %w", c.ID(), err)
}
return netNsCtr.PortMappings()
}
@@ -657,7 +656,7 @@ func (c *Container) Hostname() string {
utsNsCtr, err := c.runtime.GetContainer(c.config.UTSNsCtr)
if err != nil {
// should we return an error here?
- logrus.Errorf("unable to lookup uts namespace for container %s: %v", c.ID(), err)
+ logrus.Errorf("unable to look up uts namespace for container %s: %v", c.ID(), err)
return ""
}
return utsNsCtr.Hostname()
@@ -705,7 +704,7 @@ func (c *Container) Mounted() (bool, string, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return false, "", errors.Wrapf(err, "error updating container %s state", c.ID())
+ return false, "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
// We cannot directly return c.state.Mountpoint as it is not guaranteed
@@ -735,7 +734,7 @@ func (c *Container) StartedTime() (time.Time, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return time.Time{}, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return time.Time{}, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.state.StartedTime, nil
@@ -747,7 +746,7 @@ func (c *Container) FinishedTime() (time.Time, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return time.Time{}, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return time.Time{}, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.state.FinishedTime, nil
@@ -762,7 +761,7 @@ func (c *Container) ExitCode() (int32, bool, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return 0, false, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return 0, false, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.state.ExitCode, c.state.Exited, nil
@@ -774,7 +773,7 @@ func (c *Container) OOMKilled() (bool, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return false, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return false, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.state.OOMKilled, nil
@@ -845,7 +844,7 @@ func (c *Container) execSessionNoCopy(id string) (*ExecSession, error) {
session, ok := c.state.ExecSessions[id]
if !ok {
- return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID())
+ return nil, fmt.Errorf("no exec session with ID %s found in container %s: %w", id, c.ID(), define.ErrNoSuchExecSession)
}
return session, nil
@@ -861,7 +860,7 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) {
returnSession := new(ExecSession)
if err := JSONDeepCopy(session, returnSession); err != nil {
- return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID())
+ return nil, fmt.Errorf("error copying contents of container %s exec session %s: %w", c.ID(), session.ID(), err)
}
return returnSession, nil
@@ -921,7 +920,7 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return "", errors.Wrapf(err, "error updating container %s state", c.ID())
+ return "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
@@ -932,11 +931,11 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
// If the container is not running, an error will be returned
func (c *Container) namespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
- return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
+ return "", fmt.Errorf("cannot get namespace path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
}
if linuxNS == InvalidNS {
- return "", errors.Wrapf(define.ErrInvalidArg, "invalid namespace requested from container %s", c.ID())
+ return "", fmt.Errorf("invalid namespace requested from container %s: %w", c.ID(), define.ErrInvalidArg)
}
return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, linuxNS.String()), nil
@@ -959,7 +958,7 @@ func (c *Container) CgroupPath() (string, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return "", errors.Wrapf(err, "error updating container %s state", c.ID())
+ return "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.cGroupPath()
@@ -971,10 +970,10 @@ func (c *Container) CgroupPath() (string, error) {
// NOTE: only call this when owning the container's lock.
func (c *Container) cGroupPath() (string, error) {
if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
- return "", errors.Wrapf(define.ErrNoCgroups, "this container is not creating cgroups")
+ return "", fmt.Errorf("this container is not creating cgroups: %w", define.ErrNoCgroups)
}
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
- return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID())
+ return "", fmt.Errorf("cannot get cgroup path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
}
// Read /proc/{PID}/cgroup and find the *longest* cgroup entry. That's
@@ -995,7 +994,7 @@ func (c *Container) cGroupPath() (string, error) {
// If the file doesn't exist, it means the container could have been terminated
// so report it.
if os.IsNotExist(err) {
- return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID())
+ return "", fmt.Errorf("cannot get cgroup path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
}
return "", err
}
@@ -1024,7 +1023,7 @@ func (c *Container) cGroupPath() (string, error) {
}
if len(cgroupPath) == 0 {
- return "", errors.Errorf("could not find any cgroup in %q", procPath)
+ return "", fmt.Errorf("could not find any cgroup in %q", procPath)
}
cgroupManager := c.CgroupManager()
@@ -1059,7 +1058,7 @@ func (c *Container) RootFsSize() (int64, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return -1, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return -1, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.rootFsSize()
@@ -1071,7 +1070,7 @@ func (c *Container) RWSize() (int64, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return -1, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return -1, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.rwSize()
@@ -1118,7 +1117,7 @@ func (c *Container) IsInitCtr() bool {
return len(c.config.InitContainerType) > 0
}
-// IsReadOnly returns whether the container is running in read only mode
+// IsReadOnly returns whether the container is running in read-only mode
func (c *Container) IsReadOnly() bool {
return c.config.Spec.Root.Readonly
}
@@ -1173,7 +1172,7 @@ func (c *Container) ContainerState() (*ContainerState, error) {
}
returnConfig := new(ContainerState)
if err := JSONDeepCopy(c.state, returnConfig); err != nil {
- return nil, errors.Wrapf(err, "error copying container %s state", c.ID())
+ return nil, fmt.Errorf("error copying container %s state: %w", c.ID(), err)
}
return c.state, nil
}
@@ -1331,9 +1330,57 @@ func (c *Container) getNetworkStatus() map[string]types.StatusBlock {
}
c.state.NetworkStatus = result
_ = c.save()
- // TODO remove debug for final version
- logrus.Debugf("converted old network result to new result %v", result)
+
return result
}
return nil
}
+
+func (c *Container) NamespaceMode(ns spec.LinuxNamespaceType, ctrSpec *spec.Spec) string {
+ switch ns {
+ case spec.UTSNamespace:
+ if c.config.UTSNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.UTSNsCtr)
+ }
+ case spec.CgroupNamespace:
+ if c.config.CgroupNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.CgroupNsCtr)
+ }
+ case spec.IPCNamespace:
+ if c.config.IPCNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.IPCNsCtr)
+ }
+ case spec.PIDNamespace:
+ if c.config.PIDNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.PIDNsCtr)
+ }
+ case spec.UserNamespace:
+ if c.config.UserNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.UserNsCtr)
+ }
+ case spec.NetworkNamespace:
+ if c.config.NetNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.NetNsCtr)
+ }
+ case spec.MountNamespace:
+ if c.config.MountNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.MountNsCtr)
+ }
+ }
+
+ if ctrSpec.Linux != nil {
+ // Locate the spec's given namespace.
+ // If there is none, it's namespace=host.
+ // If there is one and it has a path, it's "ns:".
+ // If there is no path, it's default - the empty string.
+ for _, availableNS := range ctrSpec.Linux.Namespaces {
+ if availableNS.Type == ns {
+ if availableNS.Path != "" {
+ return fmt.Sprintf("ns:%s", availableNS.Path)
+ }
+ return "private"
+ }
+ }
+ }
+ return "host"
+}
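The new NamespaceMode helper resolves a namespace's mode string in two steps: a configured companion container wins ("container:<id>"); otherwise the OCI spec decides, where an entry with a path yields "ns:<path>", a pathless entry yields "private", and no entry means "host". A standalone sketch of that resolution order using only the runtime-spec types (the function name and the joinedCtr parameter are illustrative, not Podman's API):

```go
package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// namespaceMode reports how a given namespace is configured for a container.
func namespaceMode(ns spec.LinuxNamespaceType, joinedCtr string, ctrSpec *spec.Spec) string {
	// A namespace shared from another container takes precedence.
	if joinedCtr != "" {
		return fmt.Sprintf("container:%s", joinedCtr)
	}
	if ctrSpec.Linux != nil {
		for _, availableNS := range ctrSpec.Linux.Namespaces {
			if availableNS.Type == ns {
				if availableNS.Path != "" {
					return fmt.Sprintf("ns:%s", availableNS.Path)
				}
				return "private"
			}
		}
	}
	// No spec entry at all: the host namespace is used.
	return "host"
}

func main() {
	s := &spec.Spec{Linux: &spec.Linux{Namespaces: []spec.LinuxNamespace{
		{Type: spec.NetworkNamespace, Path: "/run/netns/example"},
		{Type: spec.PIDNamespace},
	}}}
	fmt.Println(namespaceMode(spec.NetworkNamespace, "", s))     // ns:/run/netns/example
	fmt.Println(namespaceMode(spec.PIDNamespace, "", s))         // private
	fmt.Println(namespaceMode(spec.UTSNamespace, "", s))         // host
	fmt.Println(namespaceMode(spec.IPCNamespace, "deadbeef", s)) // container:deadbeef
}
```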
diff --git a/libpod/container_api.go b/libpod/container_api.go
index a6fcf709d..dbd5fc1fb 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -2,6 +2,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"net/http"
@@ -9,11 +11,11 @@ import (
"sync"
"time"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/signal"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,7 +40,7 @@ func (c *Container) Init(ctx context.Context, recursive bool) error {
}
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateStopped, define.ContainerStateExited) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
+ return fmt.Errorf("container %s has already been created in runtime: %w", c.ID(), define.ErrCtrStateInvalid)
}
if !recursive {
@@ -102,7 +104,7 @@ func (c *Container) Start(ctx context.Context, recursive bool) error {
// Attach call occurs before Start).
// In overall functionality, it is identical to the Start call, with the added
// side effect that an attach session will also be started.
-func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize, recursive bool) (<-chan error, error) {
+func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan resize.TerminalSize, recursive bool) (<-chan error, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -123,7 +125,18 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachSt
// Attach to the container before starting it
go func() {
- if err := c.attach(streams, keys, resize, true, startedChan, nil); err != nil {
+ // Start resizing
+ if c.LogDriver() != define.PassthroughLogging {
+ registerResizeFunc(resize, c.bundlePath())
+ }
+
+ opts := new(AttachOptions)
+ opts.Streams = streams
+ opts.DetachKeys = &keys
+ opts.Start = true
+ opts.Started = startedChan
+
+ if err := c.ociRuntime.Attach(c, opts); err != nil {
attachChan <- err
}
close(attachChan)
@@ -185,7 +198,7 @@ func (c *Container) StopWithTimeout(timeout uint) error {
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopping) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created or running containers. %s is in state %s", c.ID(), c.state.State.String())
+ return fmt.Errorf("can only stop created or running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
}
return c.stop(timeout)
@@ -202,15 +215,14 @@ func (c *Container) Kill(signal uint) error {
}
}
- // TODO: Is killing a paused container OK?
switch c.state.State {
- case define.ContainerStateRunning, define.ContainerStateStopping:
+ case define.ContainerStateRunning, define.ContainerStateStopping, define.ContainerStatePaused:
// Note that killing containers in "stopping" state is okay.
// In that state, the Podman is waiting for the runtime to
// stop the container and if that is taking too long, a user
// may have decided to kill the container after all.
default:
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
+ return fmt.Errorf("can only kill running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
}
// Hardcode all = false, we only use all when removing.
@@ -228,9 +240,9 @@ func (c *Container) Kill(signal uint) error {
// Attach attaches to a container.
// This function returns when the attach finishes. It does not hold the lock for
// the duration of its runtime, only using it at the beginning to verify state.
-func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize) error {
+func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan resize.TerminalSize) error {
if c.LogDriver() == define.PassthroughLogging {
- return errors.Wrapf(define.ErrNoLogs, "this container is using the 'passthrough' log driver, cannot attach")
+ return fmt.Errorf("this container is using the 'passthrough' log driver, cannot attach: %w", define.ErrNoLogs)
}
if !c.batched {
c.lock.Lock()
@@ -243,7 +255,7 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
+ return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid)
}
// HACK: This is really gross, but there isn't a better way without
@@ -261,8 +273,18 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-
}()
}
+ // Start resizing
+ if c.LogDriver() != define.PassthroughLogging {
+ registerResizeFunc(resize, c.bundlePath())
+ }
+
+ opts := new(AttachOptions)
+ opts.Streams = streams
+ opts.DetachKeys = &keys
+ opts.AttachReady = attachRdy
+
c.newContainerEvent(events.Attach)
- return c.attach(streams, keys, resize, false, nil, attachRdy)
+ return c.ociRuntime.Attach(c, opts)
}
// HTTPAttach forwards an attach session over a hijacked HTTP session.
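Both Attach and StartAndAttach now build an AttachOptions value and hand it to the OCI runtime, instead of threading a long positional argument list through the attach path. A small, self-contained sketch of that parameter-object style (all type and field names here are illustrative stand-ins, not Podman's actual API):

```go
package main

import "fmt"

// AttachStreams is a stand-in for the real stream configuration.
type AttachStreams struct {
	AttachInput, AttachOutput, AttachError bool
}

// AttachOptions bundles everything one attach session needs into one value.
type AttachOptions struct {
	Streams     *AttachStreams
	DetachKeys  *string
	Start       bool
	Started     chan<- bool
	AttachReady chan<- bool
}

func attach(opts *AttachOptions) error {
	if opts.Streams == nil {
		return fmt.Errorf("no streams provided")
	}
	if opts.Start && opts.Started != nil {
		// Signal that the container was started as part of the attach.
		opts.Started <- true
	}
	fmt.Printf("attaching (start=%v, detach keys set=%v)\n", opts.Start, opts.DetachKeys != nil)
	return nil
}

func main() {
	keys := "ctrl-p,ctrl-q"
	started := make(chan bool, 1)

	opts := new(AttachOptions)
	opts.Streams = &AttachStreams{AttachInput: true, AttachOutput: true, AttachError: true}
	opts.DetachKeys = &keys
	opts.Start = true
	opts.Started = started

	if err := attach(opts); err != nil {
		fmt.Println(err)
	}
	fmt.Println("started:", <-started)
}
```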
@@ -299,11 +321,11 @@ func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams *
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
+ return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid)
}
if !streamAttach && !streamLogs {
- return errors.Wrapf(define.ErrInvalidArg, "must specify at least one of stream or logs")
+ return fmt.Errorf("must specify at least one of stream or logs: %w", define.ErrInvalidArg)
}
logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())
@@ -314,7 +336,7 @@ func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams *
// AttachResize resizes the container's terminal, which is displayed by Attach
// and HTTPAttach.
-func (c *Container) AttachResize(newSize define.TerminalSize) error {
+func (c *Container) AttachResize(newSize resize.TerminalSize) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -325,7 +347,7 @@ func (c *Container) AttachResize(newSize define.TerminalSize) error {
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only resize created or running containers")
+ return fmt.Errorf("can only resize created or running containers: %w", define.ErrCtrStateInvalid)
}
logrus.Infof("Resizing TTY of container %s", c.ID())
@@ -362,20 +384,20 @@ func (c *Container) Unmount(force bool) error {
if c.state.Mounted {
mounted, err := c.runtime.storageService.MountedContainerImage(c.ID())
if err != nil {
- return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
+ return fmt.Errorf("can't determine how many times %s is mounted, refusing to unmount: %w", c.ID(), err)
}
if mounted == 1 {
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
+ return fmt.Errorf("cannot unmount storage for container %s as it is running or paused: %w", c.ID(), define.ErrCtrStateInvalid)
}
execSessions, err := c.getActiveExecSessions()
if err != nil {
return err
}
if len(execSessions) != 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
+ return fmt.Errorf("container %s has active exec sessions, refusing to unmount: %w", c.ID(), define.ErrCtrStateInvalid)
}
- return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
+ return fmt.Errorf("can't unmount %s last mount, it is still in use: %w", c.ID(), define.ErrInternal)
}
}
defer c.newContainerEvent(events.Unmount)
@@ -394,10 +416,10 @@ func (c *Container) Pause() error {
}
if c.state.State == define.ContainerStatePaused {
- return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
+ return fmt.Errorf("%q is already paused: %w", c.ID(), define.ErrCtrStateInvalid)
}
if c.state.State != define.ContainerStateRunning {
- return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
+ return fmt.Errorf("%q is not running, can't pause: %w", c.state.State, define.ErrCtrStateInvalid)
}
defer c.newContainerEvent(events.Pause)
return c.pause()
@@ -415,7 +437,7 @@ func (c *Container) Unpause() error {
}
if c.state.State != define.ContainerStatePaused {
- return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
+ return fmt.Errorf("%q is not paused, can't unpause: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer c.newContainerEvent(events.Unpause)
return c.unpause()
@@ -434,7 +456,7 @@ func (c *Container) Export(path string) error {
}
if c.state.State == define.ContainerStateRemoving {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
+ return fmt.Errorf("cannot mount container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer c.newContainerEvent(events.Mount)
@@ -447,7 +469,7 @@ func (c *Container) AddArtifact(name string, data []byte) error {
return define.ErrCtrRemoved
}
- return ioutil.WriteFile(c.getArtifactPath(name), data, 0740)
+ return ioutil.WriteFile(c.getArtifactPath(name), data, 0o740)
}
// GetArtifact reads the specified artifact file from the container
@@ -470,41 +492,84 @@ func (c *Container) RemoveArtifact(name string) error {
// Wait blocks until the container exits and returns its exit code.
func (c *Container) Wait(ctx context.Context) (int32, error) {
- return c.WaitWithInterval(ctx, DefaultWaitInterval)
+ return c.WaitForExit(ctx, DefaultWaitInterval)
}
-// WaitWithInterval blocks until the container to exit and returns its exit
-// code. The argument is the interval at which checks the container's status.
-func (c *Container) WaitWithInterval(ctx context.Context, waitTimeout time.Duration) (int32, error) {
+// WaitForExit blocks until the container exits and returns its exit code. The
+// argument is the interval at which the container's status is checked.
+func (c *Container) WaitForExit(ctx context.Context, pollInterval time.Duration) (int32, error) {
if !c.valid {
return -1, define.ErrCtrRemoved
}
- exitFile, err := c.exitFilePath()
- if err != nil {
- return -1, err
- }
- chWait := make(chan error, 1)
+ id := c.ID()
+ var conmonTimer time.Timer
+ conmonTimerSet := false
- go func() {
- <-ctx.Done()
- chWait <- define.ErrCanceled
- }()
+ getExitCode := func() (bool, int32, error) {
+ containerRemoved := false
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ }
- for {
- // ignore errors here (with exception of cancellation), it is only used to avoid waiting
- // too long.
- _, e := WaitForFile(exitFile, chWait, waitTimeout)
- if e == define.ErrCanceled {
- return -1, define.ErrCanceled
+ if err := c.syncContainer(); err != nil {
+ if !errors.Is(err, define.ErrNoSuchCtr) {
+ return false, -1, err
+ }
+ containerRemoved = true
+ }
+
+ // If conmon is not alive anymore set a timer to make sure
+ // we're returning even if conmon has forcefully been killed.
+ if !conmonTimerSet && !containerRemoved {
+ conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
+ switch {
+ case errors.Is(err, define.ErrNoSuchCtr):
+ containerRemoved = true
+ case err != nil:
+ return false, -1, err
+ case !conmonAlive:
+ timerDuration := time.Second * 20
+ conmonTimer = *time.NewTimer(timerDuration)
+ conmonTimerSet = true
+ }
+ }
+
+ if !containerRemoved {
+ // If conmon is dead for more than $timerDuration or if the
+ // container has exited properly, try to look up the exit code.
+ select {
+ case <-conmonTimer.C:
+ logrus.Debugf("Exceeded conmon timeout waiting for container %s to exit", id)
+ default:
+ if !c.ensureState(define.ContainerStateExited, define.ContainerStateConfigured) {
+ return false, -1, nil
+ }
+ }
}
- stopped, code, err := c.isStopped()
+ exitCode, err := c.runtime.state.GetContainerExitCode(id)
+ if err != nil {
+ return true, -1, err
+ }
+
+ return true, exitCode, nil
+ }
+
+ for {
+ hasExited, exitCode, err := getExitCode()
+ if hasExited {
+ return exitCode, err
+ }
if err != nil {
return -1, err
}
- if stopped {
- return code, nil
+ select {
+ case <-ctx.Done():
+ return -1, fmt.Errorf("waiting for exit code of container %s canceled", id)
+ default:
+ time.Sleep(pollInterval)
}
}
}
@@ -531,11 +596,12 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou
wantedStates := make(map[define.ContainerStatus]bool, len(conditions))
for _, condition := range conditions {
- if condition == define.ContainerStateStopped || condition == define.ContainerStateExited {
+ switch condition {
+ case define.ContainerStateExited, define.ContainerStateStopped:
waitForExit = true
- continue
+ default:
+ wantedStates[condition] = true
}
- wantedStates[condition] = true
}
trySend := func(code int32, err error) {
@@ -552,7 +618,7 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou
go func() {
defer wg.Done()
- code, err := c.WaitWithInterval(ctx, waitTimeout)
+ code, err := c.WaitForExit(ctx, waitTimeout)
trySend(code, err)
}()
}
@@ -601,13 +667,21 @@ func (c *Container) Cleanup(ctx context.Context) error {
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
+ // When the container has already been removed, the OCI runtime directory may still remain.
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
+ if err := c.cleanupRuntime(ctx); err != nil {
+ return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err)
+ }
+ return nil
+ }
+ logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
return err
}
}
// Check if state is good
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
+ return fmt.Errorf("container %s is running or paused, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
}
// Handle restart policy.
@@ -629,7 +703,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
return err
}
if len(sessions) > 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
+ return fmt.Errorf("container %s has active exec sessions, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer c.newContainerEvent(events.Cleanup)
@@ -687,19 +761,8 @@ func (c *Container) Sync() error {
defer c.lock.Unlock()
}
- // If runtime knows about the container, update its status in runtime
- // And then save back to disk
- if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopped, define.ContainerStateStopping) {
- oldState := c.state.State
- if err := c.ociRuntime.UpdateContainerStatus(c); err != nil {
- return err
- }
- // Only save back to DB if state changed
- if c.state.State != oldState {
- if err := c.save(); err != nil {
- return err
- }
- }
+ if err := c.syncContainer(); err != nil {
+ return err
}
defer c.newContainerEvent(events.Sync)
@@ -726,7 +789,7 @@ func (c *Container) ReloadNetwork() error {
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot reload network unless container network has been configured")
+ return fmt.Errorf("cannot reload network unless container network has been configured: %w", define.ErrCtrStateInvalid)
}
return c.reloadNetwork()
@@ -878,7 +941,7 @@ func (c *Container) ShouldRestart(ctx context.Context) bool {
// CopyFromArchive copies the contents from the specified tarStream to path
// *inside* the container.
-func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, chown bool, rename map[string]string, tarStream io.Reader) (func() error, error) {
+func (c *Container) CopyFromArchive(_ context.Context, containerPath string, chown, noOverwriteDirNonDir bool, rename map[string]string, tarStream io.Reader) (func() error, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -888,7 +951,7 @@ func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, c
}
}
- return c.copyFromArchive(containerPath, chown, rename, tarStream)
+ return c.copyFromArchive(containerPath, chown, noOverwriteDirNonDir, rename, tarStream)
}
// CopyToArchive copies the contents from the specified path *inside* the
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 7018ee7d8..c93c9c7bb 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
libpodutil "github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -34,7 +34,7 @@ type ContainerCommitOptions struct {
// image
func (c *Container) Commit(ctx context.Context, destImage string, options ContainerCommitOptions) (*libimage.Image, error) {
if c.config.Rootfs != "" {
- return nil, errors.Errorf("cannot commit a container that uses an exploded rootfs")
+ return nil, errors.New("cannot commit a container that uses an exploded rootfs")
}
if !c.batched {
@@ -48,7 +48,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
if c.state.State == define.ContainerStateRunning && options.Pause {
if err := c.pause(); err != nil {
- return nil, errors.Wrapf(err, "error pausing container %q to commit", c.ID())
+ return nil, fmt.Errorf("error pausing container %q to commit: %w", c.ID(), err)
}
defer func() {
if err := c.unpause(); err != nil {
@@ -136,7 +136,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
if include {
vol, err := c.runtime.GetVolume(v.Name)
if err != nil {
- return nil, errors.Wrapf(err, "volume %s used in container %s has been removed", v.Name, c.ID())
+ return nil, fmt.Errorf("volume %s used in container %s has been removed: %w", v.Name, c.ID(), err)
}
if vol.Anonymous() {
importBuilder.AddVolume(v.Dest)
@@ -202,7 +202,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
imageRef, err := is.Transport.ParseStoreReference(c.runtime.store, resolvedImageName)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q", destImage)
+ return nil, fmt.Errorf("error parsing target image name %q: %w", destImage, err)
}
commitRef = imageRef
}
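The change that recurs throughout these files is the move from github.com/pkg/errors to the standard library: errors.Wrapf(sentinel, ...) becomes fmt.Errorf("...: %w", ..., sentinel), and errors.Cause comparisons become errors.Is. A small sketch of why the wrapped sentinel stays matchable; errCtrStateInvalid below is a stand-in, not the real define.ErrCtrStateInvalid.

package main

import (
	"errors"
	"fmt"
)

// errCtrStateInvalid is an illustrative stand-in for a libpod sentinel error.
var errCtrStateInvalid = errors.New("container state improper")

func refuseCleanup(id string) error {
	// %w keeps the sentinel in the error chain, so errors.Is still matches
	// after dropping github.com/pkg/errors.
	return fmt.Errorf("container %s is running or paused, refusing to clean up: %w", id, errCtrStateInvalid)
}

func main() {
	err := refuseCleanup("0123456789ab")
	fmt.Println(errors.Is(err, errCtrStateInvalid)) // true
}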
diff --git a/libpod/container_config.go b/libpod/container_config.go
index 3e85ad4d5..544c45a8c 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -194,7 +194,7 @@ type ContainerSecurityConfig struct {
// If not explicitly set, an unused random MLS label will be assigned by
// containers/storage (but only if SELinux is enabled).
MountLabel string `json:"MountLabel,omitempty"`
- // LabelOpts are options passed in by the user to setup SELinux labels.
+ // LabelOpts are options passed in by the user to set up SELinux labels.
// These are used by the containers/storage library.
LabelOpts []string `json:"labelopts,omitempty"`
// User and group to use in the container. Can be specified as only user
@@ -243,12 +243,12 @@ type ContainerNetworkConfig struct {
// This cannot be set unless CreateNetNS is set.
// If not set, the container will be dynamically assigned an IP by CNI.
// Deprecated: Do not use this anymore, this is only for DB backwards compat.
- StaticIP net.IP `json:"staticIP"`
+ StaticIP net.IP `json:"staticIP,omitempty"`
// StaticMAC is a static MAC to request for the container.
// This cannot be set unless CreateNetNS is set.
// If not set, the container will be dynamically assigned a MAC by CNI.
// Deprecated: Do not use this anymore, this is only for DB backwards compat.
- StaticMAC types.HardwareAddr `json:"staticMAC"`
+ StaticMAC types.HardwareAddr `json:"staticMAC,omitempty"`
// PortMappings are the ports forwarded to the container's network
// namespace
// These are not used unless CreateNetNS is true
@@ -372,7 +372,6 @@ type ContainerMiscConfig struct {
// restart the container. Used only if RestartPolicy is set to
// "on-failure".
RestartRetries uint `json:"restart_retries,omitempty"`
- // TODO log options for log drivers
// PostConfigureNetNS needed when a user namespace is created by an OCI runtime
// if the network namespace is created before the user namespace it will be
// owned by the wrong user namespace.
@@ -387,7 +386,7 @@ type ContainerMiscConfig struct {
IsService bool `json:"isService"`
// SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed
SdNotifyMode string `json:"sdnotifyMode,omitempty"`
- // Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false
+ // Systemd tells libpod to set up the container in systemd mode; a value of nil denotes false
Systemd *bool `json:"systemd,omitempty"`
// HealthCheckConfig has the health check command and related timings
HealthCheckConfig *manifest.Schema2HealthConfig `json:"healthcheck"`
@@ -413,6 +412,9 @@ type ContainerMiscConfig struct {
InitContainerType string `json:"init_container_type,omitempty"`
// PasswdEntry specifies arbitrary data to append to a file.
PasswdEntry string `json:"passwd_entry,omitempty"`
+ // MountAllDevices is an option to indicate whether a privileged container
+ // will mount all the host's devices
+ MountAllDevices bool `json:"mountAllDevices"`
}
// InfraInherit contains the compatible options inheritable from the infra container
@@ -422,7 +424,6 @@ type InfraInherit struct {
CapDrop []string `json:"cap_drop,omitempty"`
HostDeviceList []spec.LinuxDevice `json:"host_device_list,omitempty"`
ImageVolumes []*specgen.ImageVolume `json:"image_volumes,omitempty"`
- InfraResources *spec.LinuxResources `json:"resource_limits,omitempty"`
Mounts []spec.Mount `json:"mounts,omitempty"`
NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
OverlayVolumes []*specgen.OverlayVolume `json:"overlay_volumes,omitempty"`
@@ -430,4 +431,10 @@ type InfraInherit struct {
SeccompProfilePath string `json:"seccomp_profile_path,omitempty"`
SelinuxOpts []string `json:"selinux_opts,omitempty"`
Volumes []*specgen.NamedVolume `json:"volumes,omitempty"`
+ ShmSize *int64 `json:"shm_size"`
+}
+
+// IsDefaultShmSize determines whether the user actually set the shm size in the parent ctr or whether it has been left at the default size
+func (inherit *InfraInherit) IsDefaultShmSize() bool {
+ return inherit.ShmSize == nil || *inherit.ShmSize == 65536000
}
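The new ShmSize field is a pointer so that "never set" can be distinguished from an explicit value, and IsDefaultShmSize treats nil or 65536000 (64000k) as the default. A short sketch of the same nil-pointer-as-unset idiom, with hypothetical names:

package main

import "fmt"

const defaultShmSize = int64(65536000) // 64000k, the default assumed above

// inheritCfg mirrors the idea behind InfraInherit.ShmSize: a nil pointer
// means the parent container never set the value explicitly.
type inheritCfg struct {
	ShmSize *int64
}

func (c *inheritCfg) isDefaultShmSize() bool {
	return c.ShmSize == nil || *c.ShmSize == defaultShmSize
}

func main() {
	unset := &inheritCfg{}
	custom := int64(134217728) // 128 MiB
	set := &inheritCfg{ShmSize: &custom}
	fmt.Println(unset.isDefaultShmSize(), set.isDefaultShmSize()) // true false
}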
diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go
index 7566fbb12..557fead1e 100644
--- a/libpod/container_copy_linux.go
+++ b/libpod/container_copy_linux.go
@@ -4,6 +4,8 @@
package libpod
import (
+ "errors"
+ "fmt"
"io"
"os"
"path/filepath"
@@ -18,12 +20,11 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
-func (c *Container) copyFromArchive(path string, chown bool, rename map[string]string, reader io.Reader) (func() error, error) {
+func (c *Container) copyFromArchive(path string, chown, noOverwriteDirNonDir bool, rename map[string]string, reader io.Reader) (func() error, error) {
var (
mountPoint string
resolvedRoot string
@@ -89,11 +90,13 @@ func (c *Container) copyFromArchive(path string, chown bool, rename map[string]s
defer unmount()
defer decompressed.Close()
putOptions := buildahCopiah.PutOptions{
- UIDMap: c.config.IDMappings.UIDMap,
- GIDMap: c.config.IDMappings.GIDMap,
- ChownDirs: idPair,
- ChownFiles: idPair,
- Rename: rename,
+ UIDMap: c.config.IDMappings.UIDMap,
+ GIDMap: c.config.IDMappings.GIDMap,
+ ChownDirs: idPair,
+ ChownFiles: idPair,
+ NoOverwriteDirNonDir: noOverwriteDirNonDir,
+ NoOverwriteNonDirDir: noOverwriteDirNonDir,
+ Rename: rename,
}
return c.joinMountAndExec(
@@ -194,7 +197,7 @@ func getContainerUser(container *Container, mountPoint string) (specs.User, erro
if !strings.Contains(userspec, ":") {
groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
if err2 != nil {
- if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
+ if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil {
err = err2
}
} else {
@@ -251,7 +254,7 @@ func (c *Container) joinMountAndExec(f func() error) error {
inHostPidNS, err := c.inHostPidNS()
if err != nil {
- errChan <- errors.Wrap(err, "checking inHostPidNS")
+ errChan <- fmt.Errorf("checking inHostPidNS: %w", err)
return
}
var pidFD *os.File
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index c05e7fd94..d3c80e896 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -2,6 +2,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"io/ioutil"
"net/http"
"os"
@@ -9,10 +11,10 @@ import (
"strconv"
"time"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/storage/pkg/stringid"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -79,11 +81,11 @@ type ExecConfig struct {
type ExecSession struct {
// Id is the ID of the exec session.
// Named somewhat strangely to not conflict with ID().
- // nolint:stylecheck,revive
+ //nolint:stylecheck,revive
Id string `json:"id"`
// ContainerId is the ID of the container this exec session belongs to.
// Named somewhat strangely to not conflict with ContainerID().
- // nolint:stylecheck,revive
+ //nolint:stylecheck,revive
ContainerId string `json:"containerId"`
// State is the state of the exec session.
@@ -112,7 +114,7 @@ func (e *ExecSession) ContainerID() string {
// configuration and current state.
func (e *ExecSession) Inspect() (*define.InspectExecSession, error) {
if e.Config == nil {
- return nil, errors.Wrapf(define.ErrInternal, "given exec session does not have a configuration block")
+ return nil, fmt.Errorf("given exec session does not have a configuration block: %w", define.ErrInternal)
}
output := new(define.InspectExecSession)
@@ -165,18 +167,18 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
// Verify our config
if config == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a configuration to ExecCreate")
+ return "", fmt.Errorf("must provide a configuration to ExecCreate: %w", define.ErrInvalidArg)
}
if len(config.Command) == 0 {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session")
+ return "", fmt.Errorf("must provide a non-empty command to start an exec session: %w", define.ErrInvalidArg)
}
if config.ExitCommandDelay > 0 && len(config.ExitCommand) == 0 {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty exit command if giving an exit command delay")
+ return "", fmt.Errorf("must provide a non-empty exit command if giving an exit command delay: %w", define.ErrInvalidArg)
}
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
- return "", errors.Wrapf(define.ErrCtrStateInvalid, "can only create exec sessions on running containers")
+ return "", fmt.Errorf("can only create exec sessions on running containers: %w", define.ErrCtrStateInvalid)
}
// Generate an ID for our new exec session
@@ -203,7 +205,7 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
session.State = define.ExecStateCreated
session.Config = new(ExecConfig)
if err := JSONDeepCopy(config, session.Config); err != nil {
- return "", errors.Wrapf(err, "error copying exec configuration into exec session")
+ return "", fmt.Errorf("error copying exec configuration into exec session: %w", err)
}
if len(session.Config.ExitCommand) > 0 {
@@ -243,16 +245,16 @@ func (c *Container) ExecStart(sessionID string) error {
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateCreated {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@@ -277,11 +279,13 @@ func (c *Container) ExecStart(sessionID string) error {
return c.save()
}
+func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachStreams, newSize *resize.TerminalSize) error {
+ return c.execStartAndAttach(sessionID, streams, newSize, false)
+}
+
// ExecStartAndAttach starts and attaches to an exec session in a container.
// newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
-// TODO: Should we include detach keys in the signature to allow override?
-// TODO: How do we handle AttachStdin/AttachStdout/AttachStderr?
-func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachStreams, newSize *define.TerminalSize) error {
+func (c *Container) execStartAndAttach(sessionID string, streams *define.AttachStreams, newSize *resize.TerminalSize, isHealthcheck bool) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -293,16 +297,16 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateCreated {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@@ -317,7 +321,12 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
return err
}
- c.newContainerEvent(events.Exec)
+ if isHealthcheck {
+ c.newContainerEvent(events.HealthStatus)
+ } else {
+ c.newContainerEvent(events.Exec)
+ }
+
logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID())
var lastErr error
@@ -363,7 +372,7 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
- return errors.Wrapf(err, "error syncing container %s state to update exec session %s", c.ID(), sessionID)
+ return fmt.Errorf("error syncing container %s state to update exec session %s: %w", c.ID(), sessionID, err)
}
// Now handle the error from readExecExitCode above.
@@ -415,7 +424,7 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session.
// newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
func (c *Container) ExecHTTPStartAndAttach(sessionID string, r *http.Request, w http.ResponseWriter,
- streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, newSize *define.TerminalSize) error {
+ streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, newSize *resize.TerminalSize) error {
// TODO: How do we combine streams with the default streams set in the exec session?
// Ensure that we don't leak a goroutine here
@@ -434,16 +443,16 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, r *http.Request, w
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
if session.State != define.ExecStateCreated {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@@ -560,11 +569,11 @@ func (c *Container) ExecStop(sessionID string, timeout *uint) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateRunning {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is %q, can only stop running sessions", c.ID(), session.ID(), session.State.String())
+ return fmt.Errorf("container %s exec session %s is %q, can only stop running sessions: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Stopping container %s exec session %s", c.ID(), session.ID())
@@ -610,7 +619,7 @@ func (c *Container) ExecCleanup(sessionID string) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State == define.ExecStateRunning {
@@ -621,7 +630,7 @@ func (c *Container) ExecCleanup(sessionID string) error {
}
if alive {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
+ return fmt.Errorf("cannot clean up container %s exec session %s as it is running: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
if err := retrieveAndWriteExecExitCode(c, session.ID()); err != nil {
@@ -648,7 +657,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID())
@@ -669,7 +678,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
if session.State == define.ExecStateRunning {
if !force {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is still running, cannot remove", c.ID(), session.ID())
+ return fmt.Errorf("container %s exec session %s is still running, cannot remove: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// Stop the session
@@ -703,7 +712,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
// ExecResize resizes the TTY of the given exec session. Only available if the
// exec session created a TTY.
-func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) error {
+func (c *Container) ExecResize(sessionID string, newSize resize.TerminalSize) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -715,13 +724,13 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
logrus.Infof("Resizing container %s exec session %s to %+v", c.ID(), session.ID(), newSize)
if session.State != define.ExecStateRunning {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it is not running", c.ID(), session.ID())
+ return fmt.Errorf("cannot resize container %s exec session %s as it is not running: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// The exec session may have exited since we last updated.
@@ -737,7 +746,7 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
logrus.Errorf("Saving state of container %s: %v", c.ID(), err)
}
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it has stopped", c.ID(), session.ID())
+ return fmt.Errorf("cannot resize container %s exec session %s as it has stopped: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// Make sure the exec session is still running.
@@ -745,10 +754,14 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
return c.ociRuntime.ExecAttachResize(c, sessionID, newSize)
}
+func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resize <-chan resize.TerminalSize) (int, error) {
+ return c.exec(config, streams, resize, false)
+}
+
// Exec emulates the old Libpod exec API, providing a single call to create,
// run, and remove an exec session. Returns exit code and error. Exit code is
// not guaranteed to be set sanely if error is not nil.
-func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resize <-chan define.TerminalSize) (int, error) {
+func (c *Container) exec(config *ExecConfig, streams *define.AttachStreams, resizeChan <-chan resize.TerminalSize, isHealthcheck bool) (int, error) {
sessionID, err := c.ExecCreate(config)
if err != nil {
return -1, err
@@ -761,15 +774,15 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
// API there.
// TODO: Refactor so this is closed here, before we remove the exec
// session.
- var size *define.TerminalSize
- if resize != nil {
- s := <-resize
+ var size *resize.TerminalSize
+ if resizeChan != nil {
+ s := <-resizeChan
size = &s
go func() {
logrus.Debugf("Sending resize events to exec session %s", sessionID)
- for resizeRequest := range resize {
+ for resizeRequest := range resizeChan {
if err := c.ExecResize(sessionID, resizeRequest); err != nil {
- if errors.Cause(err) == define.ErrExecSessionStateInvalid {
+ if errors.Is(err, define.ErrExecSessionStateInvalid) {
// The exec session stopped
// before we could resize.
logrus.Infof("Missed resize on exec session %s, already stopped", sessionID)
@@ -782,13 +795,13 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
}()
}
- if err := c.ExecStartAndAttach(sessionID, streams, size); err != nil {
+ if err := c.execStartAndAttach(sessionID, streams, size, isHealthcheck); err != nil {
return -1, err
}
session, err := c.execSessionNoCopy(sessionID)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchExecSession {
+ if errors.Is(err, define.ErrNoSuchExecSession) {
// TODO: If a proper Context is ever plumbed in here, we
// should use it.
// As things stand, though, it's not worth it - this
@@ -796,7 +809,7 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
// streaming.
diedEvent, err := c.runtime.GetExecDiedEvent(context.Background(), c.ID(), sessionID)
if err != nil {
- return -1, errors.Wrapf(err, "error retrieving exec session %s exit code", sessionID)
+ return -1, fmt.Errorf("error retrieving exec session %s exit code: %w", sessionID, err)
}
return diedEvent.ContainerExitCode, nil
}
@@ -804,7 +817,7 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
}
exitCode := session.ExitCode
if err := c.ExecRemove(sessionID, false); err != nil {
- if errors.Cause(err) == define.ErrNoSuchExecSession {
+ if errors.Is(err, define.ErrNoSuchExecSession) {
return exitCode, nil
}
return -1, err
@@ -826,7 +839,7 @@ func (c *Container) cleanupExecBundle(sessionID string) (err error) {
}
if pathErr, ok := err.(*os.PathError); ok {
err = pathErr.Err
- if errors.Cause(err) == unix.ENOTEMPTY || errors.Cause(err) == unix.EBUSY {
+ if errors.Is(err, unix.ENOTEMPTY) || errors.Is(err, unix.EBUSY) {
// give other processes a chance to use the container
if !c.batched {
if err := c.save(); err != nil {
@@ -898,7 +911,7 @@ func (c *Container) createExecBundle(sessionID string) (retErr error) {
if err := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
+ return fmt.Errorf("error creating OCI runtime exit file path %s: %w", c.execExitFileDir(sessionID), err)
}
}
return nil
@@ -937,7 +950,7 @@ func (c *Container) getExecSessionPID(sessionID string) (int, error) {
return oldSession.PID, nil
}
- return -1, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", sessionID, c.ID())
+ return -1, fmt.Errorf("no exec session with ID %s found in container %s: %w", sessionID, c.ID(), define.ErrNoSuchExecSession)
}
// getKnownExecSessions gets a list of all exec sessions we think are running,
@@ -1051,7 +1064,7 @@ func (c *Container) removeAllExecSessions() error {
}
// Delete all exec sessions
if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil {
- if errors.Cause(err) != define.ErrCtrRemoved {
+ if !errors.Is(err, define.ErrCtrRemoved) {
if lastErr != nil {
logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
@@ -1061,7 +1074,7 @@ func (c *Container) removeAllExecSessions() error {
c.state.ExecSessions = nil
c.state.LegacyExecSessions = nil
if err := c.save(); err != nil {
- if errors.Cause(err) != define.ErrCtrRemoved {
+ if !errors.Is(err, define.ErrCtrRemoved) {
if lastErr != nil {
logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
@@ -1102,13 +1115,13 @@ func writeExecExitCode(c *Container, sessionID string, exitCode int) error {
// If we can't do this, no point in continuing, any attempt to save
// would write garbage to the DB.
if err := c.syncContainer(); err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
// Container's entirely removed. We can't save status,
// but the container's entirely removed, so we don't
// need to. Exit without error.
return nil
}
- return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), sessionID)
+ return fmt.Errorf("error syncing container %s state to remove exec session %s: %w", c.ID(), sessionID, err)
}
return justWriteExecExitCode(c, sessionID, exitCode)
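The exec API now takes terminal sizes from github.com/containers/common/pkg/resize instead of define. The sketch below shows the consuming side of such a resize channel, as in the goroutine inside exec(); terminalSize and pumpResizes are local stand-ins, not the libpod or containers/common names.

package main

import (
	"fmt"
	"sync"
)

// terminalSize is a local stand-in for a resize.TerminalSize-like value
// (width and height in character cells).
type terminalSize struct {
	Width  uint16
	Height uint16
}

// pumpResizes drains a resize channel and applies each requested size,
// mirroring the goroutine that forwards events to ExecResize.
func pumpResizes(ch <-chan terminalSize, apply func(terminalSize) error, wg *sync.WaitGroup) {
	defer wg.Done()
	for sz := range ch {
		if err := apply(sz); err != nil {
			// The session may already have stopped; report and keep draining.
			fmt.Println("missed resize:", err)
		}
	}
}

func main() {
	ch := make(chan terminalSize, 2)
	var wg sync.WaitGroup
	wg.Add(1)
	go pumpResizes(ch, func(sz terminalSize) error {
		fmt.Printf("resized to %dx%d\n", sz.Width, sz.Height)
		return nil
	}, &wg)
	ch <- terminalSize{Width: 80, Height: 24}
	ch <- terminalSize{Width: 120, Height: 40}
	close(ch)
	wg.Wait()
}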
diff --git a/libpod/container_graph.go b/libpod/container_graph.go
index eeb0f02fa..67b1abc34 100644
--- a/libpod/container_graph.go
+++ b/libpod/container_graph.go
@@ -2,10 +2,10 @@ package libpod
import (
"context"
+ "fmt"
"strings"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -60,7 +60,7 @@ func BuildContainerGraph(ctrs []*Container) (*ContainerGraph, error) {
// Get the dep's node
depNode, ok := graph.nodes[dep]
if !ok {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s not found in input list", node.id, dep)
+ return nil, fmt.Errorf("container %s depends on container %s not found in input list: %w", node.id, dep, define.ErrNoSuchCtr)
}
// Add the dependent node to the node's dependencies
@@ -85,7 +85,7 @@ func BuildContainerGraph(ctrs []*Container) (*ContainerGraph, error) {
if err != nil {
return nil, err
} else if cycle {
- return nil, errors.Wrapf(define.ErrInternal, "cycle found in container dependency graph")
+ return nil, fmt.Errorf("cycle found in container dependency graph: %w", define.ErrInternal)
}
return graph, nil
@@ -150,7 +150,7 @@ func detectCycles(graph *ContainerGraph) (bool, error) {
if info.lowLink == info.index {
l := len(stack)
if l == 0 {
- return false, errors.Wrapf(define.ErrInternal, "empty stack in detectCycles")
+ return false, fmt.Errorf("empty stack in detectCycles: %w", define.ErrInternal)
}
// Pop off the stack
@@ -160,7 +160,7 @@ func detectCycles(graph *ContainerGraph) (bool, error) {
// Popped item is no longer on the stack, mark as such
topInfo, ok := nodes[topOfStack.id]
if !ok {
- return false, errors.Wrapf(define.ErrInternal, "error finding node info for %s", topOfStack.id)
+ return false, fmt.Errorf("error finding node info for %s: %w", topOfStack.id, define.ErrInternal)
}
topInfo.onStack = false
@@ -203,7 +203,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
if setError {
// Mark us as visited, and set an error
ctrsVisited[node.id] = true
- ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
+ ctrErrors[node.id] = fmt.Errorf("a dependency of container %s failed to start: %w", node.id, define.ErrCtrStateInvalid)
// Hit anyone who depends on us, and set errors on them too
for _, successor := range node.dependedOn {
@@ -243,7 +243,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
} else if len(depsStopped) > 0 {
// Our dependencies are not running
depsList := strings.Join(depsStopped, ",")
- ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+ ctrErrors[node.id] = fmt.Errorf("the following dependencies of container %s are not running: %s: %w", node.id, depsList, define.ErrCtrStateInvalid)
ctrErrored = true
}
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 93240812d..fa2130a28 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"sort"
"strings"
@@ -14,7 +15,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/runtime-tools/validate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
)
@@ -24,15 +24,15 @@ import (
func (c *Container) inspectLocked(size bool) (*define.InspectContainerData, error) {
storeCtr, err := c.runtime.store.Container(c.ID())
if err != nil {
- return nil, errors.Wrapf(err, "error getting container from store %q", c.ID())
+ return nil, fmt.Errorf("error getting container from store %q: %w", c.ID(), err)
}
layer, err := c.runtime.store.Layer(storeCtr.LayerID)
if err != nil {
- return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID)
+ return nil, fmt.Errorf("error reading information about layer %q: %w", storeCtr.LayerID, err)
}
driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
if err != nil {
- return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID())
+ return nil, fmt.Errorf("error getting graph driver info %q: %w", c.ID(), err)
}
return c.getContainerInspectData(size, driverData)
}
@@ -241,7 +241,7 @@ func (c *Container) GetMounts(namedVolumes []*ContainerNamedVolume, imageVolumes
// volume.
volFromDB, err := c.runtime.state.Volume(volume.Name)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
+ return nil, fmt.Errorf("error looking up volume %s in container %s config: %w", volume.Name, c.ID(), err)
}
mountStruct.Driver = volFromDB.Driver()
@@ -794,28 +794,8 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
hostConfig.PidMode = pidMode
// UTS namespace mode
- utsMode := ""
- if c.config.UTSNsCtr != "" {
- utsMode = fmt.Sprintf("container:%s", c.config.UTSNsCtr)
- } else if ctrSpec.Linux != nil {
- // Locate the spec's UTS namespace.
- // If there is none, it's uts=host.
- // If there is one and it has a path, it's "ns:".
- // If there is no path, it's default - the empty string.
- for _, ns := range ctrSpec.Linux.Namespaces {
- if ns.Type == spec.UTSNamespace {
- if ns.Path != "" {
- utsMode = fmt.Sprintf("ns:%s", ns.Path)
- } else {
- utsMode = "private"
- }
- break
- }
- }
- if utsMode == "" {
- utsMode = "host"
- }
- }
+ utsMode := c.NamespaceMode(spec.UTSNamespace, ctrSpec)
+
hostConfig.UTSMode = utsMode
// User namespace mode
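The inline UTS block removed above follows the rule now centralized in NamespaceMode: no namespace entry means "host", an entry with a path means "ns:<path>", and an entry without a path means "private". A standalone sketch of that decision, using hypothetical types:

package main

import "fmt"

// nsEntry is a stand-in for one spec.LinuxNamespace of a particular type.
type nsEntry struct {
	present bool
	path    string
}

// namespaceMode reproduces the removed inline logic: absent entry => host,
// entry with a bind path => ns:<path>, entry without a path => private.
func namespaceMode(ns nsEntry) string {
	if !ns.present {
		return "host"
	}
	if ns.path != "" {
		return "ns:" + ns.path
	}
	return "private"
}

func main() {
	fmt.Println(namespaceMode(nsEntry{}))                                      // host
	fmt.Println(namespaceMode(nsEntry{present: true}))                         // private
	fmt.Println(namespaceMode(nsEntry{present: true, path: "/proc/1/ns/uts"})) // ns:/proc/1/ns/uts
}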
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 7494eb3ec..560b4a1c1 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -3,6 +3,7 @@ package libpod
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -17,9 +18,11 @@ import (
"github.com/containers/buildah/pkg/overlay"
butil "github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/etchosts"
+ "github.com/containers/common/libnetwork/resolvconf"
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/chown"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/ctime"
@@ -39,7 +42,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -83,7 +85,7 @@ func (c *Container) rootFsSize() (int64, error) {
for layer.Parent != "" {
layerSize, err := c.runtime.store.DiffSize(layer.Parent, layer.ID)
if err != nil {
- return size, errors.Wrapf(err, "getting diffsize of layer %q and its parent %q", layer.ID, layer.Parent)
+ return size, fmt.Errorf("getting diffsize of layer %q and its parent %q: %w", layer.ID, layer.Parent, err)
}
size += layerSize
layer, err = c.runtime.store.Layer(layer.Parent)
@@ -199,12 +201,12 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
c.state.FinishedTime = ctime.Created(fi)
statusCodeStr, err := ioutil.ReadFile(exitFile)
if err != nil {
- return errors.Wrapf(err, "failed to read exit file for container %s", c.ID())
+ return fmt.Errorf("failed to read exit file for container %s: %w", c.ID(), err)
}
statusCode, err := strconv.Atoi(string(statusCodeStr))
if err != nil {
- return errors.Wrapf(err, "error converting exit status code (%q) for container %s to int",
- c.ID(), statusCodeStr)
+ return fmt.Errorf("error converting exit status code (%q, err) for container %s to int: %w",
+ c.ID(), statusCodeStr, err)
}
c.state.ExitCode = int32(statusCode)
@@ -218,7 +220,7 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
// Write an event for the container's death
c.newContainerExitedEvent(c.state.ExitCode)
- return nil
+ return c.runtime.state.AddContainerExitCode(c.ID(), c.state.ExitCode)
}
func (c *Container) shouldRestart() bool {
@@ -266,7 +268,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
return false, nil
} else if c.state.State == define.ContainerStateUnknown {
- return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt")
+ return false, fmt.Errorf("invalid container state encountered in restart attempt: %w", define.ErrInternal)
}
c.newContainerEvent(events.Restart)
@@ -289,7 +291,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
return false, err
}
- // setup slirp4netns again because slirp4netns will die when conmon exits
+ // set up slirp4netns again because slirp4netns will die when conmon exits
if c.config.NetMode.IsSlirp4netns() {
err := c.runtime.setupSlirp4netns(c, c.state.NetNS)
if err != nil {
@@ -297,7 +299,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
}
}
- // setup rootlesskit port forwarder again since it dies when conmon exits
+ // set up rootlesskit port forwarder again since it dies when conmon exits
// we use rootlesskit port forwarder only as rootless and when bridge network is used
if rootless.IsRootless() && c.config.NetMode.IsBridge() && len(c.config.PortMappings) > 0 {
err := c.runtime.setupRootlessPortMappingViaRLK(c, c.state.NetNS.Path(), c.state.NetworkStatus)
@@ -369,7 +371,7 @@ func (c *Container) syncContainer() error {
}
if !c.valid {
- return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
+ return fmt.Errorf("container %s is not valid: %w", c.ID(), define.ErrCtrRemoved)
}
return nil
@@ -428,16 +430,16 @@ func (c *Container) setupStorageMapping(dest, from *storage.IDMappingOptions) {
// Create container root filesystem for use
func (c *Container) setupStorage(ctx context.Context) error {
if !c.valid {
- return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
+ return fmt.Errorf("container %s is not valid: %w", c.ID(), define.ErrCtrRemoved)
}
if c.state.State != define.ContainerStateConfigured {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
+ return fmt.Errorf("container %s must be in Configured state to have storage set up: %w", c.ID(), define.ErrCtrStateInvalid)
}
// Need both an image ID and image name, plus a bool telling us whether to use the image configuration
if c.config.Rootfs == "" && (c.config.RootfsImageID == "" || c.config.RootfsImageName == "") {
- return errors.Wrapf(define.ErrInvalidArg, "must provide image ID and image name to use an image")
+ return fmt.Errorf("must provide image ID and image name to use an image: %w", define.ErrInvalidArg)
}
options := storage.ContainerOptions{
IDMappingOptions: storage.IDMappingOptions{
@@ -473,7 +475,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
defOptions, err := storage.GetMountOptions(c.runtime.store.GraphDriverName(), c.runtime.store.GraphOptions())
if err != nil {
- return errors.Wrapf(err, "error getting default mount options")
+ return fmt.Errorf("error getting default mount options: %w", err)
}
var newOptions []string
for _, opt := range defOptions {
@@ -503,12 +505,12 @@ func (c *Container) setupStorage(ctx context.Context) error {
}
containerInfo, containerInfoErr = c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, options)
- if !generateName || errors.Cause(containerInfoErr) != storage.ErrDuplicateName {
+ if !generateName || !errors.Is(containerInfoErr, storage.ErrDuplicateName) {
break
}
}
if containerInfoErr != nil {
- return errors.Wrapf(containerInfoErr, "error creating container storage")
+ return fmt.Errorf("error creating container storage: %w", containerInfoErr)
}
// only reconfig IDMappings if layer was mounted from storage
@@ -550,7 +552,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
if err := os.MkdirAll(artifacts, 0755); err != nil {
- return errors.Wrap(err, "error creating artifacts directory")
+ return fmt.Errorf("error creating artifacts directory: %w", err)
}
return nil
@@ -579,16 +581,16 @@ func (c *Container) processLabel(processLabel string) (string, error) {
// Tear down a container's storage prior to removal
func (c *Container) teardownStorage() error {
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
+ return fmt.Errorf("cannot remove storage for container %s as it is running or paused: %w", c.ID(), define.ErrCtrStateInvalid)
}
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
if err := os.RemoveAll(artifacts); err != nil {
- return errors.Wrapf(err, "error removing container %s artifacts %q", c.ID(), artifacts)
+ return fmt.Errorf("error removing container %s artifacts %q: %w", c.ID(), artifacts, err)
}
if err := c.cleanupStorage(); err != nil {
- return errors.Wrapf(err, "failed to cleanup container %s storage", c.ID())
+ return fmt.Errorf("failed to clean up container %s storage: %w", c.ID(), err)
}
if err := c.runtime.storageService.DeleteContainer(c.ID()); err != nil {
@@ -596,12 +598,12 @@ func (c *Container) teardownStorage() error {
// error - we wanted it gone, it is already gone.
// Potentially another tool using containers/storage already
// removed it?
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
logrus.Infof("Storage for container %s already removed", c.ID())
return nil
}
- return errors.Wrapf(err, "error removing container %s root filesystem", c.ID())
+ return fmt.Errorf("error removing container %s root filesystem: %w", c.ID(), err)
}
return nil
@@ -645,14 +647,14 @@ func (c *Container) refresh() error {
}
if !c.valid {
- return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
+ return fmt.Errorf("container %s is not valid - may have been removed: %w", c.ID(), define.ErrCtrRemoved)
}
// We need to get the container's temporary directory from c/storage
// It was lost in the reboot and must be recreated
dir, err := c.runtime.storageService.GetRunDir(c.ID())
if err != nil {
- return errors.Wrapf(err, "error retrieving temporary directory for container %s", c.ID())
+ return fmt.Errorf("error retrieving temporary directory for container %s: %w", c.ID(), err)
}
c.state.RunDir = dir
@@ -666,7 +668,7 @@ func (c *Container) refresh() error {
}
root := filepath.Join(c.runtime.config.Engine.TmpDir, "containers-root", c.ID())
if err := os.MkdirAll(root, 0755); err != nil {
- return errors.Wrapf(err, "error creating userNS tmpdir for container %s", c.ID())
+ return fmt.Errorf("error creating userNS tmpdir for container %s: %w", c.ID(), err)
}
if err := os.Chown(root, c.RootUID(), c.RootGID()); err != nil {
return err
@@ -676,7 +678,7 @@ func (c *Container) refresh() error {
// We need to pick up a new lock
lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error acquiring lock %d for container %s", c.config.LockID, c.ID())
+ return fmt.Errorf("error acquiring lock %d for container %s: %w", c.config.LockID, c.ID(), err)
}
c.lock = lock
@@ -691,13 +693,13 @@ func (c *Container) refresh() error {
if c.config.rewrite {
// SafeRewriteContainerConfig must be used with care. Make sure to not change config fields by accident.
if err := c.runtime.state.SafeRewriteContainerConfig(c, "", "", c.config); err != nil {
- return errors.Wrapf(err, "failed to rewrite the config for container %s", c.config.ID)
+ return fmt.Errorf("failed to rewrite the config for container %s: %w", c.config.ID, err)
}
c.config.rewrite = false
}
if err := c.save(); err != nil {
- return errors.Wrapf(err, "error refreshing state for container %s", c.ID())
+ return fmt.Errorf("error refreshing state for container %s: %w", c.ID(), err)
}
// Remove ctl and attach files, which may persist across reboot
@@ -714,26 +716,26 @@ func (c *Container) removeConmonFiles() error {
// Files are allowed to not exist, so ignore ENOENT
attachFile, err := c.AttachSocketPath()
if err != nil {
- return errors.Wrapf(err, "failed to get attach socket path for container %s", c.ID())
+ return fmt.Errorf("failed to get attach socket path for container %s: %w", c.ID(), err)
}
if err := os.Remove(attachFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s attach file", c.ID())
+ return fmt.Errorf("error removing container %s attach file: %w", c.ID(), err)
}
ctlFile := filepath.Join(c.bundlePath(), "ctl")
if err := os.Remove(ctlFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s ctl file", c.ID())
+ return fmt.Errorf("error removing container %s ctl file: %w", c.ID(), err)
}
winszFile := filepath.Join(c.bundlePath(), "winsz")
if err := os.Remove(winszFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s winsz file", c.ID())
+ return fmt.Errorf("error removing container %s winsz file: %w", c.ID(), err)
}
oomFile := filepath.Join(c.bundlePath(), "oom")
if err := os.Remove(oomFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s OOM file", c.ID())
+ return fmt.Errorf("error removing container %s OOM file: %w", c.ID(), err)
}
// Remove the exit file so we don't leak memory in tmpfs
@@ -742,7 +744,7 @@ func (c *Container) removeConmonFiles() error {
return err
}
if err := os.Remove(exitFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s exit file", c.ID())
+ return fmt.Errorf("error removing container %s exit file: %w", c.ID(), err)
}
return nil
@@ -753,7 +755,7 @@ func (c *Container) export(path string) error {
if !c.state.Mounted {
containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
if err != nil {
- return errors.Wrapf(err, "mounting container %q", c.ID())
+ return fmt.Errorf("mounting container %q: %w", c.ID(), err)
}
mountPoint = containerMount
defer func() {
@@ -765,12 +767,12 @@ func (c *Container) export(path string) error {
input, err := archive.Tar(mountPoint, archive.Uncompressed)
if err != nil {
- return errors.Wrapf(err, "error reading container directory %q", c.ID())
+ return fmt.Errorf("error reading container directory %q: %w", c.ID(), err)
}
outFile, err := os.Create(path)
if err != nil {
- return errors.Wrapf(err, "error creating file %q", path)
+ return fmt.Errorf("error creating file %q: %w", path, err)
}
defer outFile.Close()
@@ -783,24 +785,10 @@ func (c *Container) getArtifactPath(name string) string {
return filepath.Join(c.config.StaticDir, artifactsDir, name)
}
-// Used with Wait() to determine if a container has exited
-func (c *Container) isStopped() (bool, int32, error) {
- if !c.batched {
- c.lock.Lock()
- defer c.lock.Unlock()
- }
- err := c.syncContainer()
- if err != nil {
- return true, -1, err
- }
-
- return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopping), c.state.ExitCode, nil
-}
-
// save container state to the database
func (c *Container) save() error {
if err := c.runtime.state.SaveContainer(c); err != nil {
- return errors.Wrapf(err, "error saving container %s state", c.ID())
+ return fmt.Errorf("error saving container %s state: %w", c.ID(), err)
}
return nil
}
@@ -811,7 +799,7 @@ func (c *Container) save() error {
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr error) {
// Container must be created or stopped to be started
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
+ return fmt.Errorf("container %s must be in Created or Stopped state to be started: %w", c.ID(), define.ErrCtrStateInvalid)
}
if !recursive {
@@ -854,11 +842,11 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr
func (c *Container) checkDependenciesAndHandleError() error {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
+ return fmt.Errorf("error checking dependencies for container %s: %w", c.ID(), err)
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
- return errors.Wrapf(define.ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ return fmt.Errorf("some dependencies of container %s are not started: %s: %w", c.ID(), depString, define.ErrCtrStateInvalid)
}
return nil
@@ -873,7 +861,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
depVisitedCtrs := make(map[string]*Container)
if err := c.getAllDependencies(depVisitedCtrs); err != nil {
- return errors.Wrapf(err, "error starting dependency for container %s", c.ID())
+ return fmt.Errorf("error starting dependency for container %s: %w", c.ID(), err)
}
// Because of how Go handles passing slices through functions, a slice cannot grow between function calls
@@ -886,7 +874,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
// Build a dependency graph of containers
graph, err := BuildContainerGraph(depCtrs)
if err != nil {
- return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID())
+ return fmt.Errorf("error generating dependency graph for container %s: %w", c.ID(), err)
}
// If there are no containers without dependencies, we can't start
@@ -896,7 +884,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
if len(graph.nodes) == 0 {
return nil
}
- return errors.Wrapf(define.ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
+ return fmt.Errorf("all dependencies have dependencies of %s: %w", c.ID(), define.ErrNoSuchCtr)
}
ctrErrors := make(map[string]error)
@@ -912,7 +900,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
for _, e := range ctrErrors {
logrus.Errorf("%q", e)
}
- return errors.Wrapf(define.ErrInternal, "error starting some containers")
+ return fmt.Errorf("error starting some containers: %w", define.ErrInternal)
}
return nil
}
@@ -968,13 +956,13 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
// Get the dependency container
depCtr, err := c.runtime.state.Container(dep)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving dependency %s of container %s from state", dep, c.ID())
+ return nil, fmt.Errorf("error retrieving dependency %s of container %s from state: %w", dep, c.ID(), err)
}
// Check the status
state, err := depCtr.State()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
+ return nil, fmt.Errorf("error retrieving state of dependency %s of container %s: %w", dep, c.ID(), err)
}
if state != define.ContainerStateRunning && !depCtr.config.IsInfra {
notRunning = append(notRunning, dep)
@@ -986,7 +974,7 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
}
func (c *Container) completeNetworkSetup() error {
- var outResolvConf []string
+ var nameservers []string
netDisabled, err := c.NetworkDisabled()
if err != nil {
return err
@@ -1000,11 +988,14 @@ func (c *Container) completeNetworkSetup() error {
if err := c.runtime.setupNetNS(c); err != nil {
return err
}
+ if err := c.save(); err != nil {
+ return err
+ }
state := c.state
// collect any dns servers that cni tells us to use (dnsname)
for _, status := range c.getNetworkStatus() {
for _, server := range status.DNSServerIPs {
- outResolvConf = append(outResolvConf, fmt.Sprintf("nameserver %s", server))
+ nameservers = append(nameservers, server.String())
}
}
// check if we have a bindmount for /etc/hosts
@@ -1020,24 +1011,12 @@ func (c *Container) completeNetworkSetup() error {
}
// check if we have a bindmount for resolv.conf
- resolvBindMount := state.BindMounts["/etc/resolv.conf"]
- if len(outResolvConf) < 1 || resolvBindMount == "" || len(c.config.NetNsCtr) > 0 {
+ resolvBindMount := state.BindMounts[resolvconf.DefaultResolvConf]
+ if len(nameservers) < 1 || resolvBindMount == "" || len(c.config.NetNsCtr) > 0 {
return nil
}
- // read the existing resolv.conf
- b, err := ioutil.ReadFile(resolvBindMount)
- if err != nil {
- return err
- }
- for _, line := range strings.Split(string(b), "\n") {
- // only keep things that don't start with nameserver from the old
- // resolv.conf file
- if !strings.HasPrefix(line, "nameserver") {
- outResolvConf = append([]string{line}, outResolvConf...)
- }
- }
// write and return
- return ioutil.WriteFile(resolvBindMount, []byte(strings.Join(outResolvConf, "\n")), 0644)
+ return resolvconf.Add(resolvBindMount, nameservers)
}
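
completeNetworkSetup now collects plain nameserver strings and delegates the file edit to resolvconf.Add(path, nameservers) instead of splicing lines into /etc/resolv.conf by hand. A rough sketch of the collection step, using a simplified stand-in for Podman's per-network status type:

package main

import (
	"fmt"
	"net"
)

// collectNameservers flattens per-network DNS server IPs into strings,
// mirroring the loop above; the map is a stand-in for the real status type.
func collectNameservers(statuses map[string][]net.IP) []string {
	var nameservers []string
	for _, ips := range statuses {
		for _, ip := range ips {
			nameservers = append(nameservers, ip.String())
		}
	}
	return nameservers
}

func main() {
	statuses := map[string][]net.IP{"podman": {net.ParseIP("10.89.0.1")}}
	// The result would then be handed to resolvconf.Add(resolvBindMount, nameservers).
	fmt.Println(collectNameservers(statuses))
}
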
// Initialize a container, creating it in the runtime
@@ -1176,9 +1155,9 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
func (c *Container) initAndStart(ctx context.Context) (retErr error) {
// If we are ContainerStateUnknown, throw an error
if c.state.State == define.ContainerStateUnknown {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
+ return fmt.Errorf("container %s is in an unknown state: %w", c.ID(), define.ErrCtrStateInvalid)
} else if c.state.State == define.ContainerStateRemoving {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start container %s as it is being removed", c.ID())
+ return fmt.Errorf("cannot start container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
}
// If we are running, do nothing
@@ -1187,7 +1166,7 @@ func (c *Container) initAndStart(ctx context.Context) (retErr error) {
}
// If we are paused, throw an error
if c.state.State == define.ContainerStatePaused {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
+ return fmt.Errorf("cannot start paused container %s: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer func() {
@@ -1290,13 +1269,6 @@ func (c *Container) stop(timeout uint) error {
}
}
- // Check if conmon is still alive.
- // If it is not, we won't be getting an exit file.
- conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
- if err != nil {
- return err
- }
-
// Set the container state to "stopping" and unlock the container
// before handing it over to conmon to unblock other commands. #8501
// demonstrates nicely that a high stop timeout will block even simple
@@ -1304,7 +1276,7 @@ func (c *Container) stop(timeout uint) error {
// is held when busy-waiting for the container to be stopped.
c.state.State = define.ContainerStateStopping
if err := c.save(); err != nil {
- return errors.Wrapf(err, "error saving container %s state before stopping", c.ID())
+ return fmt.Errorf("error saving container %s state before stopping: %w", c.ID(), err)
}
if !c.batched {
c.lock.Unlock()
@@ -1315,18 +1287,18 @@ func (c *Container) stop(timeout uint) error {
if !c.batched {
c.lock.Lock()
if err := c.syncContainer(); err != nil {
- switch errors.Cause(err) {
- // If the container has already been removed (e.g., via
- // the cleanup process), there's nothing left to do.
- case define.ErrNoSuchCtr, define.ErrCtrRemoved:
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
+ // If the container has already been removed (e.g., via
+ // the cleanup process), set the container state to "stopped".
+ c.state.State = define.ContainerStateStopped
+ return stopErr
+ }
+
+ if stopErr != nil {
+ logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
return stopErr
- default:
- if stopErr != nil {
- logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
- return stopErr
- }
- return err
}
+ return err
}
}
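
The switch on errors.Cause(err) becomes a pair of errors.Is checks. A small sketch (placeholder sentinels, not Podman's define errors) of the equivalence:

package main

import (
	"errors"
	"fmt"
)

// Placeholder sentinels standing in for define.ErrNoSuchCtr and define.ErrCtrRemoved.
var (
	errNoSuchCtr  = errors.New("no such container")
	errCtrRemoved = errors.New("container has been removed")
)

func classifySyncError(err error) string {
	// errors.Is walks the %w wrap chain, so it matches wrapped sentinels just
	// as the old errors.Cause switch matched pkg/errors-wrapped ones.
	if errors.Is(err, errNoSuchCtr) || errors.Is(err, errCtrRemoved) {
		return "container already gone"
	}
	return "real error"
}

func main() {
	err := fmt.Errorf("syncing container abc: %w", errCtrRemoved)
	fmt.Println(classifySyncError(err)) // container already gone
}
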
@@ -1349,25 +1321,22 @@ func (c *Container) stop(timeout uint) error {
}
c.newContainerEvent(events.Stop)
-
- c.state.PID = 0
- c.state.ConmonPID = 0
c.state.StoppedByUser = true
+ conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
+ if err != nil {
+ return err
+ }
if !conmonAlive {
- // Conmon is dead, so we can't expect an exit code.
- c.state.ExitCode = -1
- c.state.FinishedTime = time.Now()
- c.state.State = define.ContainerStateStopped
- if err := c.save(); err != nil {
- logrus.Errorf("Saving container %s status: %v", c.ID(), err)
+ if err := c.checkExitFile(); err != nil {
+ return err
}
- return errors.Wrapf(define.ErrConmonDead, "container %s conmon process missing, cannot retrieve exit code", c.ID())
+ return c.save()
}
if err := c.save(); err != nil {
- return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
+ return fmt.Errorf("error saving container %s state after stopping: %w", c.ID(), err)
}
// Wait until we have an exit file, and sync once we do
@@ -1381,16 +1350,16 @@ func (c *Container) stop(timeout uint) error {
// Internal, non-locking function to pause a container
func (c *Container) pause() error {
if c.config.NoCgroups {
- return errors.Wrapf(define.ErrNoCgroups, "cannot pause without using Cgroups")
+ return fmt.Errorf("cannot pause without using Cgroups: %w", define.ErrNoCgroups)
}
if rootless.IsRootless() {
cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
- return errors.Wrap(err, "failed to determine cgroupversion")
+ return fmt.Errorf("failed to determine cgroupversion: %w", err)
}
if !cgroupv2 {
- return errors.Wrap(define.ErrNoCgroups, "can not pause containers on rootless containers with cgroup V1")
+ return fmt.Errorf("can not pause containers on rootless containers with cgroup V1: %w", define.ErrNoCgroups)
}
}
@@ -1409,7 +1378,7 @@ func (c *Container) pause() error {
// Internal, non-locking function to unpause a container
func (c *Container) unpause() error {
if c.config.NoCgroups {
- return errors.Wrapf(define.ErrNoCgroups, "cannot unpause without using Cgroups")
+ return fmt.Errorf("cannot unpause without using Cgroups: %w", define.ErrNoCgroups)
}
if err := c.ociRuntime.UnpauseContainer(c); err != nil {
@@ -1427,7 +1396,7 @@ func (c *Container) unpause() error {
// Internal, non-locking function to restart a container
func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retErr error) {
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStateExited) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
+ return fmt.Errorf("unable to restart a container in a paused or unknown state: %w", define.ErrCtrStateInvalid)
}
c.newContainerEvent(events.Restart)
@@ -1502,7 +1471,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
if !c.config.NoShm {
mounted, err := mount.Mounted(c.config.ShmDir)
if err != nil {
- return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir)
+ return "", fmt.Errorf("unable to determine if %q is mounted: %w", c.config.ShmDir, err)
}
if !mounted && !MountExists(c.config.Spec.Mounts, "/dev/shm") {
@@ -1511,7 +1480,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
return "", err
}
if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil {
- return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir)
+ return "", fmt.Errorf("failed to chown %s: %w", c.config.ShmDir, err)
}
defer func() {
if deferredErr != nil {
@@ -1531,11 +1500,11 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
overlayDest := c.runtime.RunRoot()
contentDir, err := overlay.GenerateStructure(overlayDest, c.ID(), "rootfs", c.RootUID(), c.RootGID())
if err != nil {
- return "", errors.Wrapf(err, "rootfs-overlay: failed to create TempDir in the %s directory", overlayDest)
+ return "", fmt.Errorf("rootfs-overlay: failed to create TempDir in the %s directory: %w", overlayDest, err)
}
overlayMount, err := overlay.Mount(contentDir, c.config.Rootfs, overlayDest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
if err != nil {
- return "", errors.Wrapf(err, "rootfs-overlay: creating overlay failed %q", c.config.Rootfs)
+ return "", fmt.Errorf("rootfs-overlay: creating overlay failed %q: %w", c.config.Rootfs, err)
}
// Seems fuse-overlayfs is not present
@@ -1545,7 +1514,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
mountOpts := label.FormatMountLabel(strings.Join(overlayMount.Options, ","), c.MountLabel())
err = mount.Mount("overlay", overlayMount.Source, overlayMount.Type, mountOpts)
if err != nil {
- return "", errors.Wrapf(err, "rootfs-overlay: creating overlay failed %q from native overlay", c.config.Rootfs)
+ return "", fmt.Errorf("rootfs-overlay: creating overlay failed %q from native overlay: %w", c.config.Rootfs, err)
}
}
@@ -1556,7 +1525,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
}
hostUID, hostGID, err := butil.GetHostIDs(util.IDtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), util.IDtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), uint32(execUser.Uid), uint32(execUser.Gid))
if err != nil {
- return "", errors.Wrap(err, "unable to get host UID and host GID")
+ return "", fmt.Errorf("unable to get host UID and host GID: %w", err)
}
//note: this should not be recursive, if using external rootfs users should be responsible on configuring ownership.
@@ -1583,30 +1552,30 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
dirfd, err := unix.Open(mountPoint, unix.O_RDONLY|unix.O_PATH, 0)
if err != nil {
- return "", errors.Wrap(err, "open mount point")
+ return "", fmt.Errorf("open mount point: %w", err)
}
defer unix.Close(dirfd)
err = unix.Mkdirat(dirfd, "etc", 0755)
if err != nil && !os.IsExist(err) {
- return "", errors.Wrap(err, "create /etc")
+ return "", fmt.Errorf("create /etc: %w", err)
}
// If the etc directory was created, chown it to root in the container
if err == nil && (rootUID != 0 || rootGID != 0) {
err = unix.Fchownat(dirfd, "etc", rootUID, rootGID, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
- return "", errors.Wrap(err, "chown /etc")
+ return "", fmt.Errorf("chown /etc: %w", err)
}
}
etcInTheContainerPath, err := securejoin.SecureJoin(mountPoint, "etc")
if err != nil {
- return "", errors.Wrap(err, "resolve /etc in the container")
+ return "", fmt.Errorf("resolve /etc in the container: %w", err)
}
etcInTheContainerFd, err := unix.Open(etcInTheContainerPath, unix.O_RDONLY|unix.O_PATH, 0)
if err != nil {
- return "", errors.Wrap(err, "open /etc in the container")
+ return "", fmt.Errorf("open /etc in the container: %w", err)
}
defer unix.Close(etcInTheContainerFd)
@@ -1614,13 +1583,13 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
// create it, so that mount command within the container will work.
err = unix.Symlinkat("/proc/mounts", etcInTheContainerFd, "mtab")
if err != nil && !os.IsExist(err) {
- return "", errors.Wrap(err, "creating /etc/mtab symlink")
+ return "", fmt.Errorf("creating /etc/mtab symlink: %w", err)
}
// If the symlink was created, then also chown it to root in the container
if err == nil && (rootUID != 0 || rootGID != 0) {
err = unix.Fchownat(etcInTheContainerFd, "mtab", rootUID, rootGID, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
- return "", errors.Wrap(err, "chown /etc/mtab")
+ return "", fmt.Errorf("chown /etc/mtab: %w", err)
}
}
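
The /etc and /etc/mtab handling above resolves paths with securejoin.SecureJoin so that symlinks inside the container rootfs cannot point outside the mount point. A tiny usage sketch with an illustrative path, assuming the github.com/cyphar/filepath-securejoin module:

package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// Resolve "etc" relative to a container mount point; any symlinks are
	// evaluated inside that root, so the result cannot escape it.
	etcPath, err := securejoin.SecureJoin("/var/lib/containers/merged", "etc")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(etcPath)
}
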
@@ -1654,47 +1623,33 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
logrus.Debugf("Going to mount named volume %s", v.Name)
vol, err := c.runtime.state.Volume(v.Name)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+ return nil, fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
}
if vol.config.LockID == c.config.LockID {
- return nil, errors.Wrapf(define.ErrWillDeadlock, "container %s and volume %s share lock ID %d", c.ID(), vol.Name(), c.config.LockID)
+ return nil, fmt.Errorf("container %s and volume %s share lock ID %d: %w", c.ID(), vol.Name(), c.config.LockID, define.ErrWillDeadlock)
}
vol.lock.Lock()
defer vol.lock.Unlock()
if vol.needsMount() {
if err := vol.mount(); err != nil {
- return nil, errors.Wrapf(err, "error mounting volume %s for container %s", vol.Name(), c.ID())
+ return nil, fmt.Errorf("error mounting volume %s for container %s: %w", vol.Name(), c.ID(), err)
}
}
// The volume may need a copy-up. Check the state.
if err := vol.update(); err != nil {
return nil, err
}
- if vol.state.NeedsCopyUp {
+ _, hasNoCopy := vol.config.Options["nocopy"]
+ if vol.state.NeedsCopyUp && !cutil.StringInSlice("nocopy", v.Options) && !hasNoCopy {
logrus.Debugf("Copying up contents from container %s to volume %s", c.ID(), vol.Name())
- // If the volume is not empty, we should not copy up.
- volMount := vol.mountPoint()
- contents, err := ioutil.ReadDir(volMount)
- if err != nil {
- return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID())
- }
- if len(contents) > 0 {
- // The volume is not empty. It was likely modified
- // outside of Podman. For safety, let's not copy up into
- // it. Fixes CVE-2020-1726.
- return vol, nil
- }
-
srcDir, err := securejoin.SecureJoin(mountpoint, v.Dest)
if err != nil {
- return nil, errors.Wrapf(err, "error calculating destination path to copy up container %s volume %s", c.ID(), vol.Name())
+ return nil, fmt.Errorf("error calculating destination path to copy up container %s volume %s: %w", c.ID(), vol.Name(), err)
}
// Do a manual stat on the source directory to verify existence.
// Skip the rest if it exists.
- // TODO: Should this be stat or lstat? I'm using lstat because I
- // think copy-up doesn't happen when the source is a link.
srcStat, err := os.Lstat(srcDir)
if err != nil {
if os.IsNotExist(err) {
@@ -1702,7 +1657,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
// up.
return vol, nil
}
- return nil, errors.Wrapf(err, "error identifying source directory for copy up into volume %s", vol.Name())
+ return nil, fmt.Errorf("error identifying source directory for copy up into volume %s: %w", vol.Name(), err)
}
// If it's not a directory we're mounting over it.
if !srcStat.IsDir() {
@@ -1714,12 +1669,25 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
// RHBZ#1928643
srcContents, err := ioutil.ReadDir(srcDir)
if err != nil {
- return nil, errors.Wrapf(err, "error reading contents of source directory for copy up into volume %s", vol.Name())
+ return nil, fmt.Errorf("error reading contents of source directory for copy up into volume %s: %w", vol.Name(), err)
}
if len(srcContents) == 0 {
return vol, nil
}
+ // If the volume is not empty, we should not copy up.
+ volMount := vol.mountPoint()
+ contents, err := ioutil.ReadDir(volMount)
+ if err != nil {
+ return nil, fmt.Errorf("error listing contents of volume %s mountpoint when copying up from container %s: %w", vol.Name(), c.ID(), err)
+ }
+ if len(contents) > 0 {
+ // The volume is not empty. It was likely modified
+ // outside of Podman. For safety, let's not copy up into
+ // it. Fixes CVE-2020-1726.
+ return vol, nil
+ }
+
// Set NeedsCopyUp to false since we are about to do first copy
// Do not copy second time.
vol.state.NeedsCopyUp = false
@@ -1753,11 +1721,11 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
if err2 != nil {
logrus.Errorf("Streaming contents of container %s directory for volume copy-up: %v", c.ID(), err2)
}
- return nil, errors.Wrapf(err, "error copying up to volume %s", vol.Name())
+ return nil, fmt.Errorf("error copying up to volume %s: %w", vol.Name(), err)
}
if err := <-errChan; err != nil {
- return nil, errors.Wrapf(err, "error streaming container content for copy up into volume %s", vol.Name())
+ return nil, fmt.Errorf("error streaming container content for copy up into volume %s: %w", vol.Name(), err)
}
}
return vol, nil
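
Copy-up is now skipped when either the mount options or the volume's own options contain "nocopy", and the non-empty-volume guard moves after the source-directory checks. A simplified sketch of just the new guard, with stand-in types instead of Podman's volume structures:

package main

import "fmt"

// shouldCopyUp mirrors the new condition: copy-up only runs when the volume
// still needs it and neither the mount options nor the volume's creation
// options request "nocopy".
func shouldCopyUp(needsCopyUp bool, mountOpts []string, volOpts map[string]string) bool {
	for _, o := range mountOpts {
		if o == "nocopy" {
			return false
		}
	}
	if _, ok := volOpts["nocopy"]; ok {
		return false
	}
	return needsCopyUp
}

func main() {
	fmt.Println(shouldCopyUp(true, []string{"rw"}, map[string]string{}))     // true
	fmt.Println(shouldCopyUp(true, []string{"nocopy"}, map[string]string{})) // false
	fmt.Println(shouldCopyUp(true, nil, map[string]string{"nocopy": ""}))    // false
}
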
@@ -1792,7 +1760,7 @@ func (c *Container) cleanupStorage() error {
overlayBasePath := filepath.Dir(c.state.Mountpoint)
if err := overlay.Unmount(overlayBasePath); err != nil {
if cleanupErr != nil {
- logrus.Errorf("Failed to cleanup overlay mounts for %s: %v", c.ID(), err)
+ logrus.Errorf("Failed to clean up overlay mounts for %s: %v", c.ID(), err)
}
cleanupErr = err
}
@@ -1809,7 +1777,7 @@ func (c *Container) cleanupStorage() error {
if err := c.cleanupOverlayMounts(); err != nil {
// If the container can't remove content report the error
- logrus.Errorf("Failed to cleanup overlay mounts for %s: %v", c.ID(), err)
+ logrus.Errorf("Failed to clean up overlay mounts for %s: %v", c.ID(), err)
cleanupErr = err
}
@@ -1823,7 +1791,7 @@ func (c *Container) cleanupStorage() error {
// error
// We still want to be able to kick the container out of the
// state
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown || errors.Cause(err) == storage.ErrLayerNotMounted {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) || errors.Is(err, storage.ErrLayerNotMounted) {
logrus.Errorf("Storage for container %s has been removed", c.ID())
} else {
if cleanupErr != nil {
@@ -1840,7 +1808,7 @@ func (c *Container) cleanupStorage() error {
if cleanupErr != nil {
logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
}
- cleanupErr = errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+ cleanupErr = fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
// We need to try and unmount every volume, so continue
// if they fail.
@@ -1853,7 +1821,7 @@ func (c *Container) cleanupStorage() error {
if cleanupErr != nil {
logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
}
- cleanupErr = errors.Wrapf(err, "error unmounting volume %s for container %s", vol.Name(), c.ID())
+ cleanupErr = fmt.Errorf("error unmounting volume %s for container %s: %w", vol.Name(), c.ID(), err)
}
vol.lock.Unlock()
}
@@ -1878,7 +1846,7 @@ func (c *Container) cleanup(ctx context.Context) error {
// Clean up network namespace, if present
if err := c.cleanupNetwork(); err != nil {
- lastError = errors.Wrapf(err, "error removing container %s network", c.ID())
+ lastError = fmt.Errorf("error removing container %s network: %w", c.ID(), err)
}
// cleanup host entry if it is shared
@@ -1888,7 +1856,7 @@ func (c *Container) cleanup(ctx context.Context) error {
// we cannot use the dependency container lock due ABBA deadlocks
if lock, err := lockfile.GetLockfile(hoststFile); err == nil {
lock.Lock()
- // make sure to ignore ENOENT error in case the netns container was cleanup before this one
+ // make sure to ignore ENOENT error in case the netns container was cleaned up before this one
if err := etchosts.Remove(hoststFile, getLocalhostHostEntry(c)); err != nil && !errors.Is(err, os.ErrNotExist) {
// this error is not fatal we still want to do proper cleanup
logrus.Errorf("failed to remove hosts entry from the netns containers /etc/hosts: %v", err)
@@ -1916,7 +1884,7 @@ func (c *Container) cleanup(ctx context.Context) error {
if lastError != nil {
logrus.Errorf("Unmounting container %s storage: %v", c.ID(), err)
} else {
- lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID())
+ lastError = fmt.Errorf("error unmounting container %s storage: %w", c.ID(), err)
}
}
@@ -1947,6 +1915,18 @@ func (c *Container) cleanup(ctx context.Context) error {
}
}
+ // Prune the exit codes of other containers during cleanup.
+ // Since Podman has no daemon, we have to clean them up somewhere.
+ // Cleanup seems like a good place as it's not performance
+ // critical.
+ if err := c.runtime.state.PruneContainerExitCodes(); err != nil {
+ if lastError == nil {
+ lastError = err
+ } else {
+ logrus.Errorf("Pruning container exit codes: %v", err)
+ }
+ }
+
return lastError
}
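
cleanup() now also prunes stored exit codes and folds a failure into the usual first-error-wins handling: the first cleanup error is returned, later ones are only logged. A generic sketch of that pattern (not Podman code):

package main

import (
	"errors"
	"fmt"
	"log"
)

// recordErr keeps the first error seen during cleanup and logs any later
// ones, matching the lastError handling above.
func recordErr(lastError *error, err error) {
	if err == nil {
		return
	}
	if *lastError == nil {
		*lastError = err
		return
	}
	log.Printf("additional cleanup error: %v", err)
}

func main() {
	var lastError error
	recordErr(&lastError, nil)
	recordErr(&lastError, errors.New("network teardown failed"))
	recordErr(&lastError, errors.New("exit-code pruning failed")) // logged only
	fmt.Println(lastError)
}
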
@@ -1988,11 +1968,11 @@ func (c *Container) stopPodIfNeeded(ctx context.Context) error {
// hooks.
func (c *Container) delete(ctx context.Context) error {
if err := c.ociRuntime.DeleteContainer(c); err != nil {
- return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
+ return fmt.Errorf("error removing container %s from runtime: %w", c.ID(), err)
}
if err := c.postDeleteHooks(ctx); err != nil {
- return errors.Wrapf(err, "container %s poststop hooks", c.ID())
+ return fmt.Errorf("container %s poststop hooks: %w", c.ID(), err)
}
return nil
@@ -2051,7 +2031,7 @@ func (c *Container) writeStringToRundir(destFile, contents string) (string, erro
destFileName := filepath.Join(c.state.RunDir, destFile)
if err := os.Remove(destFileName); err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "error removing %s for container %s", destFile, c.ID())
+ return "", fmt.Errorf("error removing %s for container %s: %w", destFile, c.ID(), err)
}
if err := writeStringToPath(destFileName, contents, c.config.MountLabel, c.RootUID(), c.RootGID()); err != nil {
@@ -2085,22 +2065,22 @@ func (c *Container) saveSpec(spec *spec.Spec) error {
jsonPath := filepath.Join(c.bundlePath(), "config.json")
if _, err := os.Stat(jsonPath); err != nil {
if !os.IsNotExist(err) {
- return errors.Wrapf(err, "error doing stat on container %s spec", c.ID())
+ return fmt.Errorf("error doing stat on container %s spec: %w", c.ID(), err)
}
// The spec does not exist, we're fine
} else {
// The spec exists, need to remove it
if err := os.Remove(jsonPath); err != nil {
- return errors.Wrapf(err, "error replacing runtime spec for container %s", c.ID())
+ return fmt.Errorf("error replacing runtime spec for container %s: %w", c.ID(), err)
}
}
fileJSON, err := json.Marshal(spec)
if err != nil {
- return errors.Wrapf(err, "error exporting runtime spec for container %s to JSON", c.ID())
+ return fmt.Errorf("error exporting runtime spec for container %s to JSON: %w", c.ID(), err)
}
if err := ioutil.WriteFile(jsonPath, fileJSON, 0644); err != nil {
- return errors.Wrapf(err, "error writing runtime spec JSON for container %s to disk", c.ID())
+ return fmt.Errorf("error writing runtime spec JSON for container %s to disk: %w", c.ID(), err)
}
logrus.Debugf("Created OCI spec for container %s at %s", c.ID(), jsonPath)
@@ -2163,19 +2143,19 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s
// mount mounts the container's root filesystem
func (c *Container) mount() (string, error) {
if c.state.State == define.ContainerStateRemoving {
- return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
+ return "", fmt.Errorf("cannot mount container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
}
mountPoint, err := c.runtime.storageService.MountContainerImage(c.ID())
if err != nil {
- return "", errors.Wrapf(err, "error mounting storage for container %s", c.ID())
+ return "", fmt.Errorf("error mounting storage for container %s: %w", c.ID(), err)
}
mountPoint, err = filepath.EvalSymlinks(mountPoint)
if err != nil {
- return "", errors.Wrapf(err, "error resolving storage path for container %s", c.ID())
+ return "", fmt.Errorf("error resolving storage path for container %s: %w", c.ID(), err)
}
if err := os.Chown(mountPoint, c.RootUID(), c.RootGID()); err != nil {
- return "", errors.Wrapf(err, "cannot chown %s to %d:%d", mountPoint, c.RootUID(), c.RootGID())
+ return "", fmt.Errorf("cannot chown %s to %d:%d: %w", mountPoint, c.RootUID(), c.RootGID(), err)
}
return mountPoint, nil
}
@@ -2184,7 +2164,7 @@ func (c *Container) mount() (string, error) {
func (c *Container) unmount(force bool) error {
// Also unmount storage
if _, err := c.runtime.storageService.UnmountContainerImage(c.ID(), force); err != nil {
- return errors.Wrapf(err, "error unmounting container %s root filesystem", c.ID())
+ return fmt.Errorf("error unmounting container %s root filesystem: %w", c.ID(), err)
}
return nil
@@ -2197,11 +2177,11 @@ func (c *Container) unmount(force bool) error {
// Returns nil if safe to remove, or an error describing why it's unsafe if not.
func (c *Container) checkReadyForRemoval() error {
if c.state.State == define.ContainerStateUnknown {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
+ return fmt.Errorf("container %s is in invalid state: %w", c.ID(), define.ErrCtrStateInvalid)
}
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) && !c.IsInfra() {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String())
+ return fmt.Errorf("cannot remove container %s as it is %s - running or paused containers cannot be removed without force: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
}
// Check exec sessions
@@ -2210,7 +2190,7 @@ func (c *Container) checkReadyForRemoval() error {
return err
}
if len(sessions) != 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
+ return fmt.Errorf("cannot remove container %s as it has active exec sessions: %w", c.ID(), define.ErrCtrStateInvalid)
}
return nil
@@ -2313,7 +2293,7 @@ func (c *Container) checkExitFile() error {
return nil
}
- return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
+ return fmt.Errorf("error running stat on container %s exit file: %w", c.ID(), err)
}
// Alright, it exists. Transition to Stopped state.
@@ -2351,11 +2331,11 @@ func (c *Container) extractSecretToCtrStorage(secr *ContainerSecret) error {
hostUID, hostGID, err := butil.GetHostIDs(util.IDtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), util.IDtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), secr.UID, secr.GID)
if err != nil {
- return errors.Wrap(err, "unable to extract secret")
+ return fmt.Errorf("unable to extract secret: %w", err)
}
err = ioutil.WriteFile(secretFile, data, 0644)
if err != nil {
- return errors.Wrapf(err, "unable to create %s", secretFile)
+ return fmt.Errorf("unable to create %s: %w", secretFile, err)
}
if err := os.Lchown(secretFile, int(hostUID), int(hostGID)); err != nil {
return err
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index d7683cce9..390e95258 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -5,11 +5,11 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
"math"
- "net"
"os"
"os/user"
"path"
@@ -29,6 +29,7 @@ import (
"github.com/containers/buildah/pkg/overlay"
butil "github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/etchosts"
+ "github.com/containers/common/libnetwork/resolvconf"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/apparmor"
"github.com/containers/common/pkg/cgroups"
@@ -36,6 +37,7 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/subscriptions"
"github.com/containers/common/pkg/umask"
+ cutil "github.com/containers/common/pkg/util"
is "github.com/containers/image/v5/storage"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
@@ -43,7 +45,6 @@ import (
"github.com/containers/podman/v4/pkg/checkpoint/crutils"
"github.com/containers/podman/v4/pkg/criu"
"github.com/containers/podman/v4/pkg/lookup"
- "github.com/containers/podman/v4/pkg/resolvconf"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/podman/v4/utils"
@@ -57,7 +58,6 @@ import (
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -65,7 +65,7 @@ import (
func (c *Container) mountSHM(shmOptions string) error {
if err := unix.Mount("shm", c.config.ShmDir, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV,
label.FormatMountLabel(shmOptions, c.config.MountLabel)); err != nil {
- return errors.Wrapf(err, "failed to mount shm tmpfs %q", c.config.ShmDir)
+ return fmt.Errorf("failed to mount shm tmpfs %q: %w", c.config.ShmDir, err)
}
return nil
}
@@ -73,7 +73,7 @@ func (c *Container) mountSHM(shmOptions string) error {
func (c *Container) unmountSHM(mount string) error {
if err := unix.Unmount(mount, 0); err != nil {
if err != syscall.EINVAL && err != syscall.ENOENT {
- return errors.Wrapf(err, "error unmounting container %s SHM mount %s", c.ID(), mount)
+ return fmt.Errorf("error unmounting container %s SHM mount %s: %w", c.ID(), mount, err)
}
// If it's just an EINVAL or ENOENT, debug logs only
logrus.Debugf("Container %s failed to unmount %s : %v", c.ID(), mount, err)
@@ -152,7 +152,7 @@ func (c *Container) prepare() error {
// createErr is guaranteed non-nil, so print
// unconditionally
logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
- createErr = errors.Wrapf(err, "error unmounting storage for container %s after network create failure", c.ID())
+ createErr = fmt.Errorf("error unmounting storage for container %s after network create failure: %w", c.ID(), err)
}
}
@@ -161,7 +161,7 @@ func (c *Container) prepare() error {
if createErr != nil {
if err := c.cleanupNetwork(); err != nil {
logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
- createErr = errors.Wrapf(err, "error cleaning up container %s network after setup failure", c.ID())
+ createErr = fmt.Errorf("error cleaning up container %s network after setup failure: %w", c.ID(), err)
}
}
@@ -251,7 +251,7 @@ func (c *Container) resolveWorkDir() error {
st, err := os.Stat(resolvedWorkdir)
if err == nil {
if !st.IsDir() {
- return errors.Errorf("workdir %q exists on container %s, but is not a directory", workdir, c.ID())
+ return fmt.Errorf("workdir %q exists on container %s, but is not a directory", workdir, c.ID())
}
return nil
}
@@ -265,11 +265,11 @@ func (c *Container) resolveWorkDir() error {
if c.isWorkDirSymlink(resolvedWorkdir) {
return nil
}
- return errors.Errorf("workdir %q does not exist on container %s", workdir, c.ID())
+ return fmt.Errorf("workdir %q does not exist on container %s", workdir, c.ID())
}
// This might be a serious error (e.g., permission), so
// we need to return the full error.
- return errors.Wrapf(err, "error detecting workdir %q on container %s", workdir, c.ID())
+ return fmt.Errorf("error detecting workdir %q on container %s: %w", workdir, c.ID(), err)
}
return nil
}
@@ -277,16 +277,16 @@ func (c *Container) resolveWorkDir() error {
if os.IsExist(err) {
return nil
}
- return errors.Wrapf(err, "error creating container %s workdir", c.ID())
+ return fmt.Errorf("error creating container %s workdir: %w", c.ID(), err)
}
// Ensure container entrypoint is created (if required).
uid, gid, _, err := chrootuser.GetUser(c.state.Mountpoint, c.User())
if err != nil {
- return errors.Wrapf(err, "error looking up %s inside of the container %s", c.User(), c.ID())
+ return fmt.Errorf("error looking up %s inside of the container %s: %w", c.User(), c.ID(), err)
}
if err := os.Chown(resolvedWorkdir, int(uid), int(gid)); err != nil {
- return errors.Wrapf(err, "error chowning container %s workdir to container root", c.ID())
+ return fmt.Errorf("error chowning container %s workdir to container root: %w", c.ID(), err)
}
return nil
@@ -311,7 +311,7 @@ func (c *Container) cleanupNetwork() error {
// Stop the container's network namespace (if it has one)
if err := c.runtime.teardownNetNS(c); err != nil {
- logrus.Errorf("Unable to cleanup network for container %s: %q", c.ID(), err)
+ logrus.Errorf("Unable to clean up network for container %s: %q", c.ID(), err)
}
c.state.NetNS = nil
@@ -367,7 +367,7 @@ func (c *Container) getUserOverrides() *lookup.Overrides {
func lookupHostUser(name string) (*runcuser.ExecUser, error) {
var execUser runcuser.ExecUser
- // Lookup User on host
+ // Look up User on host
u, err := util.LookupUser(name)
if err != nil {
return &execUser, err
@@ -387,13 +387,44 @@ func lookupHostUser(name string) (*runcuser.ExecUser, error) {
return &execUser, nil
}
+// Internal only function which returns upper and work dir from
+// overlay options.
+func getOverlayUpperAndWorkDir(options []string) (string, string, error) {
+ upperDir := ""
+ workDir := ""
+ for _, o := range options {
+ if strings.HasPrefix(o, "upperdir") {
+ splitOpt := strings.SplitN(o, "=", 2)
+ if len(splitOpt) > 1 {
+ upperDir = splitOpt[1]
+ if upperDir == "" {
+ return "", "", errors.New("cannot accept empty value for upperdir")
+ }
+ }
+ }
+ if strings.HasPrefix(o, "workdir") {
+ splitOpt := strings.SplitN(o, "=", 2)
+ if len(splitOpt) > 1 {
+ workDir = splitOpt[1]
+ if workDir == "" {
+ return "", "", errors.New("cannot accept empty value for workdir")
+ }
+ }
+ }
+ }
+ if (upperDir != "" && workDir == "") || (upperDir == "" && workDir != "") {
+ return "", "", errors.New("must specify both upperdir and workdir")
+ }
+ return upperDir, workDir, nil
+}
+
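
A brief usage sketch of the helper added above; it assumes getOverlayUpperAndWorkDir is in scope and shows both the happy path and the paired-option validation:

// Illustrative only; exercises the helper defined above.
func exampleOverlayDirs() {
	upper, work, err := getOverlayUpperAndWorkDir([]string{"O", "upperdir=/tmp/upper", "workdir=/tmp/work"})
	fmt.Println(upper, work, err) // /tmp/upper /tmp/work <nil>

	_, _, err = getOverlayUpperAndWorkDir([]string{"upperdir=/tmp/upper"})
	fmt.Println(err) // must specify both upperdir and workdir
}
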
// Generate spec for a container
// Accepts a map of the container's dependencies
func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
overrides := c.getUserOverrides()
execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides)
if err != nil {
- if util.StringInSlice(c.config.User, c.config.HostUsers) {
+ if cutil.StringInSlice(c.config.User, c.config.HostUsers) {
execUser, err = lookupHostUser(c.config.User)
}
if err != nil {
@@ -406,6 +437,14 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
//nolint:staticcheck
g := generate.NewFromSpec(c.config.Spec)
+ // If the flag to mount all devices is set for a privileged container, add
+ // all the devices from the host's machine into the container
+ if c.config.MountAllDevices {
+ if err := util.AddPrivilegedDevices(&g); err != nil {
+ return nil, err
+ }
+ }
+
// If network namespace was requested, add it now
if c.config.CreateNetNS {
if c.config.PostConfigureNetNS {
@@ -446,7 +485,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
for _, namedVol := range c.config.NamedVolumes {
volume, err := c.runtime.GetVolume(namedVol.Name)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID())
+ return nil, fmt.Errorf("error retrieving volume %s to add to container %s: %w", namedVol.Name, c.ID(), err)
}
mountPoint, err := volume.MountPoint()
if err != nil {
@@ -459,23 +498,9 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
for _, o := range namedVol.Options {
if o == "O" {
overlayFlag = true
- }
- if overlayFlag && strings.Contains(o, "upperdir") {
- splitOpt := strings.SplitN(o, "=", 2)
- if len(splitOpt) > 1 {
- upperDir = splitOpt[1]
- if upperDir == "" {
- return nil, errors.New("cannot accept empty value for upperdir")
- }
- }
- }
- if overlayFlag && strings.Contains(o, "workdir") {
- splitOpt := strings.SplitN(o, "=", 2)
- if len(splitOpt) > 1 {
- workDir = splitOpt[1]
- if workDir == "" {
- return nil, errors.New("cannot accept empty value for workdir")
- }
+ upperDir, workDir, err = getOverlayUpperAndWorkDir(namedVol.Options)
+ if err != nil {
+ return nil, err
}
}
}
@@ -488,10 +513,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
return nil, err
}
- if (upperDir != "" && workDir == "") || (upperDir == "" && workDir != "") {
- return nil, errors.Wrapf(err, "must specify both upperdir and workdir")
- }
-
overlayOpts = &overlay.Options{RootUID: c.RootUID(),
RootGID: c.RootGID(),
UpperDirOptionFragment: upperDir,
@@ -501,7 +522,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
overlayMount, err = overlay.MountWithOptions(contentDir, mountPoint, namedVol.Dest, overlayOpts)
if err != nil {
- return nil, errors.Wrapf(err, "mounting overlay failed %q", mountPoint)
+ return nil, fmt.Errorf("mounting overlay failed %q: %w", mountPoint, err)
}
for _, o := range namedVol.Options {
@@ -584,13 +605,24 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Add overlay volumes
for _, overlayVol := range c.config.OverlayVolumes {
+ upperDir, workDir, err := getOverlayUpperAndWorkDir(overlayVol.Options)
+ if err != nil {
+ return nil, err
+ }
contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID())
if err != nil {
return nil, err
}
- overlayMount, err := overlay.Mount(contentDir, overlayVol.Source, overlayVol.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
+ overlayOpts := &overlay.Options{RootUID: c.RootUID(),
+ RootGID: c.RootGID(),
+ UpperDirOptionFragment: upperDir,
+ WorkDirOptionFragment: workDir,
+ GraphOpts: c.runtime.store.GraphOptions(),
+ }
+
+ overlayMount, err := overlay.MountWithOptions(contentDir, overlayVol.Source, overlayVol.Dest, overlayOpts)
if err != nil {
- return nil, errors.Wrapf(err, "mounting overlay failed %q", overlayVol.Source)
+ return nil, fmt.Errorf("mounting overlay failed %q: %w", overlayVol.Source, err)
}
// Check overlay volume options
@@ -614,16 +646,16 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Mount the specified image.
img, _, err := c.runtime.LibimageRuntime().LookupImage(volume.Source, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error creating image volume %q:%q", volume.Source, volume.Dest)
+ return nil, fmt.Errorf("error creating image volume %q:%q: %w", volume.Source, volume.Dest, err)
}
mountPoint, err := img.Mount(ctx, nil, "")
if err != nil {
- return nil, errors.Wrapf(err, "error mounting image volume %q:%q", volume.Source, volume.Dest)
+ return nil, fmt.Errorf("error mounting image volume %q:%q: %w", volume.Source, volume.Dest, err)
}
contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID())
if err != nil {
- return nil, errors.Wrapf(err, "failed to create TempDir in the %s directory", c.config.StaticDir)
+ return nil, fmt.Errorf("failed to create TempDir in the %s directory: %w", c.config.StaticDir, err)
}
var overlayMount spec.Mount
@@ -633,7 +665,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
overlayMount, err = overlay.MountReadOnly(contentDir, mountPoint, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
}
if err != nil {
- return nil, errors.Wrapf(err, "creating overlay mount for image %q failed", volume.Source)
+ return nil, fmt.Errorf("creating overlay mount for image %q failed: %w", volume.Source, err)
}
g.AddMount(overlayMount)
}
@@ -658,7 +690,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if c.config.Umask != "" {
decVal, err := strconv.ParseUint(c.config.Umask, 8, 32)
if err != nil {
- return nil, errors.Wrapf(err, "Invalid Umask Value")
+ return nil, fmt.Errorf("invalid Umask Value: %w", err)
}
umask := uint32(decVal)
g.Config.Process.User.Umask = &umask
@@ -668,7 +700,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if len(c.config.Groups) > 0 {
gids, err := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, overrides)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s", c.ID())
+ return nil, fmt.Errorf("error looking up supplemental groups for container %s: %w", c.ID(), err)
}
for _, gid := range gids {
g.AddProcessAdditionalGid(gid)
@@ -677,7 +709,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if c.Systemd() {
if err := c.setupSystemd(g.Mounts(), g); err != nil {
- return nil, errors.Wrapf(err, "error adding systemd-specific mounts")
+ return nil, fmt.Errorf("error adding systemd-specific mounts: %w", err)
}
}
@@ -693,7 +725,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Check whether the current user namespace has enough gids available.
availableGids, err := rootless.GetAvailableGids()
if err != nil {
- return nil, errors.Wrapf(err, "cannot read number of available GIDs")
+ return nil, fmt.Errorf("cannot read number of available GIDs: %w", err)
}
gidMappings = []idtools.IDMap{{
ContainerID: 0,
@@ -838,6 +870,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err != nil {
return nil, err
}
+
g.SetLinuxCgroupsPath(cgroupPath)
// Warning: CDI may alter g.Config in place.
@@ -848,7 +881,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
_, err := registry.InjectDevices(g.Config, c.config.CDIDevices...)
if err != nil {
- return nil, errors.Wrapf(err, "error setting up CDI devices")
+ return nil, fmt.Errorf("error setting up CDI devices: %w", err)
}
}
@@ -872,7 +905,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if m.Type == "tmpfs" {
finalPath, err := securejoin.SecureJoin(c.state.Mountpoint, m.Destination)
if err != nil {
- return nil, errors.Wrapf(err, "error resolving symlinks for mount destination %s", m.Destination)
+ return nil, fmt.Errorf("error resolving symlinks for mount destination %s: %w", m.Destination, err)
}
trimmedPath := strings.TrimPrefix(finalPath, strings.TrimSuffix(c.state.Mountpoint, "/"))
m.Destination = trimmedPath
@@ -901,7 +934,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Warning: precreate hooks may alter g.Config in place.
if c.state.ExtensionStageHooks, err = c.setupOCIHooks(ctx, g.Config); err != nil {
- return nil, errors.Wrapf(err, "error setting up OCI Hooks")
+ return nil, fmt.Errorf("error setting up OCI Hooks: %w", err)
}
if len(c.config.EnvSecrets) > 0 {
manager, err := c.runtime.SecretsManager()
@@ -953,11 +986,11 @@ func (c *Container) mountNotifySocket(g generate.Generator) error {
logrus.Debugf("Checking notify %q dir", notifyDir)
if err := os.MkdirAll(notifyDir, 0755); err != nil {
if !os.IsExist(err) {
- return errors.Wrapf(err, "unable to create notify %q dir", notifyDir)
+ return fmt.Errorf("unable to create notify %q dir: %w", notifyDir, err)
}
}
if err := label.Relabel(notifyDir, c.MountLabel(), true); err != nil {
- return errors.Wrapf(err, "relabel failed %q", notifyDir)
+ return fmt.Errorf("relabel failed %q: %w", notifyDir, err)
}
logrus.Debugf("Add bindmount notify %q dir", notifyDir)
if _, ok := c.state.BindMounts["/run/notify"]; !ok {
@@ -1080,7 +1113,7 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro
func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr string, specNS spec.LinuxNamespaceType) error {
nsCtr, err := c.runtime.state.Container(ctr)
if err != nil {
- return errors.Wrapf(err, "error retrieving dependency %s of container %s from state", ctr, c.ID())
+ return fmt.Errorf("error retrieving dependency %s of container %s from state: %w", ctr, c.ID(), err)
}
if specNS == spec.UTSNamespace {
@@ -1090,7 +1123,6 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
g.AddProcessEnv("HOSTNAME", hostname)
}
- // TODO need unlocked version of this for use in pods
nsPath, err := nsCtr.NamespacePath(ns)
if err != nil {
return err
@@ -1110,7 +1142,7 @@ func (c *Container) addCheckpointImageMetadata(importBuilder *buildah.Builder) e
return fmt.Errorf("getting host info: %v", err)
}
- criuVersion, err := criu.GetCriuVestion()
+ criuVersion, err := criu.GetCriuVersion()
if err != nil {
return fmt.Errorf("getting criu version: %v", err)
}
@@ -1168,7 +1200,7 @@ func (c *Container) createCheckpointImage(ctx context.Context, options Container
// Create storage reference
imageRef, err := is.Transport.ParseStoreReference(c.runtime.store, options.CreateImage)
if err != nil {
- return errors.Errorf("Failed to parse image name")
+ return errors.New("failed to parse image name")
}
// Build an image scratch
@@ -1179,7 +1211,7 @@ func (c *Container) createCheckpointImage(ctx context.Context, options Container
if err != nil {
return err
}
- // Clean-up buildah working container
+ // Clean up buildah working container
defer func() {
if err := importBuilder.Delete(); err != nil {
logrus.Errorf("Image builder delete failed: %v", err)
@@ -1232,23 +1264,23 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
// Check if the dependency is an infra container. If it is we can checkpoint
// the container out of the Pod.
if c.config.Pod == "" {
- return errors.Errorf("cannot export checkpoints of containers with dependencies")
+ return errors.New("cannot export checkpoints of containers with dependencies")
}
pod, err := c.runtime.state.Pod(c.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), c.config.Pod)
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), c.config.Pod, err)
}
infraID, err := pod.InfraContainerID()
if err != nil {
- return errors.Wrapf(err, "cannot retrieve infra container ID for pod %s", c.config.Pod)
+ return fmt.Errorf("cannot retrieve infra container ID for pod %s: %w", c.config.Pod, err)
}
if c.Dependencies()[0] != infraID {
- return errors.Errorf("cannot export checkpoints of containers with dependencies")
+ return errors.New("cannot export checkpoints of containers with dependencies")
}
}
if len(c.Dependencies()) > 1 {
- return errors.Errorf("cannot export checkpoints of containers with dependencies")
+ return errors.New("cannot export checkpoints of containers with dependencies")
}
logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
@@ -1276,7 +1308,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
// To correctly track deleted files, let's go through the output of 'podman diff'
rootFsChanges, err := c.runtime.GetDiff("", c.ID(), define.DiffContainer)
if err != nil {
- return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID())
+ return fmt.Errorf("error exporting root file-system diff for %q: %w", c.ID(), err)
}
addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath())
@@ -1293,7 +1325,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
// Create an archive for each volume associated with the container
if !options.IgnoreVolumes {
if err := os.MkdirAll(expVolDir, 0700); err != nil {
- return errors.Wrapf(err, "error creating volumes export directory %q", expVolDir)
+ return fmt.Errorf("error creating volumes export directory %q: %w", expVolDir, err)
}
for _, v := range c.config.NamedVolumes {
@@ -1302,7 +1334,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
volumeTarFile, err := os.Create(volumeTarFileFullPath)
if err != nil {
- return errors.Wrapf(err, "error creating %q", volumeTarFileFullPath)
+ return fmt.Errorf("error creating %q: %w", volumeTarFileFullPath, err)
}
volume, err := c.runtime.GetVolume(v.Name)
@@ -1315,7 +1347,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
return err
}
if mp == "" {
- return errors.Wrapf(define.ErrInternal, "volume %s is not mounted, cannot export", volume.Name())
+ return fmt.Errorf("volume %s is not mounted, cannot export: %w", volume.Name(), define.ErrInternal)
}
input, err := archive.TarWithOptions(mp, &archive.TarOptions{
@@ -1323,7 +1355,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
IncludeSourceDir: true,
})
if err != nil {
- return errors.Wrapf(err, "error reading volume directory %q", v.Dest)
+ return fmt.Errorf("error reading volume directory %q: %w", v.Dest, err)
}
_, err = io.Copy(volumeTarFile, input)
@@ -1343,12 +1375,12 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
})
if err != nil {
- return errors.Wrapf(err, "error reading checkpoint directory %q", c.ID())
+ return fmt.Errorf("error reading checkpoint directory %q: %w", c.ID(), err)
}
outFile, err := os.Create(options.TargetFile)
if err != nil {
- return errors.Wrapf(err, "error creating checkpoint export file %q", options.TargetFile)
+ return fmt.Errorf("error creating checkpoint export file %q: %w", options.TargetFile, err)
}
defer outFile.Close()
@@ -1374,10 +1406,10 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
func (c *Container) checkpointRestoreSupported(version int) error {
if !criu.CheckForCriu(version) {
- return errors.Errorf("checkpoint/restore requires at least CRIU %d", version)
+ return fmt.Errorf("checkpoint/restore requires at least CRIU %d", version)
}
if !c.ociRuntime.SupportsCheckpoint() {
- return errors.Errorf("configured runtime does not support checkpoint/restore")
+ return errors.New("configured runtime does not support checkpoint/restore")
}
return nil
}
@@ -1388,11 +1420,11 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if c.state.State != define.ContainerStateRunning {
- return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
+ return nil, 0, fmt.Errorf("%q is not running, cannot checkpoint: %w", c.state.State, define.ErrCtrStateInvalid)
}
if c.AutoRemove() && options.TargetFile == "" {
- return nil, 0, errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
+ return nil, 0, errors.New("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
}
if err := c.resolveCheckpointImageName(&options); err != nil {
@@ -1473,7 +1505,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
c.state.Restored = false
c.state.RestoredTime = time.Time{}
- // Cleanup Storage and Network
+ // Clean up Storage and Network
if err := c.cleanup(ctx); err != nil {
return nil, 0, err
}
@@ -1485,12 +1517,12 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
statsDirectory, err := os.Open(c.bundlePath())
if err != nil {
- return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath())
+ return nil, fmt.Errorf("not able to open %q: %w", c.bundlePath(), err)
}
dumpStatistics, err := stats.CriuGetDumpStats(statsDirectory)
if err != nil {
- return nil, errors.Wrap(err, "Displaying checkpointing statistics not possible")
+ return nil, fmt.Errorf("displaying checkpointing statistics not possible: %w", err)
}
return &define.CRIUCheckpointRestoreStatistics{
@@ -1536,7 +1568,7 @@ func (c *Container) generateContainerSpec() error {
g := generate.NewFromSpec(c.config.Spec)
if err := c.saveSpec(g.Config); err != nil {
- return errors.Wrap(err, "saving imported container specification for restore failed")
+ return fmt.Errorf("saving imported container specification for restore failed: %w", err)
}
return nil
@@ -1594,14 +1626,14 @@ func (c *Container) importCheckpointTar(input string) error {
func (c *Container) importPreCheckpoint(input string) error {
archiveFile, err := os.Open(input)
if err != nil {
- return errors.Wrap(err, "failed to open pre-checkpoint archive for import")
+ return fmt.Errorf("failed to open pre-checkpoint archive for import: %w", err)
}
defer archiveFile.Close()
err = archive.Untar(archiveFile, c.bundlePath(), nil)
if err != nil {
- return errors.Wrapf(err, "Unpacking of pre-checkpoint archive %s failed", input)
+ return fmt.Errorf("unpacking of pre-checkpoint archive %s failed: %w", input, err)
}
return nil
}
@@ -1618,11 +1650,11 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
if options.Pod != "" && !crutils.CRRuntimeSupportsPodCheckpointRestore(c.ociRuntime.Path()) {
- return nil, 0, errors.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
+ return nil, 0, fmt.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
}
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
- return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
+ return nil, 0, fmt.Errorf("container %s is running or paused, cannot restore: %w", c.ID(), define.ErrCtrStateInvalid)
}
if options.ImportPrevious != "" {
@@ -1644,7 +1676,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Let's try to stat() CRIU's inventory file. If it does not exist, it makes
// no sense to try a restore. This is a minimal check if a checkpoint exist.
if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) {
- return nil, 0, errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
+ return nil, 0, fmt.Errorf("a complete checkpoint for this container cannot be found, cannot restore: %w", err)
}
if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil {
@@ -1742,23 +1774,23 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// the ones from the infrastructure container.
pod, err := c.runtime.LookupPod(options.Pod)
if err != nil {
- return nil, 0, errors.Wrapf(err, "pod %q cannot be retrieved", options.Pod)
+ return nil, 0, fmt.Errorf("pod %q cannot be retrieved: %w", options.Pod, err)
}
infraContainer, err := pod.InfraContainer()
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieved infra container from pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieved infra container from pod %q: %w", options.Pod, err)
}
infraContainer.lock.Lock()
if err := infraContainer.syncContainer(); err != nil {
infraContainer.lock.Unlock()
- return nil, 0, errors.Wrapf(err, "Error syncing infrastructure container %s status", infraContainer.ID())
+ return nil, 0, fmt.Errorf("error syncing infrastructure container %s status: %w", infraContainer.ID(), err)
}
if infraContainer.state.State != define.ContainerStateRunning {
if err := infraContainer.initAndStart(ctx); err != nil {
infraContainer.lock.Unlock()
- return nil, 0, errors.Wrapf(err, "Error starting infrastructure container %s status", infraContainer.ID())
+ return nil, 0, fmt.Errorf("error starting infrastructure container %s status: %w", infraContainer.ID(), err)
}
}
infraContainer.lock.Unlock()
@@ -1766,7 +1798,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.IPCNsCtr != "" {
nsPath, err := infraContainer.namespacePath(IPCNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve IPC namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve IPC namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1776,7 +1808,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.NetNsCtr != "" {
nsPath, err := infraContainer.namespacePath(NetNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve network namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve network namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1786,7 +1818,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.PIDNsCtr != "" {
nsPath, err := infraContainer.namespacePath(PIDNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve PID namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve PID namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1796,7 +1828,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.UTSNsCtr != "" {
nsPath, err := infraContainer.namespacePath(UTSNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve UTS namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve UTS namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1806,7 +1838,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.CgroupNsCtr != "" {
nsPath, err := infraContainer.namespacePath(CgroupNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve Cgroup namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve Cgroup namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1874,13 +1906,13 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
volumeFile, err := os.Open(volumeFilePath)
if err != nil {
- return nil, 0, errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
+ return nil, 0, fmt.Errorf("failed to open volume file %s: %w", volumeFilePath, err)
}
defer volumeFile.Close()
volume, err := c.runtime.GetVolume(v.Name)
if err != nil {
- return nil, 0, errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
+ return nil, 0, fmt.Errorf("failed to retrieve volume %s: %w", v.Name, err)
}
mountPoint, err := volume.MountPoint()
@@ -1888,10 +1920,10 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return nil, 0, err
}
if mountPoint == "" {
- return nil, 0, errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name())
+ return nil, 0, fmt.Errorf("unable to import volume %s as it is not mounted: %w", volume.Name(), err)
}
if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil {
- return nil, 0, errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint)
+ return nil, 0, fmt.Errorf("failed to extract volume %s to %s: %w", volumeFilePath, mountPoint, err)
}
}
}
@@ -1918,12 +1950,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
statsDirectory, err := os.Open(c.bundlePath())
if err != nil {
- return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath())
+ return nil, fmt.Errorf("not able to open %q: %w", c.bundlePath(), err)
}
restoreStatistics, err := stats.CriuGetRestoreStats(statsDirectory)
if err != nil {
- return nil, errors.Wrap(err, "Displaying restore statistics not possible")
+ return nil, fmt.Errorf("displaying restore statistics not possible: %w", err)
}
return &define.CRIUCheckpointRestoreStatistics{
@@ -2001,7 +2033,7 @@ func (c *Container) getRootNetNsDepCtr() (depCtr *Container, err error) {
depCtr, err = c.runtime.state.Container(nextCtr)
if err != nil {
- return nil, errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID())
+ return nil, fmt.Errorf("error fetching dependency %s of container %s: %w", c.config.NetNsCtr, c.ID(), err)
}
// This should never happen without an error
if depCtr == nil {
@@ -2030,7 +2062,7 @@ func (c *Container) mountIntoRootDirs(mountName string, mountPath string) error
// Make standard bind mounts to include in the container
func (c *Container) makeBindMounts() error {
if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil {
- return errors.Wrap(err, "cannot chown run directory")
+ return fmt.Errorf("cannot chown run directory: %w", err)
}
if c.state.BindMounts == nil {
@@ -2048,13 +2080,13 @@ func (c *Container) makeBindMounts() error {
if c.config.NetNsCtr == "" {
if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "container %s", c.ID())
+ return fmt.Errorf("container %s: %w", c.ID(), err)
}
delete(c.state.BindMounts, "/etc/resolv.conf")
}
if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "container %s", c.ID())
+ return fmt.Errorf("container %s: %w", c.ID(), err)
}
delete(c.state.BindMounts, "/etc/hosts")
}
@@ -2067,13 +2099,13 @@ func (c *Container) makeBindMounts() error {
// them.
depCtr, err := c.getRootNetNsDepCtr()
if err != nil {
- return errors.Wrapf(err, "error fetching network namespace dependency container for container %s", c.ID())
+ return fmt.Errorf("error fetching network namespace dependency container for container %s: %w", c.ID(), err)
}
// We need that container's bind mounts
bindMounts, err := depCtr.BindMounts()
if err != nil {
- return errors.Wrapf(err, "error fetching bind mounts from dependency %s of container %s", depCtr.ID(), c.ID())
+ return fmt.Errorf("error fetching bind mounts from dependency %s of container %s: %w", depCtr.ID(), c.ID(), err)
}
// The other container may not have a resolv.conf or /etc/hosts
@@ -2083,7 +2115,7 @@ func (c *Container) makeBindMounts() error {
err := c.mountIntoRootDirs("/etc/resolv.conf", resolvPath)
if err != nil {
- return errors.Wrapf(err, "error assigning mounts to container %s", c.ID())
+ return fmt.Errorf("error assigning mounts to container %s: %w", c.ID(), err)
}
}
@@ -2103,13 +2135,13 @@ func (c *Container) makeBindMounts() error {
err = etchosts.Add(hostsPath, getLocalhostHostEntry(c))
lock.Unlock()
if err != nil {
- return errors.Wrapf(err, "error creating hosts file for container %s which depends on container %s", c.ID(), depCtr.ID())
+ return fmt.Errorf("error creating hosts file for container %s which depends on container %s: %w", c.ID(), depCtr.ID(), err)
}
// finally, save it in the new container
err = c.mountIntoRootDirs(config.DefaultHostsFile, hostsPath)
if err != nil {
- return errors.Wrapf(err, "error assigning mounts to container %s", c.ID())
+ return fmt.Errorf("error assigning mounts to container %s: %w", c.ID(), err)
}
}
@@ -2124,13 +2156,13 @@ func (c *Container) makeBindMounts() error {
} else {
if !c.config.UseImageResolvConf {
if err := c.generateResolvConf(); err != nil {
- return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
+ return fmt.Errorf("error creating resolv.conf for container %s: %w", c.ID(), err)
}
}
if !c.config.UseImageHosts {
if err := c.createHosts(); err != nil {
- return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
+ return fmt.Errorf("error creating hosts file for container %s: %w", c.ID(), err)
}
}
}
@@ -2148,7 +2180,7 @@ func (c *Container) makeBindMounts() error {
}
} else if !c.config.UseImageHosts && c.state.BindMounts["/etc/hosts"] == "" {
if err := c.createHosts(); err != nil {
- return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
+ return fmt.Errorf("error creating hosts file for container %s: %w", c.ID(), err)
}
}
@@ -2160,7 +2192,7 @@ func (c *Container) makeBindMounts() error {
if c.config.Passwd == nil || *c.config.Passwd {
newPasswd, newGroup, err := c.generatePasswdAndGroup()
if err != nil {
- return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
+ return fmt.Errorf("error creating temporary passwd file for container %s: %w", c.ID(), err)
}
if newPasswd != "" {
// Make /etc/passwd
@@ -2181,7 +2213,7 @@ func (c *Container) makeBindMounts() error {
if _, ok := c.state.BindMounts["/etc/hostname"]; !ok {
hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname())
if err != nil {
- return errors.Wrapf(err, "error creating hostname file for container %s", c.ID())
+ return fmt.Errorf("error creating hostname file for container %s: %w", c.ID(), err)
}
c.state.BindMounts["/etc/hostname"] = hostnamePath
}
@@ -2193,7 +2225,7 @@ func (c *Container) makeBindMounts() error {
if ctrTimezone != "local" {
_, err = time.LoadLocation(ctrTimezone)
if err != nil {
- return errors.Wrapf(err, "error finding timezone for container %s", c.ID())
+ return fmt.Errorf("error finding timezone for container %s: %w", c.ID(), err)
}
}
if _, ok := c.state.BindMounts["/etc/localtime"]; !ok {
@@ -2201,25 +2233,36 @@ func (c *Container) makeBindMounts() error {
if ctrTimezone == "local" {
zonePath, err = filepath.EvalSymlinks("/etc/localtime")
if err != nil {
- return errors.Wrapf(err, "error finding local timezone for container %s", c.ID())
+ return fmt.Errorf("error finding local timezone for container %s: %w", c.ID(), err)
}
} else {
zone := filepath.Join("/usr/share/zoneinfo", ctrTimezone)
zonePath, err = filepath.EvalSymlinks(zone)
if err != nil {
- return errors.Wrapf(err, "error setting timezone for container %s", c.ID())
+ return fmt.Errorf("error setting timezone for container %s: %w", c.ID(), err)
}
}
localtimePath, err := c.copyTimezoneFile(zonePath)
if err != nil {
- return errors.Wrapf(err, "error setting timezone for container %s", c.ID())
+ return fmt.Errorf("error setting timezone for container %s: %w", c.ID(), err)
}
c.state.BindMounts["/etc/localtime"] = localtimePath
}
}
+ _, hasRunContainerenv := c.state.BindMounts["/run/.containerenv"]
+ if !hasRunContainerenv {
+ // check in the spec mounts
+ for _, m := range c.config.Spec.Mounts {
+ if m.Destination == "/run/.containerenv" || m.Destination == "/run" {
+ hasRunContainerenv = true
+ break
+ }
+ }
+ }
+
// Make .containerenv if it does not exist
- if _, ok := c.state.BindMounts["/run/.containerenv"]; !ok {
+ if !hasRunContainerenv {
containerenv := c.runtime.graphRootMountedFlag(c.config.Spec.Mounts)
isRootless := 0
if rootless.IsRootless() {
@@ -2239,7 +2282,7 @@ rootless=%d
}
containerenvPath, err := c.writeStringToRundir(".containerenv", containerenv)
if err != nil {
- return errors.Wrapf(err, "error creating containerenv file for container %s", c.ID())
+ return fmt.Errorf("error creating containerenv file for container %s: %w", c.ID(), err)
}
c.state.BindMounts["/run/.containerenv"] = containerenvPath
}
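
The hasRunContainerenv check added above keeps Podman from writing /run/.containerenv when a user-supplied mount already covers /run or the file itself. A self-contained sketch of the same destination test follows; the Mount type and helper name are hypothetical, not the libpod API.

package main

import "fmt"

// Mount is a pared-down stand-in for an OCI spec mount entry.
type Mount struct{ Destination string }

// maskedByUserMount reports whether a mount already claims the target file
// or its parent directory, in which case writing the file would be shadowed.
func maskedByUserMount(mounts []Mount, file, parent string) bool {
	for _, m := range mounts {
		if m.Destination == file || m.Destination == parent {
			return true
		}
	}
	return false
}

func main() {
	mounts := []Mount{{Destination: "/run"}}
	fmt.Println(maskedByUserMount(mounts, "/run/.containerenv", "/run")) // true: skip creating the file
}
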
@@ -2260,7 +2303,7 @@ rootless=%d
if len(c.Secrets()) > 0 {
// create /run/secrets if subscriptions did not create it
if err := c.createSecretMountDir(); err != nil {
- return errors.Wrapf(err, "error creating secrets mount")
+ return fmt.Errorf("error creating secrets mount: %w", err)
}
for _, secret := range c.Secrets() {
secretFileName := secret.Name
@@ -2284,49 +2327,10 @@ rootless=%d
// generateResolvConf generates a container's resolv.conf
func (c *Container) generateResolvConf() error {
var (
- nameservers []string
networkNameServers []string
networkSearchDomains []string
)
- hostns := true
- resolvConf := "/etc/resolv.conf"
- for _, namespace := range c.config.Spec.Linux.Namespaces {
- if namespace.Type == spec.NetworkNamespace {
- hostns = false
- if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") {
- definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf")
- _, err := os.Stat(definedPath)
- if err == nil {
- resolvConf = definedPath
- } else if !os.IsNotExist(err) {
- return err
- }
- }
- break
- }
- }
-
- contents, err := ioutil.ReadFile(resolvConf)
- // resolv.conf doesn't have to exists
- if err != nil && !os.IsNotExist(err) {
- return err
- }
-
- ns := resolvconf.GetNameservers(contents)
- // check if systemd-resolved is used, assume it is used when 127.0.0.53 is the only nameserver
- if !hostns && len(ns) == 1 && ns[0] == "127.0.0.53" {
- // read the actual resolv.conf file for systemd-resolved
- resolvedContents, err := ioutil.ReadFile("/run/systemd/resolve/resolv.conf")
- if err != nil {
- if !os.IsNotExist(err) {
- return errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf")
- }
- } else {
- contents = resolvedContents
- }
- }
-
netStatus := c.getNetworkStatus()
for _, status := range netStatus {
if status.DNSServerIPs != nil {
@@ -2346,34 +2350,18 @@ func (c *Container) generateResolvConf() error {
return err
}
- // Ensure that the container's /etc/resolv.conf is compatible with its
- // network configuration.
- resolv, err := resolvconf.FilterResolvDNS(contents, ipv6, !hostns)
- if err != nil {
- return errors.Wrapf(err, "error parsing host resolv.conf")
- }
-
- dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers)+len(c.config.DNSServer))
- for _, i := range c.runtime.config.Containers.DNSServers {
- result := net.ParseIP(i)
- if result == nil {
- return errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i)
- }
- dns = append(dns, result)
+ nameservers := make([]string, 0, len(c.runtime.config.Containers.DNSServers)+len(c.config.DNSServer))
+ nameservers = append(nameservers, c.runtime.config.Containers.DNSServers...)
+ for _, ip := range c.config.DNSServer {
+ nameservers = append(nameservers, ip.String())
}
- dns = append(dns, c.config.DNSServer...)
// If the user provided dns, it trumps all; then dns masq; then resolv.conf
var search []string
- switch {
- case len(dns) > 0:
- // We store DNS servers as net.IP, so need to convert to string
- for _, server := range dns {
- nameservers = append(nameservers, server.String())
- }
- default:
- // Make a new resolv.conf
+ keepHostServers := false
+ if len(nameservers) == 0 {
+ keepHostServers = true
// first add the nameservers from the networks status
- nameservers = append(nameservers, networkNameServers...)
+ nameservers = networkNameServers
// when we add network dns server we also have to add the search domains
search = networkSearchDomains
// slirp4netns has a built in DNS forwarder.
@@ -2385,38 +2373,34 @@ func (c *Container) generateResolvConf() error {
nameservers = append(nameservers, slirp4netnsDNS.String())
}
}
- nameservers = append(nameservers, resolvconf.GetNameservers(resolv.Content)...)
}
if len(c.config.DNSSearch) > 0 || len(c.runtime.config.Containers.DNSSearches) > 0 {
- if !util.StringInSlice(".", c.config.DNSSearch) {
- search = append(search, c.runtime.config.Containers.DNSSearches...)
- search = append(search, c.config.DNSSearch...)
- }
- } else {
- search = append(search, resolvconf.GetSearchDomains(resolv.Content)...)
+ customSearch := make([]string, 0, len(c.config.DNSSearch)+len(c.runtime.config.Containers.DNSSearches))
+ customSearch = append(customSearch, c.runtime.config.Containers.DNSSearches...)
+ customSearch = append(customSearch, c.config.DNSSearch...)
+ search = customSearch
}
- var options []string
- if len(c.config.DNSOption) > 0 || len(c.runtime.config.Containers.DNSOptions) > 0 {
- options = c.runtime.config.Containers.DNSOptions
- options = append(options, c.config.DNSOption...)
- } else {
- options = resolvconf.GetOptions(resolv.Content)
- }
+ options := make([]string, 0, len(c.config.DNSOption)+len(c.runtime.config.Containers.DNSOptions))
+ options = append(options, c.runtime.config.Containers.DNSOptions...)
+ options = append(options, c.config.DNSOption...)
destPath := filepath.Join(c.state.RunDir, "resolv.conf")
- if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "container %s", c.ID())
- }
-
- // Build resolv.conf
- if _, err = resolvconf.Build(destPath, nameservers, search, options); err != nil {
- return errors.Wrapf(err, "error building resolv.conf for container %s", c.ID())
+ if err := resolvconf.New(&resolvconf.Params{
+ IPv6Enabled: ipv6,
+ KeepHostServers: keepHostServers,
+ Nameservers: nameservers,
+ Namespaces: c.config.Spec.Linux.Namespaces,
+ Options: options,
+ Path: destPath,
+ Searches: search,
+ }); err != nil {
+ return fmt.Errorf("error building resolv.conf for container %s: %w", c.ID(), err)
}
- return c.bindMountRootFile(destPath, "/etc/resolv.conf")
+ return c.bindMountRootFile(destPath, resolvconf.DefaultResolvConf)
}
// Check if a container uses IPv6.
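
generateResolvConf now computes only the inputs and hands file generation to resolvconf.New. The sketch below restates the precedence those inputs encode, under the assumption that the field names seen above mean what they say (all identifiers in the example are local to it): explicit DNS servers from the user or containers.conf win outright; otherwise the network-provided servers are used and KeepHostServers additionally carries over the host's own entries.

package main

import "fmt"

// resolvInputs mirrors the decision made above: custom servers suppress both
// the network-supplied servers and the host's resolv.conf entries.
func resolvInputs(custom, fromNetwork, netSearch []string) (ns, search []string, keepHost bool) {
	if len(custom) > 0 {
		return custom, nil, false
	}
	return fromNetwork, netSearch, true
}

func main() {
	ns, search, keep := resolvInputs(nil, []string{"10.89.0.1"}, []string{"dns.podman"})
	fmt.Println(ns, search, keep) // [10.89.0.1] [dns.podman] true

	ns, search, keep = resolvInputs([]string{"1.1.1.1"}, []string{"10.89.0.1"}, nil)
	fmt.Println(ns, search, keep) // [1.1.1.1] [] false
}
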
@@ -2457,31 +2441,13 @@ func (c *Container) addNameserver(ips []string) error {
}
// Do we have a resolv.conf at all?
- path, ok := c.state.BindMounts["/etc/resolv.conf"]
+ path, ok := c.state.BindMounts[resolvconf.DefaultResolvConf]
if !ok {
return nil
}
- // Read in full contents, parse out existing nameservers
- contents, err := ioutil.ReadFile(path)
- if err != nil {
- return err
- }
- ns := resolvconf.GetNameservers(contents)
- options := resolvconf.GetOptions(contents)
- search := resolvconf.GetSearchDomains(contents)
-
- // We could verify that it doesn't already exist
- // but extra nameservers shouldn't harm anything.
- // Ensure we are the first entry in resolv.conf though, otherwise we
- // might be after user-added servers.
- ns = append(ips, ns...)
-
- // We're rewriting the container's resolv.conf as part of this, but we
- // hold the container lock, so there should be no risk of parallel
- // modification.
- if _, err := resolvconf.Build(path, ns, search, options); err != nil {
- return errors.Wrapf(err, "error adding new nameserver to container %s resolv.conf", c.ID())
+ if err := resolvconf.Add(path, ips); err != nil {
+ return fmt.Errorf("adding new nameserver to container %s resolv.conf: %w", c.ID(), err)
}
return nil
@@ -2496,34 +2462,13 @@ func (c *Container) removeNameserver(ips []string) error {
}
// Do we have a resolv.conf at all?
- path, ok := c.state.BindMounts["/etc/resolv.conf"]
+ path, ok := c.state.BindMounts[resolvconf.DefaultResolvConf]
if !ok {
return nil
}
- // Read in full contents, parse out existing nameservers
- contents, err := ioutil.ReadFile(path)
- if err != nil {
- return err
- }
- ns := resolvconf.GetNameservers(contents)
- options := resolvconf.GetOptions(contents)
- search := resolvconf.GetSearchDomains(contents)
-
- toRemove := make(map[string]bool)
- for _, ip := range ips {
- toRemove[ip] = true
- }
-
- newNS := make([]string, 0, len(ns))
- for _, server := range ns {
- if !toRemove[server] {
- newNS = append(newNS, server)
- }
- }
-
- if _, err := resolvconf.Build(path, newNS, search, options); err != nil {
- return errors.Wrapf(err, "error removing nameservers from container %s resolv.conf", c.ID())
+ if err := resolvconf.Remove(path, ips); err != nil {
+ return fmt.Errorf("removing nameservers from container %s resolv.conf: %w", c.ID(), err)
}
return nil
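
addNameserver and removeNameserver no longer read, parse, and rebuild the file themselves; they call resolvconf.Add and resolvconf.Remove. The stand-in below sketches the Add behaviour these call sites rely on, prepending entries so that container-network servers stay ahead of user-added ones; it is an illustration under that assumption, not the containers/common implementation.

package main

import (
	"fmt"
	"os"
	"strings"
)

// addNameservers prepends nameserver lines to an existing resolv.conf-style file.
func addNameservers(path string, ips []string) error {
	orig, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var b strings.Builder
	for _, ip := range ips {
		fmt.Fprintf(&b, "nameserver %s\n", ip)
	}
	b.Write(orig)
	return os.WriteFile(path, []byte(b.String()), 0o644)
}

func main() {
	f, _ := os.CreateTemp("", "resolv.conf")
	defer os.Remove(f.Name())
	f.Close()
	os.WriteFile(f.Name(), []byte("nameserver 192.168.1.1\n"), 0o644)
	if err := addNameservers(f.Name(), []string{"10.0.2.3"}); err != nil {
		panic(err)
	}
	out, _ := os.ReadFile(f.Name())
	fmt.Print(string(out)) // the new server comes first, then the original entry
}
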
@@ -2653,16 +2598,16 @@ func (c *Container) generateCurrentUserGroupEntry() (string, int, error) {
g, err := user.LookupGroupId(strconv.Itoa(gid))
if err != nil {
- return "", 0, errors.Wrapf(err, "failed to get current group")
+ return "", 0, fmt.Errorf("failed to get current group: %w", err)
}
- // Lookup group name to see if it exists in the image.
+ // Look up group name to see if it exists in the image.
_, err = lookup.GetGroup(c.state.Mountpoint, g.Name)
if err != runcuser.ErrNoGroupEntries {
return "", 0, err
}
- // Lookup GID to see if it exists in the image.
+ // Look up GID to see if it exists in the image.
_, err = lookup.GetGroup(c.state.Mountpoint, g.Gid)
if err != runcuser.ErrNoGroupEntries {
return "", 0, err
@@ -2675,7 +2620,7 @@ func (c *Container) generateCurrentUserGroupEntry() (string, int, error) {
if uid != 0 {
u, err := user.LookupId(strconv.Itoa(uid))
if err != nil {
- return "", 0, errors.Wrapf(err, "failed to get current user to make group entry")
+ return "", 0, fmt.Errorf("failed to get current user to make group entry: %w", err)
}
username = u.Username
}
@@ -2699,7 +2644,7 @@ func (c *Container) generateUserGroupEntry(addedGID int) (string, error) {
gid, err := strconv.ParseUint(group, 10, 32)
if err != nil {
- return "", nil // nolint: nilerr
+ return "", nil //nolint: nilerr
}
if addedGID != 0 && addedGID == int(gid) {
@@ -2732,7 +2677,7 @@ func (c *Container) generatePasswdEntry() (string, error) {
addedUID := 0
for _, userid := range c.config.HostUsers {
- // Lookup User on host
+ // Look up User on host
u, err := util.LookupUser(userid)
if err != nil {
return "", err
@@ -2773,7 +2718,7 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) {
u, err := user.LookupId(strconv.Itoa(uid))
if err != nil {
- return "", 0, 0, errors.Wrapf(err, "failed to get current user")
+ return "", 0, 0, fmt.Errorf("failed to get current user: %w", err)
}
pwd, err := c.userPasswdEntry(u)
if err != nil {
@@ -2784,13 +2729,13 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) {
}
func (c *Container) userPasswdEntry(u *user.User) (string, error) {
- // Lookup the user to see if it exists in the container image.
+ // Look up the user to see if it exists in the container image.
_, err := lookup.GetUser(c.state.Mountpoint, u.Username)
if err != runcuser.ErrNoPasswdEntries {
return "", err
}
- // Lookup the UID to see if it exists in the container image.
+ // Look up the UID to see if it exists in the container image.
_, err = lookup.GetUser(c.state.Mountpoint, u.Uid)
if err != runcuser.ErrNoPasswdEntries {
return "", err
@@ -2855,14 +2800,14 @@ func (c *Container) generateUserPasswdEntry(addedUID int) (string, error) {
// If a non numeric User, then don't generate passwd
uid, err := strconv.ParseUint(userspec, 10, 32)
if err != nil {
- return "", nil // nolint: nilerr
+ return "", nil //nolint: nilerr
}
if addedUID != 0 && int(uid) == addedUID {
return "", nil
}
- // Lookup the user to see if it exists in the container image
+ // Look up the user to see if it exists in the container image
_, err = lookup.GetUser(c.state.Mountpoint, userspec)
if err != runcuser.ErrNoPasswdEntries {
return "", err
@@ -2875,7 +2820,7 @@ func (c *Container) generateUserPasswdEntry(addedUID int) (string, error) {
} else {
group, err := lookup.GetGroup(c.state.Mountpoint, groupspec)
if err != nil {
- return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
+ return "", fmt.Errorf("unable to get gid %s from group file: %w", groupspec, err)
}
gid = group.Gid
}
@@ -2984,7 +2929,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
logrus.Debugf("Making /etc/passwd for container %s", c.ID())
originPasswdFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
if err != nil {
- return "", "", errors.Wrapf(err, "error creating path to container %s /etc/passwd", c.ID())
+ return "", "", fmt.Errorf("error creating path to container %s /etc/passwd: %w", c.ID(), err)
}
orig, err := ioutil.ReadFile(originPasswdFile)
if err != nil && !os.IsNotExist(err) {
@@ -2992,7 +2937,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
}
passwdFile, err := c.writeStringToStaticDir("passwd", string(orig)+passwdEntry)
if err != nil {
- return "", "", errors.Wrapf(err, "failed to create temporary passwd file")
+ return "", "", fmt.Errorf("failed to create temporary passwd file: %w", err)
}
if err := os.Chmod(passwdFile, 0644); err != nil {
return "", "", err
@@ -3002,17 +2947,17 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
logrus.Debugf("Modifying container %s /etc/passwd", c.ID())
containerPasswd, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
if err != nil {
- return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/passwd", c.ID())
+ return "", "", fmt.Errorf("error looking up location of container %s /etc/passwd: %w", c.ID(), err)
}
f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
- return "", "", errors.Wrapf(err, "container %s", c.ID())
+ return "", "", fmt.Errorf("container %s: %w", c.ID(), err)
}
defer f.Close()
if _, err := f.WriteString(passwdEntry); err != nil {
- return "", "", errors.Wrapf(err, "unable to append to container %s /etc/passwd", c.ID())
+ return "", "", fmt.Errorf("unable to append to container %s /etc/passwd: %w", c.ID(), err)
}
default:
logrus.Debugf("Not modifying container %s /etc/passwd", c.ID())
@@ -3030,7 +2975,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
logrus.Debugf("Making /etc/group for container %s", c.ID())
originGroupFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group")
if err != nil {
- return "", "", errors.Wrapf(err, "error creating path to container %s /etc/group", c.ID())
+ return "", "", fmt.Errorf("error creating path to container %s /etc/group: %w", c.ID(), err)
}
orig, err := ioutil.ReadFile(originGroupFile)
if err != nil && !os.IsNotExist(err) {
@@ -3038,7 +2983,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
}
groupFile, err := c.writeStringToStaticDir("group", string(orig)+groupEntry)
if err != nil {
- return "", "", errors.Wrapf(err, "failed to create temporary group file")
+ return "", "", fmt.Errorf("failed to create temporary group file: %w", err)
}
if err := os.Chmod(groupFile, 0644); err != nil {
return "", "", err
@@ -3048,17 +2993,17 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
logrus.Debugf("Modifying container %s /etc/group", c.ID())
containerGroup, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group")
if err != nil {
- return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/group", c.ID())
+ return "", "", fmt.Errorf("error looking up location of container %s /etc/group: %w", c.ID(), err)
}
f, err := os.OpenFile(containerGroup, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
- return "", "", errors.Wrapf(err, "container %s", c.ID())
+ return "", "", fmt.Errorf("container %s: %w", c.ID(), err)
}
defer f.Close()
if _, err := f.WriteString(groupEntry); err != nil {
- return "", "", errors.Wrapf(err, "unable to append to container %s /etc/group", c.ID())
+ return "", "", fmt.Errorf("unable to append to container %s /etc/group: %w", c.ID(), err)
}
default:
logrus.Debugf("Not modifying container %s /etc/group", c.ID())
@@ -3093,7 +3038,7 @@ func (c *Container) expectPodCgroup() (bool, error) {
case cgroupManager == config.CgroupfsCgroupsManager:
return !rootless.IsRootless(), nil
default:
- return false, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup mode %s requested for pods", cgroupManager)
+ return false, fmt.Errorf("invalid cgroup mode %s requested for pods: %w", cgroupManager, define.ErrInvalidArg)
}
}
@@ -3108,7 +3053,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
case c.config.NoCgroups:
return "", nil
case c.config.CgroupsMode == cgroupSplit:
- selfCgroup, err := utils.GetOwnCgroup()
+ selfCgroup, err := utils.GetOwnCgroupDisallowRoot()
if err != nil {
return "", err
}
@@ -3130,7 +3075,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
logrus.Debugf("Setting Cgroup path for container %s to %s", c.ID(), cgroupPath)
return cgroupPath, nil
default:
- return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", cgroupManager)
+ return "", fmt.Errorf("invalid cgroup manager %s requested: %w", cgroupManager, define.ErrInvalidArg)
}
}
@@ -3141,7 +3086,7 @@ func (c *Container) copyTimezoneFile(zonePath string) (string, error) {
return "", err
}
if file.IsDir() {
- return "", errors.New("Invalid timezone: is a directory")
+ return "", errors.New("invalid timezone: is a directory")
}
src, err := os.Open(zonePath)
if err != nil {
@@ -3175,14 +3120,14 @@ func (c *Container) cleanupOverlayMounts() error {
func (c *Container) checkFileExistsInRootfs(file string) (bool, error) {
checkPath, err := securejoin.SecureJoin(c.state.Mountpoint, file)
if err != nil {
- return false, errors.Wrapf(err, "cannot create path to container %s file %q", c.ID(), file)
+ return false, fmt.Errorf("cannot create path to container %s file %q: %w", c.ID(), file, err)
}
stat, err := os.Stat(checkPath)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
- return false, errors.Wrapf(err, "container %s", c.ID())
+ return false, fmt.Errorf("container %s: %w", c.ID(), err)
}
if stat.IsDir() {
return false, nil
@@ -3218,7 +3163,7 @@ func (c *Container) createSecretMountDir() error {
func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
vol, err := c.runtime.state.Volume(v.Name)
if err != nil {
- return errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+ return fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
}
vol.lock.Lock()
@@ -3229,10 +3174,8 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
return err
}
- // TODO: For now, I've disabled chowning volumes owned by non-Podman
- // drivers. This may be safe, but it's really going to be a case-by-case
- // thing, I think - safest to leave disabled now and re-enable later if
- // there is a demand.
+ // Volumes owned by a volume driver are not chowned - we don't want to
+ // mess with a mount not managed by us.
if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
vol.state.NeedsChown = false
@@ -3247,7 +3190,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
newPair, err := mappings.ToHost(p)
if err != nil {
- return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
+ return fmt.Errorf("error mapping user %d:%d: %w", uid, gid, err)
}
uid = newPair.UID
gid = newPair.GID
@@ -3282,7 +3225,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
return err
}
stat := st.Sys().(*syscall.Stat_t)
- atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) // nolint: unconvert
+ atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) //nolint: unconvert
if err := os.Chtimes(mountPoint, atime, st.ModTime()); err != nil {
return err
}
diff --git a/libpod/container_log.go b/libpod/container_log.go
index 7a9eb2dbf..a9e0fe065 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"time"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/libpod/logs"
"github.com/nxadm/tail/watch"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -35,9 +35,9 @@ func (r *Runtime) Log(ctx context.Context, containers []*Container, options *log
func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine, colorID int64) error {
switch c.LogDriver() {
case define.PassthroughLogging:
- return errors.Wrapf(define.ErrNoLogs, "this container is using the 'passthrough' log driver, cannot read logs")
+ return fmt.Errorf("this container is using the 'passthrough' log driver, cannot read logs: %w", define.ErrNoLogs)
case define.NoLogging:
- return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs")
+ return fmt.Errorf("this container is using the 'none' log driver, cannot read logs: %w", define.ErrNoLogs)
case define.JournaldLogging:
return c.readFromJournal(ctx, options, logChannel, colorID)
case define.JSONLogging:
@@ -47,7 +47,7 @@ func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logCh
case define.KubernetesLogging, "":
return c.readFromLogFile(ctx, options, logChannel, colorID)
default:
- return errors.Wrapf(define.ErrInternal, "unrecognized log driver %q, cannot read logs", c.LogDriver())
+ return fmt.Errorf("unrecognized log driver %q, cannot read logs: %w", c.LogDriver(), define.ErrInternal)
}
}
@@ -55,10 +55,10 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
t, tailLog, err := logs.GetLogFile(c.LogPath(), options)
if err != nil {
// If the log file does not exist, this is not fatal.
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
return nil
}
- return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
+ return fmt.Errorf("unable to read log file %s for %s : %w", c.ID(), c.LogPath(), err)
}
options.WaitGroup.Add(1)
if len(tailLog) > 0 {
@@ -75,7 +75,6 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
go func() {
defer options.WaitGroup.Done()
- var partial string
for line := range t.Lines {
select {
case <-ctx.Done():
@@ -89,13 +88,6 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
logrus.Errorf("Getting new log line: %v", err)
continue
}
- if nll.Partial() {
- partial += nll.Msg
- continue
- } else if !nll.Partial() && len(partial) > 0 {
- nll.Msg = partial + nll.Msg
- partial = ""
- }
nll.CID = c.ID()
nll.CName = c.Name()
nll.ColorID = colorID
@@ -111,7 +103,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
// until EOF.
state, err := c.State()
if err != nil || state != define.ContainerStateRunning {
- if err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
+ if err != nil && !errors.Is(err, define.ErrNoSuchCtr) {
logrus.Errorf("Getting container state: %v", err)
}
go func() {
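
The file-based log reader above drops os.IsNotExist(errors.Cause(err)) in favour of errors.Is(err, os.ErrNotExist). The standard-library-only sketch below shows the practical difference once errors are wrapped with %w:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/no/such/log/file")
	wrapped := fmt.Errorf("unable to read log file: %w", err)

	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: errors.Is walks the whole %w chain
	fmt.Println(os.IsNotExist(wrapped))             // false: os.IsNotExist does not unwrap %w wrappers
}

With errors.Cause gone, errors.Is is the only check of the two that still sees through the fmt.Errorf wrappers introduced elsewhere in this diff.
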
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index deb726526..0686caed2 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"strings"
"time"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/libpod/logs"
"github.com/coreos/go-systemd/v22/journal"
"github.com/coreos/go-systemd/v22/sdjournal"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -49,7 +49,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
// We need the container's events in the same journal to guarantee
// consistency, see #10323.
if options.Follow && c.runtime.config.Engine.EventsLogger != "journald" {
- return errors.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger)
+ return fmt.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger)
}
journal, err := sdjournal.NewJournal()
@@ -63,21 +63,21 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
// Add the filters for events.
match := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
if err := journal.AddMatch(match.String()); err != nil {
- return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}
match = sdjournal.Match{Field: "PODMAN_ID", Value: c.ID()}
if err := journal.AddMatch(match.String()); err != nil {
- return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}
// Add the filter for logs. Note the disjunction so that we match
// either the events or the logs.
if err := journal.AddDisjunction(); err != nil {
- return errors.Wrap(err, "adding filter disjunction to journald logger")
+ return fmt.Errorf("adding filter disjunction to journald logger: %w", err)
}
match = sdjournal.Match{Field: "CONTAINER_ID_FULL", Value: c.ID()}
if err := journal.AddMatch(match.String()); err != nil {
- return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}
if err := journal.SeekHead(); err != nil {
@@ -85,12 +85,12 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
}
// API requires Next() immediately after SeekHead().
if _, err := journal.Next(); err != nil {
- return errors.Wrap(err, "next journal")
+ return fmt.Errorf("next journal: %w", err)
}
// API requires a next|prev before getting a cursor.
if _, err := journal.Previous(); err != nil {
- return errors.Wrap(err, "previous journal")
+ return fmt.Errorf("previous journal: %w", err)
}
// Note that the initial cursor may not yet be ready, so we'll do an
@@ -111,7 +111,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
break
}
if cursorError != nil {
- return errors.Wrap(cursorError, "initial journal cursor")
+ return fmt.Errorf("initial journal cursor: %w", cursorError)
}
options.WaitGroup.Add(1)
@@ -255,7 +255,7 @@ func journalFormatterWithID(entry *sdjournal.JournalEntry) (string, error) {
id, ok := entry.Fields["CONTAINER_ID_FULL"]
if !ok {
- return "", fmt.Errorf("no CONTAINER_ID_FULL field present in journal entry")
+ return "", errors.New("no CONTAINER_ID_FULL field present in journal entry")
}
if len(id) > 12 {
id = id[:12]
@@ -290,14 +290,15 @@ func formatterPrefix(entry *sdjournal.JournalEntry) (string, error) {
output := fmt.Sprintf("%s ", tsString)
priority, ok := entry.Fields["PRIORITY"]
if !ok {
- return "", errors.Errorf("no PRIORITY field present in journal entry")
+ return "", errors.New("no PRIORITY field present in journal entry")
}
- if priority == journaldLogOut {
+ switch priority {
+ case journaldLogOut:
output += "stdout "
- } else if priority == journaldLogErr {
+ case journaldLogErr:
output += "stderr "
- } else {
- return "", errors.Errorf("unexpected PRIORITY field in journal entry")
+ default:
+ return "", errors.New("unexpected PRIORITY field in journal entry")
}
// if CONTAINER_PARTIAL_MESSAGE is defined, the log type is "P"
@@ -314,7 +315,7 @@ func formatterMessage(entry *sdjournal.JournalEntry) (string, error) {
// Finally, append the message
msg, ok := entry.Fields["MESSAGE"]
if !ok {
- return "", fmt.Errorf("no MESSAGE field present in journal entry")
+ return "", errors.New("no MESSAGE field present in journal entry")
}
msg = strings.TrimSuffix(msg, "\n")
return msg, nil
diff --git a/libpod/container_log_unsupported.go b/libpod/container_log_unsupported.go
index c84a578cc..bb74a810d 100644
--- a/libpod/container_log_unsupported.go
+++ b/libpod/container_log_unsupported.go
@@ -5,16 +5,16 @@ package libpod
import (
"context"
+ "fmt"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/logs"
- "github.com/pkg/errors"
)
func (c *Container) readFromJournal(_ context.Context, _ *logs.LogOptions, _ chan *logs.LogLine, colorID int64) error {
- return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
+ return fmt.Errorf("journald logging only enabled with systemd on linux: %w", define.ErrOSNotSupported)
}
func (c *Container) initializeJournal(ctx context.Context) error {
- return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
+ return fmt.Errorf("journald logging only enabled with systemd on linux: %w", define.ErrOSNotSupported)
}
diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go
index 80a3749f5..35622d623 100644
--- a/libpod/container_path_resolution.go
+++ b/libpod/container_path_resolution.go
@@ -1,12 +1,12 @@
package libpod
import (
+ "fmt"
"path/filepath"
"strings"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -65,7 +65,7 @@ func (c *Container) resolvePath(mountPoint string, containerPath string) (string
return "", "", err
}
if mountPoint == "" {
- return "", "", errors.Errorf("volume %s is not mounted, cannot copy into it", volume.Name())
+ return "", "", fmt.Errorf("volume %s is not mounted, cannot copy into it", volume.Name())
}
// We found a matching volume for searchPath. We now
diff --git a/libpod/container_stat_linux.go b/libpod/container_stat_linux.go
index bbe3edbb3..72aabb516 100644
--- a/libpod/container_stat_linux.go
+++ b/libpod/container_stat_linux.go
@@ -4,6 +4,8 @@
package libpod
import (
+ "errors"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -11,7 +13,6 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/copy"
- "github.com/pkg/errors"
)
// statInsideMount stats the specified path *inside* the container's mount and PID
@@ -150,10 +151,10 @@ func secureStat(root string, path string) (*copier.StatForItem, error) {
}
if len(globStats) != 1 {
- return nil, errors.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats))
+ return nil, fmt.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats))
}
if len(globStats[0].Results) != 1 {
- return nil, errors.Errorf("internal error: secureStat: expected 1 result but got %d", len(globStats[0].Results))
+ return nil, fmt.Errorf("internal error: secureStat: expected 1 result but got %d", len(globStats[0].Results))
}
// NOTE: the key in the map differ from `glob` when hitting symlink.
@@ -167,7 +168,7 @@ func secureStat(root string, path string) (*copier.StatForItem, error) {
if stat.IsSymlink {
target, err := copier.Eval(root, path, copier.EvalOptions{})
if err != nil {
- return nil, errors.Wrap(err, "error evaluating symlink in container")
+ return nil, fmt.Errorf("error evaluating symlink in container: %w", err)
}
// Need to make sure the symlink is relative to the root!
target = strings.TrimPrefix(target, root)
diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go
index 9b3dbc873..5571edf73 100644
--- a/libpod/container_top_linux.go
+++ b/libpod/container_top_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"bufio"
+ "errors"
"fmt"
"os"
"strconv"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/psgo"
"github.com/google/shlex"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,15 +22,15 @@ import (
// []string for output
func (c *Container) Top(descriptors []string) ([]string, error) {
if c.config.NoCgroups {
- return nil, errors.Wrapf(define.ErrNoCgroups, "cannot run top on container %s as it did not create a cgroup", c.ID())
+ return nil, fmt.Errorf("cannot run top on container %s as it did not create a cgroup: %w", c.ID(), define.ErrNoCgroups)
}
conStat, err := c.State()
if err != nil {
- return nil, errors.Wrapf(err, "unable to look up state for %s", c.ID())
+ return nil, fmt.Errorf("unable to look up state for %s: %w", c.ID(), err)
}
if conStat != define.ContainerStateRunning {
- return nil, errors.Errorf("top can only be used on running containers")
+ return nil, errors.New("top can only be used on running containers")
}
// Also support comma-separated input.
@@ -59,7 +59,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
for _, d := range descriptors {
shSplit, err := shlex.Split(d)
if err != nil {
- return nil, fmt.Errorf("parsing ps args: %v", err)
+ return nil, fmt.Errorf("parsing ps args: %w", err)
}
for _, s := range shSplit {
if s != "" {
@@ -70,7 +70,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
output, err = c.execPS(psDescriptors)
if err != nil {
- return nil, errors.Wrapf(err, "error executing ps(1) in the container")
+ return nil, fmt.Errorf("error executing ps(1) in the container: %w", err)
}
// Trick: filter the ps command from the output instead of
@@ -96,7 +96,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
// For more details, please refer to github.com/containers/psgo.
func (c *Container) GetContainerPidInformation(descriptors []string) ([]string, error) {
pid := strconv.Itoa(c.state.PID)
- // TODO: psgo returns a [][]string to give users the ability to apply
+ // NOTE: psgo returns a [][]string to give users the ability to apply
// filters on the data. We need to change the API here
// to return a [][]string if we want to make use of
// filtering.
@@ -157,7 +157,7 @@ func (c *Container) execPS(args []string) ([]string, error) {
if err != nil {
return nil, err
} else if ec != 0 {
- return nil, errors.Errorf("Runtime failed with exit status: %d and output: %s", ec, strings.Join(stderr, " "))
+ return nil, fmt.Errorf("runtime failed with exit status: %d and output: %s", ec, strings.Join(stderr, " "))
}
if logrus.GetLevel() >= logrus.DebugLevel {
diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index cfbdd2b1e..e280c60d2 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -5,7 +5,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// Validate that the configuration of a container is valid.
@@ -16,17 +15,17 @@ func (c *Container) validate() error {
// If one of RootfsImageID or RootfsImageName is set, both must be set.
if (imageIDSet || imageNameSet) && !(imageIDSet && imageNameSet) {
- return errors.Wrapf(define.ErrInvalidArg, "both RootfsImageName and RootfsImageID must be set if either is set")
+ return fmt.Errorf("both RootfsImageName and RootfsImageID must be set if either is set: %w", define.ErrInvalidArg)
}
// Cannot set RootfsImageID and Rootfs at the same time
if imageIDSet && rootfsSet {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set both an image ID and rootfs for a container")
+ return fmt.Errorf("cannot set both an image ID and rootfs for a container: %w", define.ErrInvalidArg)
}
// Must set at least one of RootfsImageID or Rootfs
if !(imageIDSet || rootfsSet) {
- return errors.Wrapf(define.ErrInvalidArg, "must set root filesystem source to either image or rootfs")
+ return fmt.Errorf("must set root filesystem source to either image or rootfs: %w", define.ErrInvalidArg)
}
// A container cannot be marked as an infra and service container at
@@ -38,62 +37,62 @@ func (c *Container) validate() error {
// Cannot make a network namespace if we are joining another container's
// network namespace
if c.config.CreateNetNS && c.config.NetNsCtr != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot both create a network namespace and join another container's network namespace")
+ return fmt.Errorf("cannot both create a network namespace and join another container's network namespace: %w", define.ErrInvalidArg)
}
if c.config.CgroupsMode == cgroupSplit && c.config.CgroupParent != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot specify --cgroup-mode=split with a cgroup-parent")
+ return fmt.Errorf("cannot specify --cgroup-mode=split with a cgroup-parent: %w", define.ErrInvalidArg)
}
// Not creating cgroups has a number of requirements, mostly related to
// the PID namespace.
if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
if c.config.PIDNsCtr != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot join another container's PID namespace if not creating cgroups")
+ return fmt.Errorf("cannot join another container's PID namespace if not creating cgroups: %w", define.ErrInvalidArg)
}
if c.config.CgroupParent != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set cgroup parent if not creating cgroups")
+ return fmt.Errorf("cannot set cgroup parent if not creating cgroups: %w", define.ErrInvalidArg)
}
// Ensure we have a PID namespace
if c.config.Spec.Linux == nil {
- return errors.Wrapf(define.ErrInvalidArg, "must provide Linux namespace configuration in OCI spec when using NoCgroups")
+ return fmt.Errorf("must provide Linux namespace configuration in OCI spec when using NoCgroups: %w", define.ErrInvalidArg)
}
foundPid := false
for _, ns := range c.config.Spec.Linux.Namespaces {
if ns.Type == spec.PIDNamespace {
foundPid = true
if ns.Path != "" {
- return errors.Wrapf(define.ErrInvalidArg, "containers not creating Cgroups must create a private PID namespace - cannot use another")
+ return fmt.Errorf("containers not creating Cgroups must create a private PID namespace - cannot use another: %w", define.ErrInvalidArg)
}
break
}
}
if !foundPid {
- return errors.Wrapf(define.ErrInvalidArg, "containers not creating Cgroups must create a private PID namespace")
+ return fmt.Errorf("containers not creating Cgroups must create a private PID namespace: %w", define.ErrInvalidArg)
}
}
// Can only set static IP or MAC if creating a network namespace.
if !c.config.CreateNetNS && (c.config.StaticIP != nil || c.config.StaticMAC != nil) {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if not creating a network namespace")
+ return fmt.Errorf("cannot set static IP or MAC address if not creating a network namespace: %w", define.ErrInvalidArg)
}
// Cannot set static IP or MAC if joining >1 CNI network.
if len(c.config.Networks) > 1 && (c.config.StaticIP != nil || c.config.StaticMAC != nil) {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if joining more than one network")
+ return fmt.Errorf("cannot set static IP or MAC address if joining more than one network: %w", define.ErrInvalidArg)
}
// Using image resolv.conf conflicts with various DNS settings.
if c.config.UseImageResolvConf &&
(len(c.config.DNSSearch) > 0 || len(c.config.DNSServer) > 0 ||
len(c.config.DNSOption) > 0) {
- return errors.Wrapf(define.ErrInvalidArg, "cannot configure DNS options if using image's resolv.conf")
+ return fmt.Errorf("cannot configure DNS options if using image's resolv.conf: %w", define.ErrInvalidArg)
}
if c.config.UseImageHosts && len(c.config.HostAdd) > 0 {
- return errors.Wrapf(define.ErrInvalidArg, "cannot add to /etc/hosts if using image's /etc/hosts")
+ return fmt.Errorf("cannot add to /etc/hosts if using image's /etc/hosts: %w", define.ErrInvalidArg)
}
// Check named volume, overlay volume and image volume destination conflicts
@@ -102,7 +101,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
- return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@@ -110,7 +109,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
- return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@@ -118,7 +117,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
- return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@@ -126,13 +125,13 @@ func (c *Container) validate() error {
// If User in the OCI spec is set, require that c.config.User is set for
// security reasons (a lot of our code relies on c.config.User).
if c.config.User == "" && (c.config.Spec.Process.User.UID != 0 || c.config.Spec.Process.User.GID != 0) {
- return errors.Wrapf(define.ErrInvalidArg, "please set User explicitly via WithUser() instead of in OCI spec directly")
+ return fmt.Errorf("please set User explicitly via WithUser() instead of in OCI spec directly: %w", define.ErrInvalidArg)
}
// Init-ctrs must be used inside a Pod. Check if an init container type is
// passed and if no pod is passed
if len(c.config.InitContainerType) > 0 && len(c.config.Pod) < 1 {
- return errors.Wrap(define.ErrInvalidArg, "init containers must be created in a pod")
+ return fmt.Errorf("init containers must be created in a pod: %w", define.ErrInvalidArg)
}
return nil
}
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index e7b82d654..ccc4ae00f 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -259,9 +259,7 @@ type HealthCheckLog struct {
// as possible from the spec and container config.
// Some things cannot be inferred. These will be populated by spec annotations
// (if available).
-// Field names are fixed for compatibility and cannot be changed.
-// As such, silence lint warnings about them.
-//nolint
+//nolint:revive,stylecheck // Field names are fixed for compatibility and cannot be changed.
type InspectContainerHostConfig struct {
// Binds contains an array of user-added mounts.
// Both volume mounts and named volumes are included.
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index 9ad3aec08..00080ef37 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -1,9 +1,8 @@
package define
import (
+ "fmt"
"time"
-
- "github.com/pkg/errors"
)
// ContainerStatus represents the current state of a container
@@ -91,7 +90,7 @@ func StringToContainerStatus(status string) (ContainerStatus, error) {
case ContainerStateRemoving.String():
return ContainerStateRemoving, nil
default:
- return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
+ return ContainerStateUnknown, fmt.Errorf("unknown container state: %s: %w", status, ErrInvalidArg)
}
}
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index f5a7c73e5..b858e1989 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/pkg/util"
)
var (
@@ -24,6 +25,10 @@ var (
// not exist.
ErrNoSuchExecSession = errors.New("no such exec session")
+ // ErrNoSuchExitCode indicates that the requested container exit code
+ // does not exist.
+ ErrNoSuchExitCode = errors.New("no such exit code")
+
// ErrDepExists indicates that the current object has dependencies and
// cannot be removed before them.
ErrDepExists = errors.New("dependency exists")
@@ -88,7 +93,7 @@ var (
// ErrDetach indicates that an attach session was manually detached by
// the user.
- ErrDetach = errors.New("detached from container")
+ ErrDetach = util.ErrDetach
// ErrWillDeadlock indicates that the requested operation will cause a
// deadlock. This is usually caused by upgrade issues, and is resolved
diff --git a/libpod/define/exec_codes.go b/libpod/define/exec_codes.go
index f94616b33..3f2da4910 100644
--- a/libpod/define/exec_codes.go
+++ b/libpod/define/exec_codes.go
@@ -1,9 +1,9 @@
package define
import (
+ "errors"
"strings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -23,10 +23,10 @@ const (
// has a predefined exit code associated. If so, it returns that, otherwise it returns
// the exit code originally stated in libpod.Exec()
func TranslateExecErrorToExitCode(originalEC int, err error) int {
- if errors.Cause(err) == ErrOCIRuntimePermissionDenied {
+ if errors.Is(err, ErrOCIRuntimePermissionDenied) {
return ExecErrorCodeCannotInvoke
}
- if errors.Cause(err) == ErrOCIRuntimeNotFound {
+ if errors.Is(err, ErrOCIRuntimeNotFound) {
return ExecErrorCodeNotFound
}
return originalEC
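
TranslateExecErrorToExitCode now matches its sentinels with errors.Is instead of comparing errors.Cause(err) against them. A compact sketch, with a stand-in sentinel and exit code, of why the check keeps firing once callers wrap with %w:

package main

import (
	"errors"
	"fmt"
)

// stand-in for define.ErrOCIRuntimeNotFound
var errRuntimeNotFound = errors.New("OCI runtime command not found")

func translate(originalEC int, err error) int {
	if errors.Is(err, errRuntimeNotFound) {
		return 127 // stand-in for ExecErrorCodeNotFound
	}
	return originalEC
}

func main() {
	wrapped := fmt.Errorf("container abc: %w", errRuntimeNotFound)
	fmt.Println(translate(125, wrapped))                 // 127: found through the wrap chain
	fmt.Println(translate(125, errors.New("unrelated"))) // 125: unrelated errors pass through
}
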
diff --git a/libpod/define/healthchecks.go b/libpod/define/healthchecks.go
index bde449d30..f71274350 100644
--- a/libpod/define/healthchecks.go
+++ b/libpod/define/healthchecks.go
@@ -47,3 +47,13 @@ const (
// DefaultHealthCheckTimeout default value
DefaultHealthCheckTimeout = "30s"
)
+
+// HealthConfig.Test options
+const (
+ // HealthConfigTestNone disables healthcheck
+ HealthConfigTestNone = "NONE"
+ // HealthConfigTestCmd execs arguments directly
+ HealthConfigTestCmd = "CMD"
+ // HealthConfigTestCmdShell runs commands with the system's default shell
+ HealthConfigTestCmdShell = "CMD-SHELL"
+)
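
The new constants name the three recognised HealthConfig.Test modes. Below is a hedged sketch of how such a Test slice is commonly interpreted; the dispatch is illustrative only and not the libpod healthcheck implementation.

package main

import "fmt"

const (
	HealthConfigTestNone     = "NONE"
	HealthConfigTestCmd      = "CMD"
	HealthConfigTestCmdShell = "CMD-SHELL"
)

// healthCheckCommand turns a HealthConfig.Test slice into an argv, reporting
// whether a healthcheck is enabled at all.
func healthCheckCommand(test []string) ([]string, bool) {
	if len(test) == 0 || test[0] == HealthConfigTestNone {
		return nil, false // healthcheck disabled
	}
	switch test[0] {
	case HealthConfigTestCmd:
		return test[1:], true // exec the arguments directly
	case HealthConfigTestCmdShell:
		return append([]string{"/bin/sh", "-c"}, test[1:]...), true // run via the default shell
	default:
		return test, true // treat a bare command line as CMD-like
	}
}

func main() {
	cmd, ok := healthCheckCommand([]string{HealthConfigTestCmdShell, "curl -f http://localhost || exit 1"})
	fmt.Println(ok, cmd) // true [/bin/sh -c curl -f http://localhost || exit 1]
}
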
diff --git a/libpod/define/info.go b/libpod/define/info.go
index 911fa5c03..c716bec7b 100644
--- a/libpod/define/info.go
+++ b/libpod/define/info.go
@@ -14,7 +14,7 @@ type Info struct {
Version Version `json:"version"`
}
-// HostInfo describes the libpod host
+// SecurityInfo describes the security features of the libpod host
type SecurityInfo struct {
AppArmorEnabled bool `json:"apparmorEnabled"`
DefaultCapabilities string `json:"capabilities"`
@@ -64,8 +64,7 @@ type RemoteSocket struct {
Exists bool `json:"exists,omitempty"`
}
-// SlirpInfo describes the slirp executable that
-// is being being used.
+// SlirpInfo describes the slirp executable that is being used
type SlirpInfo struct {
Executable string `json:"executable"`
Package string `json:"package"`
@@ -78,8 +77,7 @@ type IDMappings struct {
UIDMap []idtools.IDMap `json:"uidmap"`
}
-// DistributionInfo describes the host distribution
-// for libpod
+// DistributionInfo describes the host distribution for libpod
type DistributionInfo struct {
Distribution string `json:"distribution"`
Variant string `json:"variant,omitempty"`
@@ -141,8 +139,8 @@ type Plugins struct {
Volume []string `json:"volume"`
Network []string `json:"network"`
Log []string `json:"log"`
- // FIXME what should we do with Authorization, docker seems to return nothing by default
- // Authorization []string `json:"authorization"`
+ // Authorization is provided for compatibility; it will always be nil as Podman has no daemon
+ Authorization []string `json:"authorization"`
}
type CPUUsage struct {
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index 219ffade2..2afef48c4 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -69,6 +69,8 @@ type InspectPodData struct {
VolumesFrom []string `json:"volumes_from,omitempty"`
// SecurityOpt contains the specified security labels and related SELinux information
SecurityOpts []string `json:"security_opt,omitempty"`
+ // MemoryLimit contains the specified cgroup memory limit for the pod
+ MemoryLimit uint64 `json:"memory_limit,omitempty"`
}
// InspectPodInfraConfig contains the configuration of the pod's infra
@@ -82,6 +84,7 @@ type InspectPodInfraConfig struct {
HostNetwork bool
// StaticIP is a static IPv4 that will be assigned to the infra
// container and then used by the pod.
+ // swagger:strfmt ipv4
StaticIP net.IP
// StaticMAC is a static MAC address that will be assigned to the infra
// container and then used by the pod.
@@ -119,6 +122,8 @@ type InspectPodInfraConfig struct {
PidNS string `json:"pid_ns,omitempty"`
// UserNS is the user namespace that all the containers in the pod will join.
UserNS string `json:"userns,omitempty"`
+ // UtsNS is the uts namespace that all containers in the pod will join
+ UtsNS string `json:"uts_ns,omitempty"`
}
// InspectPodContainerInfo contains information on a container in a pod.
diff --git a/libpod/define/terminal.go b/libpod/define/terminal.go
deleted file mode 100644
index ce8955544..000000000
--- a/libpod/define/terminal.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package define
-
-// TerminalSize represents the width and height of a terminal.
-type TerminalSize struct {
- Width uint16
- Height uint16
-}
diff --git a/libpod/define/volume_inspect.go b/libpod/define/volume_inspect.go
index fac179176..f731a8735 100644
--- a/libpod/define/volume_inspect.go
+++ b/libpod/define/volume_inspect.go
@@ -56,4 +56,12 @@ type InspectVolumeData struct {
// a container, the container will chown the volume to the container process
// UID/GID.
NeedsChown bool `json:"NeedsChown,omitempty"`
+ // Timeout is the specified driver timeout if given
+ Timeout int `json:"Timeout,omitempty"`
+}
+
+type VolumeReload struct {
+ Added []string
+ Removed []string
+ Errors []error
}
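
The new VolumeReload type above reports volumes added and removed during a reload, plus any per-volume errors. An illustrative sketch, assuming a caller that only wants to print the result; the volume names and the error are made up, only the define.VolumeReload fields come from the change above:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/containers/podman/v4/libpod/define"
    )

    // summarize prints the outcome of a volume reload.
    func summarize(r define.VolumeReload) {
    	for _, name := range r.Added {
    		fmt.Println("added volume:", name)
    	}
    	for _, name := range r.Removed {
    		fmt.Println("removed volume:", name)
    	}
    	for _, err := range r.Errors {
    		fmt.Println("reload error:", err)
    	}
    }

    func main() {
    	summarize(define.VolumeReload{
    		Added:   []string{"webdata"},
    		Removed: []string{"stale"},
    		Errors:  []error{errors.New("volume plugin timed out")},
    	})
    }
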
diff --git a/libpod/diff.go b/libpod/diff.go
index 86fa063ec..8f0ad9355 100644
--- a/libpod/diff.go
+++ b/libpod/diff.go
@@ -1,10 +1,11 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/layers"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
)
var initInodes = map[string]bool{
@@ -76,5 +77,5 @@ func (r *Runtime) getLayerID(id string, diffType define.DiffType) (string, error
}
lastErr = err
}
- return "", errors.Wrapf(lastErr, "%s not found", id)
+ return "", fmt.Errorf("%s not found: %w", id, lastErr)
}
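
The hunk above is part of a tree-wide move from github.com/pkg/errors to the standard library: errors.Wrapf becomes fmt.Errorf with the %w verb, and errors.Cause comparisons become errors.Is. A minimal standalone sketch of that wrap-and-match pattern; the sentinel and lookup function are hypothetical, not libpod code:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // ErrLayerNotFound is a stand-in sentinel error.
    var ErrLayerNotFound = errors.New("layer not found")

    // lookupLayer wraps the sentinel with %w so callers can still match it.
    func lookupLayer(id string) error {
    	return fmt.Errorf("%s not found: %w", id, ErrLayerNotFound)
    }

    func main() {
    	err := lookupLayer("deadbeef")
    	fmt.Println(err)                              // deadbeef not found: layer not found
    	fmt.Println(errors.Is(err, ErrLayerNotFound)) // true
    }

The wrapped error keeps the sentinel reachable through the error chain, which is why the errors.Cause checks elsewhere in this commit can be replaced one-for-one with errors.Is.
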
diff --git a/libpod/events.go b/libpod/events.go
index f09d8402a..c9e4c9d26 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -6,7 +6,6 @@ import (
"sync"
"github.com/containers/podman/v4/libpod/events"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -33,6 +32,16 @@ func (c *Container) newContainerEvent(status events.Status) {
Attributes: c.Labels(),
}
+ // If the current event is a HealthStatus event, get the container's
+ // current health status and attach it to the event.
+ if status == events.HealthStatus {
+ containerHealthStatus, err := c.healthCheckStatus()
+ if err != nil {
+ e.HealthStatus = fmt.Sprintf("%v", err)
+ } else {
+ e.HealthStatus = containerHealthStatus
+ }
+ }
+
if err := c.runtime.eventer.Write(e); err != nil {
logrus.Errorf("Unable to write pod event: %q", err)
}
@@ -151,6 +160,9 @@ func (r *Runtime) GetEvents(ctx context.Context, filters []string) ([]*events.Ev
// GetLastContainerEvent takes a container name or ID and an event status and returns
// the last occurrence of the container event
func (r *Runtime) GetLastContainerEvent(ctx context.Context, nameOrID string, containerEvent events.Status) (*events.Event, error) {
+ // FIXME: events should be read in reverse order!
+ // https://github.com/containers/podman/issues/14579
+
// check to make sure the event.Status is valid
if _, err := events.StringToStatus(containerEvent.String()); err != nil {
return nil, err
@@ -165,7 +177,7 @@ func (r *Runtime) GetLastContainerEvent(ctx context.Context, nameOrID string, co
return nil, err
}
if len(containerEvents) < 1 {
- return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String())
+ return nil, fmt.Errorf("%s not found: %w", containerEvent.String(), events.ErrEventNotFound)
}
// return the last element in the slice
return containerEvents[len(containerEvents)-1], nil
@@ -188,7 +200,7 @@ func (r *Runtime) GetExecDiedEvent(ctx context.Context, nameOrID, execSessionID
// There *should* only be one event maximum.
// But... just in case... let's not blow up if there's more than one.
if len(containerEvents) < 1 {
- return nil, errors.Wrapf(events.ErrEventNotFound, "exec died event for session %s (container %s) not found", execSessionID, nameOrID)
+ return nil, fmt.Errorf("exec died event for session %s (container %s) not found: %w", execSessionID, nameOrID, events.ErrEventNotFound)
}
return containerEvents[len(containerEvents)-1], nil
}
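
With the change above, health_status events carry the container's current health state in the new HealthStatus field. A hedged sketch of how a consumer of the events package might pick these events off a channel; the channel wiring and the container ID are illustrative, only the Event fields and the HealthStatus constant come from this commit:

    package main

    import (
    	"fmt"

    	"github.com/containers/podman/v4/libpod/events"
    )

    // reportHealth prints health transitions as they arrive on an event
    // channel, e.g. one filled by an Eventer's Read method.
    func reportHealth(ch <-chan *events.Event) {
    	for e := range ch {
    		if e.Status != events.HealthStatus {
    			continue
    		}
    		fmt.Printf("container %s is now %s\n", e.ID, e.HealthStatus)
    	}
    }

    func main() {
    	e := new(events.Event)
    	e.ID = "abc123"
    	e.Status = events.HealthStatus
    	e.HealthStatus = "healthy"

    	ch := make(chan *events.Event, 1)
    	ch <- e
    	close(ch)
    	reportHealth(ch)
    }
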
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 2e7016136..4ea45a00e 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -2,9 +2,8 @@ package events
import (
"context"
+ "errors"
"time"
-
- "github.com/pkg/errors"
)
// EventerType ...
@@ -40,6 +39,8 @@ type Event struct {
Time time.Time
// Type of event that occurred
Type Type
+ // Health status of the current container
+ HealthStatus string `json:"health_status,omitempty"`
Details
}
@@ -141,6 +142,8 @@ const (
Exited Status = "died"
// Export ...
Export Status = "export"
+ // HealthStatus ...
+ HealthStatus Status = "health_status"
// History ...
History Status = "history"
// Import ...
diff --git a/libpod/events/events.go b/libpod/events/events.go
index e83c2efee..764481e51 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -2,16 +2,16 @@ package events
import (
"encoding/json"
+ "errors"
"fmt"
"time"
"github.com/containers/storage/pkg/stringid"
- "github.com/pkg/errors"
)
// ErrNoJournaldLogging indicates that there is no journald logging
// supported (requires libsystemd)
-var ErrNoJournaldLogging = errors.New("No support for journald logging")
+var ErrNoJournaldLogging = errors.New("no support for journald logging")
// String returns a string representation of EventerType
func (et EventerType) String() string {
@@ -76,7 +76,7 @@ func (e *Event) ToHumanReadable(truncate bool) string {
}
switch e.Type {
case Container, Pod:
- humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name)
+ humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s, health_status=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name, e.HealthStatus)
// check if the container has labels and add it to the output
if len(e.Attributes) > 0 {
for k, v := range e.Attributes {
@@ -140,12 +140,10 @@ func StringToType(name string) (Type, error) {
case "":
return "", ErrEventTypeBlank
}
- return "", errors.Errorf("unknown event type %q", name)
+ return "", fmt.Errorf("unknown event type %q", name)
}
// StringToStatus converts a string to an Event Status
-// TODO if we add more events, we might consider a go-generator to
-// create the switch statement
func StringToStatus(name string) (Status, error) {
switch name {
case Attach.String():
@@ -170,6 +168,8 @@ func StringToStatus(name string) (Status, error) {
return Exited, nil
case Export.String():
return Export, nil
+ case HealthStatus.String():
+ return HealthStatus, nil
case History.String():
return History, nil
case Import.String():
@@ -225,5 +225,5 @@ func StringToStatus(name string) (Status, error) {
case Untag.String():
return Untag, nil
}
- return "", errors.Errorf("unknown event status %q", name)
+ return "", fmt.Errorf("unknown event status %q", name)
}
diff --git a/libpod/events/events_linux.go b/libpod/events/events_linux.go
index 4320f2190..e7801af5b 100644
--- a/libpod/events/events_linux.go
+++ b/libpod/events/events_linux.go
@@ -1,9 +1,9 @@
package events
import (
+ "fmt"
"strings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -14,7 +14,7 @@ func NewEventer(options EventerOptions) (Eventer, error) {
case strings.ToUpper(Journald.String()):
eventer, err := newEventJournalD(options)
if err != nil {
- return nil, errors.Wrapf(err, "eventer creation")
+ return nil, fmt.Errorf("eventer creation: %w", err)
}
return eventer, nil
case strings.ToUpper(LogFile.String()):
@@ -24,6 +24,6 @@ func NewEventer(options EventerOptions) (Eventer, error) {
case strings.ToUpper(Memory.String()):
return NewMemoryEventer(), nil
default:
- return nil, errors.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType))
+ return nil, fmt.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType))
}
}
diff --git a/libpod/events/events_unsupported.go b/libpod/events/events_unsupported.go
index 25c175524..d766402a9 100644
--- a/libpod/events/events_unsupported.go
+++ b/libpod/events/events_unsupported.go
@@ -3,7 +3,7 @@
package events
-import "github.com/pkg/errors"
+import "errors"
// NewEventer creates an eventer based on the eventer type
func NewEventer(options EventerOptions) (Eventer, error) {
diff --git a/libpod/events/filters.go b/libpod/events/filters.go
index 64c162db2..d5b96e7ec 100644
--- a/libpod/events/filters.go
+++ b/libpod/events/filters.go
@@ -1,11 +1,11 @@
package events
import (
+ "fmt"
"strings"
"time"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error) {
@@ -74,7 +74,7 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error
return found
}, nil
}
- return nil, errors.Errorf("%s is an invalid filter", filter)
+ return nil, fmt.Errorf("%s is an invalid filter", filter)
}
func generateEventSinceOption(timeSince time.Time) func(e *Event) bool {
@@ -92,7 +92,7 @@ func generateEventUntilOption(timeUntil time.Time) func(e *Event) bool {
func parseFilter(filter string) (string, string, error) {
filterSplit := strings.SplitN(filter, "=", 2)
if len(filterSplit) != 2 {
- return "", "", errors.Errorf("%s is an invalid filter", filter)
+ return "", "", fmt.Errorf("%s is an invalid filter", filter)
}
return filterSplit[0], filterSplit[1], nil
}
@@ -137,7 +137,7 @@ func generateEventFilters(filters []string, since, until string) (map[string][]E
if len(since) > 0 {
timeSince, err := util.ParseInputTime(since, true)
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert since time of %s", since)
+ return nil, fmt.Errorf("unable to convert since time of %s: %w", since, err)
}
filterFunc := generateEventSinceOption(timeSince)
filterMap["since"] = []EventFilter{filterFunc}
@@ -146,7 +146,7 @@ func generateEventFilters(filters []string, since, until string) (map[string][]E
if len(until) > 0 {
timeUntil, err := util.ParseInputTime(until, false)
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert until time of %s", until)
+ return nil, fmt.Errorf("unable to convert until time of %s: %w", until, err)
}
filterFunc := generateEventUntilOption(timeUntil)
filterMap["until"] = []EventFilter{filterFunc}
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 866042a4c..0a0a768d0 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -6,13 +6,14 @@ package events
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"strconv"
"time"
"github.com/containers/podman/v4/pkg/util"
"github.com/coreos/go-systemd/v22/journal"
"github.com/coreos/go-systemd/v22/sdjournal"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -58,13 +59,14 @@ func (e EventJournalD) Write(ee Event) error {
}
m["PODMAN_LABELS"] = string(b)
}
+ m["PODMAN_HEALTH_STATUS"] = ee.HealthStatus
case Network:
m["PODMAN_ID"] = ee.ID
m["PODMAN_NETWORK_NAME"] = ee.Network
case Volume:
m["PODMAN_NAME"] = ee.Name
}
- return journal.Send(string(ee.ToHumanReadable(false)), journal.PriInfo, m)
+ return journal.Send(ee.ToHumanReadable(false), journal.PriInfo, m)
}
// Read reads events from the journal and sends qualified events to the event channel
@@ -72,7 +74,7 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
- return errors.Wrapf(err, "failed to parse event filters")
+ return fmt.Errorf("failed to parse event filters: %w", err)
}
var untilTime time.Time
@@ -95,29 +97,29 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
// match only podman journal entries
podmanJournal := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
if err := j.AddMatch(podmanJournal.String()); err != nil {
- return errors.Wrap(err, "failed to add journal filter for event log")
+ return fmt.Errorf("failed to add journal filter for event log: %w", err)
}
if len(options.Since) == 0 && len(options.Until) == 0 && options.Stream {
if err := j.SeekTail(); err != nil {
- return errors.Wrap(err, "failed to seek end of journal")
+ return fmt.Errorf("failed to seek end of journal: %w", err)
}
// After SeekTail calling Next moves to a random entry.
// To prevent this we have to call Previous first.
// see: https://bugs.freedesktop.org/show_bug.cgi?id=64614
if _, err := j.Previous(); err != nil {
- return errors.Wrap(err, "failed to move journal cursor to previous entry")
+ return fmt.Errorf("failed to move journal cursor to previous entry: %w", err)
}
}
// the api requires a next|prev before getting a cursor
if _, err := j.Next(); err != nil {
- return errors.Wrap(err, "failed to move journal cursor to next entry")
+ return fmt.Errorf("failed to move journal cursor to next entry: %w", err)
}
prevCursor, err := j.GetCursor()
if err != nil {
- return errors.Wrap(err, "failed to get journal cursor")
+ return fmt.Errorf("failed to get journal cursor: %w", err)
}
for {
select {
@@ -129,11 +131,11 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
}
if _, err := j.Next(); err != nil {
- return errors.Wrap(err, "failed to move journal cursor to next entry")
+ return fmt.Errorf("failed to move journal cursor to next entry: %w", err)
}
newCursor, err := j.GetCursor()
if err != nil {
- return errors.Wrap(err, "failed to get journal cursor")
+ return fmt.Errorf("failed to get journal cursor: %w", err)
}
if prevCursor == newCursor {
if !options.Stream || (len(options.Until) > 0 && time.Now().After(untilTime)) {
@@ -150,14 +152,14 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
entry, err := j.GetEntry()
if err != nil {
- return errors.Wrap(err, "failed to read journal entry")
+ return fmt.Errorf("failed to read journal entry: %w", err)
}
newEvent, err := newEventFromJournalEntry(entry)
if err != nil {
// We can't decode this event.
// Don't fail hard - that would make events unusable.
// Instead, log and continue.
- if errors.Cause(err) != ErrEventTypeBlank {
+ if !errors.Is(err, ErrEventTypeBlank) {
logrus.Errorf("Unable to decode event: %v", err)
}
continue
@@ -167,10 +169,9 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
}
}
return nil
-
}
-func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { //nolint
+func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) {
newEvent := Event{}
eventType, err := StringToType(entry.Fields["PODMAN_TYPE"])
if err != nil {
@@ -214,6 +215,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { /
newEvent.Details = Details{Attributes: labels}
}
}
+ newEvent.HealthStatus = entry.Fields["PODMAN_HEALTH_STATUS"]
case Network:
newEvent.ID = entry.Fields["PODMAN_ID"]
newEvent.Network = entry.Fields["PODMAN_NETWORK_NAME"]
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index 21fdd8027..4dafd8600 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -6,6 +6,7 @@ package events
import (
"bufio"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage/pkg/lockfile"
"github.com/nxadm/tail"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -90,7 +90,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
- return errors.Wrapf(err, "failed to parse event filters")
+ return fmt.Errorf("failed to parse event filters: %w", err)
}
t, err := e.getTail(options)
if err != nil {
@@ -136,7 +136,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
case Image, Volume, Pod, System, Container, Network:
// no-op
default:
- return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
+ return fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
}
if copy && applyFilters(event, filterMap) {
options.EventChannel <- event
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index 40af9aec3..9b9d12b17 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -2,6 +2,8 @@ package libpod
import (
"bufio"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -9,7 +11,6 @@ import (
"time"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -26,7 +27,7 @@ const (
func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) {
container, err := r.LookupContainer(name)
if err != nil {
- return define.HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name)
+ return define.HealthCheckContainerNotFound, fmt.Errorf("unable to look up %s to perform a health check: %w", name, err)
}
hcStatus, err := checkHealthCheckCanBeRun(container)
if err == nil {
@@ -44,14 +45,14 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
)
hcCommand := c.HealthCheckConfig().Test
if len(hcCommand) < 1 {
- return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
switch hcCommand[0] {
- case "", "NONE":
- return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
- case "CMD":
+ case "", define.HealthConfigTestNone:
+ return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
+ case define.HealthConfigTestCmd:
newCommand = hcCommand[1:]
- case "CMD-SHELL":
+ case define.HealthConfigTestCmdShell:
// TODO: SHELL command from image not available in Container - use Docker default
newCommand = []string{"/bin/sh", "-c", strings.Join(hcCommand[1:], " ")}
default:
@@ -59,11 +60,11 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
newCommand = hcCommand
}
if len(newCommand) < 1 || newCommand[0] == "" {
- return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
rPipe, wPipe, err := os.Pipe()
if err != nil {
- return define.HealthCheckInternalError, errors.Wrapf(err, "unable to create pipe for healthcheck session")
+ return define.HealthCheckInternalError, fmt.Errorf("unable to create pipe for healthcheck session: %w", err)
}
defer wPipe.Close()
defer rPipe.Close()
@@ -90,13 +91,12 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
hcResult := define.HealthCheckSuccess
config := new(ExecConfig)
config.Command = newCommand
- exitCode, hcErr := c.Exec(config, streams, nil)
+ exitCode, hcErr := c.exec(config, streams, nil, true)
if hcErr != nil {
- errCause := errors.Cause(hcErr)
hcResult = define.HealthCheckFailure
- if errCause == define.ErrOCIRuntimeNotFound ||
- errCause == define.ErrOCIRuntimePermissionDenied ||
- errCause == define.ErrOCIRuntime {
+ if errors.Is(hcErr, define.ErrOCIRuntimeNotFound) ||
+ errors.Is(hcErr, define.ErrOCIRuntimePermissionDenied) ||
+ errors.Is(hcErr, define.ErrOCIRuntime) {
returnCode = 1
hcErr = nil
} else {
@@ -125,11 +125,11 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {
returnCode = -1
hcResult = define.HealthCheckFailure
- hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
+ hcErr = fmt.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
}
hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)
if err := c.updateHealthCheckLog(hcl, inStartPeriod); err != nil {
- return hcResult, errors.Wrapf(err, "unable to update health check log %s for %s", c.healthCheckLogPath(), c.ID())
+ return hcResult, fmt.Errorf("unable to update health check log %s for %s: %w", c.healthCheckLogPath(), c.ID(), err)
}
return hcResult, hcErr
}
@@ -140,10 +140,10 @@ func checkHealthCheckCanBeRun(c *Container) (define.HealthCheckStatus, error) {
return define.HealthCheckInternalError, err
}
if cstate != define.ContainerStateRunning {
- return define.HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
+ return define.HealthCheckContainerStopped, fmt.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
- return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
return define.HealthCheckDefined, nil
}
@@ -167,7 +167,7 @@ func (c *Container) updateHealthStatus(status string) error {
healthCheck.Status = status
newResults, err := json.Marshal(healthCheck)
if err != nil {
- return errors.Wrapf(err, "unable to marshall healthchecks for writing status")
+ return fmt.Errorf("unable to marshall healthchecks for writing status: %w", err)
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
@@ -201,7 +201,7 @@ func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPerio
}
newResults, err := json.Marshal(healthCheck)
if err != nil {
- return errors.Wrapf(err, "unable to marshall healthchecks for writing")
+ return fmt.Errorf("unable to marshall healthchecks for writing: %w", err)
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
@@ -222,28 +222,37 @@ func (c *Container) getHealthCheckLog() (define.HealthCheckResults, error) {
}
b, err := ioutil.ReadFile(c.healthCheckLogPath())
if err != nil {
- return healthCheck, errors.Wrap(err, "failed to read health check log file")
+ return healthCheck, fmt.Errorf("failed to read health check log file: %w", err)
}
if err := json.Unmarshal(b, &healthCheck); err != nil {
- return healthCheck, errors.Wrapf(err, "failed to unmarshal existing healthcheck results in %s", c.healthCheckLogPath())
+ return healthCheck, fmt.Errorf("failed to unmarshal existing healthcheck results in %s: %w", c.healthCheckLogPath(), err)
}
return healthCheck, nil
}
// HealthCheckStatus returns the current state of a container with a healthcheck
func (c *Container) HealthCheckStatus() (string, error) {
- if !c.HasHealthCheck() {
- return "", errors.Errorf("container %s has no defined healthcheck", c.ID())
- }
c.lock.Lock()
defer c.lock.Unlock()
+ return c.healthCheckStatus()
+}
+
+// Internal function to return the current state of a container with a healthcheck.
+// This function does not lock the container.
+func (c *Container) healthCheckStatus() (string, error) {
+ if !c.HasHealthCheck() {
+ return "", fmt.Errorf("container %s has no defined healthcheck", c.ID())
+ }
+
if err := c.syncContainer(); err != nil {
return "", err
}
+
results, err := c.getHealthCheckLog()
if err != nil {
- return "", errors.Wrapf(err, "unable to get healthcheck log for %s", c.ID())
+ return "", fmt.Errorf("unable to get healthcheck log for %s: %w", c.ID(), err)
}
+
return results.Status, nil
}
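
HealthCheckStatus is split above into a locking public method and an unlocked internal healthCheckStatus, so code paths that already hold the container lock (such as newContainerEvent) can reuse the lookup without deadlocking. A generic sketch of that pattern under an assumed stand-in Container type, not libpod's:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type Container struct {
    	mu     sync.Mutex
    	health string
    }

    // HealthCheckStatus is the public, self-locking entry point.
    func (c *Container) HealthCheckStatus() (string, error) {
    	c.mu.Lock()
    	defer c.mu.Unlock()
    	return c.healthCheckStatus()
    }

    // healthCheckStatus assumes the caller already holds the lock.
    func (c *Container) healthCheckStatus() (string, error) {
    	if c.health == "" {
    		return "", fmt.Errorf("container has no defined healthcheck")
    	}
    	return c.health, nil
    }

    func main() {
    	c := &Container{health: "healthy"}
    	status, _ := c.HealthCheckStatus()
    	fmt.Println(status)
    }
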
diff --git a/libpod/healthcheck_linux.go b/libpod/healthcheck_linux.go
index 45b3a0e41..3fb6dfb91 100644
--- a/libpod/healthcheck_linux.go
+++ b/libpod/healthcheck_linux.go
@@ -7,9 +7,9 @@ import (
"os/exec"
"strings"
+ "github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/systemd"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -20,7 +20,7 @@ func (c *Container) createTimer() error {
}
podman, err := os.Executable()
if err != nil {
- return errors.Wrapf(err, "failed to get path for podman for a health check timer")
+ return fmt.Errorf("failed to get path for podman for a health check timer: %w", err)
}
var cmd = []string{}
@@ -35,17 +35,28 @@ func (c *Container) createTimer() error {
conn, err := systemd.ConnectToDBUS()
if err != nil {
- return errors.Wrapf(err, "unable to get systemd connection to add healthchecks")
+ return fmt.Errorf("unable to get systemd connection to add healthchecks: %w", err)
}
conn.Close()
logrus.Debugf("creating systemd-transient files: %s %s", "systemd-run", cmd)
systemdRun := exec.Command("systemd-run", cmd...)
if output, err := systemdRun.CombinedOutput(); err != nil {
- return errors.Errorf("%s", output)
+ return fmt.Errorf("%s", output)
}
return nil
}
+// Wait for a message on the channel. Return an error if the message is not "done".
+func systemdOpSuccessful(c chan string) error {
+ msg := <-c
+ switch msg {
+ case "done":
+ return nil
+ default:
+ return fmt.Errorf("expected %q but received %q", "done", msg)
+ }
+}
+
// startTimer starts a systemd timer for the healthchecks
func (c *Container) startTimer() error {
if c.disableHealthCheckSystemd() {
@@ -53,11 +64,20 @@ func (c *Container) startTimer() error {
}
conn, err := systemd.ConnectToDBUS()
if err != nil {
- return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
+ return fmt.Errorf("unable to get systemd connection to start healthchecks: %w", err)
}
defer conn.Close()
- _, err = conn.StartUnitContext(context.Background(), fmt.Sprintf("%s.service", c.ID()), "fail", nil)
- return err
+
+ startFile := fmt.Sprintf("%s.service", c.ID())
+ startChan := make(chan string)
+ if _, err := conn.StartUnitContext(context.Background(), startFile, "fail", startChan); err != nil {
+ return err
+ }
+ if err := systemdOpSuccessful(startChan); err != nil {
+ return fmt.Errorf("starting systemd health-check timer %q: %w", startFile, err)
+ }
+
+ return nil
}
// removeTransientFiles removes the systemd timer and unit files
@@ -68,33 +88,40 @@ func (c *Container) removeTransientFiles(ctx context.Context) error {
}
conn, err := systemd.ConnectToDBUS()
if err != nil {
- return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks")
+ return fmt.Errorf("unable to get systemd connection to remove healthchecks: %w", err)
}
defer conn.Close()
+
+ // Errors are returned at the very end. Let's make sure to stop and
+ // clean up as much as possible.
+ stopErrors := []error{}
+
+ // Stop the timer before the service to make sure the timer does not
+ // fire after the service is stopped.
+ timerChan := make(chan string)
timerFile := fmt.Sprintf("%s.timer", c.ID())
- serviceFile := fmt.Sprintf("%s.service", c.ID())
+ if _, err := conn.StopUnitContext(ctx, timerFile, "fail", timerChan); err != nil {
+ if !strings.HasSuffix(err.Error(), ".timer not loaded.") {
+ stopErrors = append(stopErrors, fmt.Errorf("removing health-check timer %q: %w", timerFile, err))
+ }
+ } else if err := systemdOpSuccessful(timerChan); err != nil {
+ stopErrors = append(stopErrors, fmt.Errorf("stopping systemd health-check timer %q: %w", timerFile, err))
+ }
- // If the service has failed (the healthcheck has failed), then
- // the .service file is not removed on stopping the unit file. If
- // we check the properties of the service, it will automatically
- // reset the state. But checking the state takes msecs vs usecs to
- // blindly call reset.
+ // Reset the service before stopping it to make sure it's being removed
+ // on stop.
+ serviceChan := make(chan string)
+ serviceFile := fmt.Sprintf("%s.service", c.ID())
if err := conn.ResetFailedUnitContext(ctx, serviceFile); err != nil {
- logrus.Debugf("failed to reset unit file: %q", err)
+ logrus.Debugf("Failed to reset unit file: %q", err)
}
-
- // We want to ignore errors where the timer unit and/or service unit has already
- // been removed. The error return is generic so we have to check against the
- // string in the error
- if _, err = conn.StopUnitContext(ctx, serviceFile, "fail", nil); err != nil {
+ if _, err := conn.StopUnitContext(ctx, serviceFile, "fail", serviceChan); err != nil {
if !strings.HasSuffix(err.Error(), ".service not loaded.") {
- return errors.Wrapf(err, "unable to remove service file")
- }
- }
- if _, err = conn.StopUnitContext(ctx, timerFile, "fail", nil); err != nil {
- if strings.HasSuffix(err.Error(), ".timer not loaded.") {
- return nil
+ stopErrors = append(stopErrors, fmt.Errorf("removing health-check service %q: %w", serviceFile, err))
}
+ } else if err := systemdOpSuccessful(serviceChan); err != nil {
+ stopErrors = append(stopErrors, fmt.Errorf("stopping systemd health-check service %q: %w", serviceFile, err))
}
- return err
+
+ return errorhandling.JoinErrors(stopErrors)
}
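
removeTransientFiles above now passes a completion channel to StopUnitContext and waits for the "done" result instead of firing and forgetting, collecting any failures before returning. A hedged sketch of the same pattern for a single unit, reusing the ConnectToDBUS helper shown in the diff; the unit name is hypothetical:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/containers/podman/v4/pkg/systemd"
    )

    func stopUnit(ctx context.Context, unit string) error {
    	conn, err := systemd.ConnectToDBUS()
    	if err != nil {
    		return fmt.Errorf("unable to get systemd connection: %w", err)
    	}
    	defer conn.Close()

    	ch := make(chan string)
    	if _, err := conn.StopUnitContext(ctx, unit, "fail", ch); err != nil {
    		return fmt.Errorf("stopping %q: %w", unit, err)
    	}
    	// systemd reports the job result ("done", "failed", "canceled", ...)
    	// on the channel once the job finishes.
    	if msg := <-ch; msg != "done" {
    		return fmt.Errorf("stopping %q: expected %q but received %q", unit, "done", msg)
    	}
    	return nil
    }

    func main() {
    	if err := stopUnit(context.Background(), "example.timer"); err != nil {
    		fmt.Println(err)
    	}
    }
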
diff --git a/libpod/info.go b/libpod/info.go
index bc49a6cc9..c4193b40d 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -3,6 +3,7 @@ package libpod
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"math"
@@ -25,7 +26,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -34,20 +34,20 @@ func (r *Runtime) info() (*define.Info, error) {
info := define.Info{}
versionInfo, err := define.GetVersion()
if err != nil {
- return nil, errors.Wrapf(err, "error getting version info")
+ return nil, fmt.Errorf("error getting version info: %w", err)
}
info.Version = versionInfo
// get host information
hostInfo, err := r.hostInfo()
if err != nil {
- return nil, errors.Wrapf(err, "error getting host info")
+ return nil, fmt.Errorf("error getting host info: %w", err)
}
info.Host = hostInfo
// get store information
storeInfo, err := r.storeInfo()
if err != nil {
- return nil, errors.Wrapf(err, "error getting store info")
+ return nil, fmt.Errorf("error getting store info: %w", err)
}
info.Store = storeInfo
registries := make(map[string]interface{})
@@ -55,14 +55,14 @@ func (r *Runtime) info() (*define.Info, error) {
sys := r.SystemContext()
data, err := sysregistriesv2.GetRegistries(sys)
if err != nil {
- return nil, errors.Wrapf(err, "error getting registries")
+ return nil, fmt.Errorf("error getting registries: %w", err)
}
for _, reg := range data {
registries[reg.Prefix] = reg
}
regs, err := sysregistriesv2.UnqualifiedSearchRegistries(sys)
if err != nil {
- return nil, errors.Wrapf(err, "error getting registries")
+ return nil, fmt.Errorf("error getting registries: %w", err)
}
if len(regs) > 0 {
registries["search"] = regs
@@ -86,36 +86,36 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
// lets say OS, arch, number of cpus, amount of memory, maybe os distribution/version, hostname, kernel version, uptime
mi, err := system.ReadMemInfo()
if err != nil {
- return nil, errors.Wrapf(err, "error reading memory info")
+ return nil, fmt.Errorf("error reading memory info: %w", err)
}
hostDistributionInfo := r.GetHostDistributionInfo()
kv, err := readKernelVersion()
if err != nil {
- return nil, errors.Wrapf(err, "error reading kernel version")
+ return nil, fmt.Errorf("error reading kernel version: %w", err)
}
host, err := os.Hostname()
if err != nil {
- return nil, errors.Wrapf(err, "error getting hostname")
+ return nil, fmt.Errorf("error getting hostname: %w", err)
}
seccompProfilePath, err := DefaultSeccompPath()
if err != nil {
- return nil, errors.Wrapf(err, "error getting Seccomp profile path")
+ return nil, fmt.Errorf("error getting Seccomp profile path: %w", err)
}
// Cgroups version
unified, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
- return nil, errors.Wrapf(err, "error reading cgroups mode")
+ return nil, fmt.Errorf("error reading cgroups mode: %w", err)
}
// Get Map of all available controllers
availableControllers, err := cgroups.GetAvailableControllers(nil, unified)
if err != nil {
- return nil, errors.Wrapf(err, "error getting available cgroup controllers")
+ return nil, fmt.Errorf("error getting available cgroup controllers: %w", err)
}
cpuUtil, err := getCPUUtilization()
if err != nil {
@@ -178,11 +178,11 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
if rootless.IsRootless() {
uidmappings, err := rootless.ReadMappingsProc("/proc/self/uid_map")
if err != nil {
- return nil, errors.Wrapf(err, "error reading uid mappings")
+ return nil, fmt.Errorf("error reading uid mappings: %w", err)
}
gidmappings, err := rootless.ReadMappingsProc("/proc/self/gid_map")
if err != nil {
- return nil, errors.Wrapf(err, "error reading gid mappings")
+ return nil, fmt.Errorf("error reading gid mappings: %w", err)
}
idmappings := define.IDMappings{
GIDMap: gidmappings,
@@ -199,50 +199,38 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
info.OCIRuntime = ociruntimeInfo
}
- up, err := readUptime()
+ duration, err := procUptime()
if err != nil {
- return nil, errors.Wrapf(err, "error reading up time")
+ return nil, fmt.Errorf("error reading up time: %w", err)
}
- // Convert uptime in seconds to a human-readable format
- upSeconds := up + "s"
- upDuration, err := time.ParseDuration(upSeconds)
- if err != nil {
- return nil, errors.Wrapf(err, "error parsing system uptime")
- }
-
- // TODO Isn't there a simple lib for this, something like humantime?
- hoursFound := false
- var timeBuffer bytes.Buffer
- var hoursBuffer bytes.Buffer
- for _, elem := range upDuration.String() {
- timeBuffer.WriteRune(elem)
- if elem == 'h' || elem == 'm' {
- timeBuffer.WriteRune(' ')
- if elem == 'h' {
- hoursFound = true
- }
- }
- if !hoursFound {
- hoursBuffer.WriteRune(elem)
- }
+
+ uptime := struct {
+ hours float64
+ minutes float64
+ seconds float64
+ }{
+ hours: duration.Truncate(time.Hour).Hours(),
+ minutes: duration.Truncate(time.Minute).Minutes(),
+ seconds: duration.Truncate(time.Second).Seconds(),
}
- info.Uptime = timeBuffer.String()
- if hoursFound {
- hours, err := strconv.ParseFloat(hoursBuffer.String(), 64)
- if err == nil {
- days := hours / 24
- info.Uptime = fmt.Sprintf("%s (Approximately %.2f days)", info.Uptime, days)
- }
+ // Could not find a humanize-formatter for time.Duration
+ var buffer bytes.Buffer
+ buffer.WriteString(fmt.Sprintf("%.0fh %.0fm %.2fs",
+ uptime.hours,
+ math.Mod(uptime.seconds, 3600)/60,
+ math.Mod(uptime.seconds, 60),
+ ))
+ if int64(uptime.hours) > 0 {
+ buffer.WriteString(fmt.Sprintf(" (Approximately %.2f days)", uptime.hours/24))
}
+ info.Uptime = buffer.String()
return &info, nil
}
func (r *Runtime) getContainerStoreInfo() (define.ContainerStore, error) {
- var (
- paused, running, stopped int
- )
+ var paused, running, stopped int
cs := define.ContainerStore{}
cons, err := r.GetAllContainers()
if err != nil {
@@ -252,7 +240,7 @@ func (r *Runtime) getContainerStoreInfo() (define.ContainerStore, error) {
for _, con := range cons {
state, err := con.State()
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
// container was probably removed
cs.Number--
continue
@@ -283,7 +271,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) {
}
images, err := r.store.Images()
if err != nil {
- return nil, errors.Wrapf(err, "error getting number of images")
+ return nil, fmt.Errorf("error getting number of images: %w", err)
}
conInfo, err := r.getContainerStoreInfo()
if err != nil {
@@ -293,7 +281,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) {
var grStats syscall.Statfs_t
if err := syscall.Statfs(r.store.GraphRoot(), &grStats); err != nil {
- return nil, errors.Wrapf(err, "unable to collect graph root usasge for %q", r.store.GraphRoot())
+ return nil, fmt.Errorf("unable to collect graph root usasge for %q: %w", r.store.GraphRoot(), err)
}
allocated := uint64(grStats.Bsize) * grStats.Blocks
info := define.StoreInfo{
@@ -353,16 +341,17 @@ func readKernelVersion() (string, error) {
return string(f[2]), nil
}
-func readUptime() (string, error) {
+func procUptime() (time.Duration, error) {
+ var zero time.Duration
buf, err := ioutil.ReadFile("/proc/uptime")
if err != nil {
- return "", err
+ return zero, err
}
f := bytes.Fields(buf)
if len(f) < 1 {
- return "", fmt.Errorf("invalid uptime")
+ return zero, errors.New("unable to parse uptime from /proc/uptime")
}
- return string(f[0]), nil
+ return time.ParseDuration(string(f[0]) + "s")
}
// GetHostDistributionInfo returns a map containing the host's distribution and version
@@ -418,15 +407,15 @@ func getCPUUtilization() (*define.CPUUsage, error) {
func statToPercent(stats []string) (*define.CPUUsage, error) {
userTotal, err := strconv.ParseFloat(stats[1], 64)
if err != nil {
- return nil, errors.Wrapf(err, "unable to parse user value %q", stats[1])
+ return nil, fmt.Errorf("unable to parse user value %q: %w", stats[1], err)
}
systemTotal, err := strconv.ParseFloat(stats[3], 64)
if err != nil {
- return nil, errors.Wrapf(err, "unable to parse system value %q", stats[3])
+ return nil, fmt.Errorf("unable to parse system value %q: %w", stats[3], err)
}
idleTotal, err := strconv.ParseFloat(stats[4], 64)
if err != nil {
- return nil, errors.Wrapf(err, "unable to parse idle value %q", stats[4])
+ return nil, fmt.Errorf("unable to parse idle value %q: %w", stats[4], err)
}
total := userTotal + systemTotal + idleTotal
s := define.CPUUsage{
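
The uptime rewrite above replaces hand-rolled string slicing with a time.Duration parsed from /proc/uptime. A standalone, Linux-only sketch of the same approach, reusing the formatting idea from hostInfo; the program layout is illustrative rather than the libpod code:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io/ioutil"
    	"math"
    	"time"
    )

    // procUptime parses the first field of /proc/uptime (seconds) as a duration.
    func procUptime() (time.Duration, error) {
    	buf, err := ioutil.ReadFile("/proc/uptime")
    	if err != nil {
    		return 0, err
    	}
    	fields := bytes.Fields(buf)
    	if len(fields) < 1 {
    		return 0, fmt.Errorf("unable to parse uptime from /proc/uptime")
    	}
    	return time.ParseDuration(string(fields[0]) + "s")
    }

    func main() {
    	d, err := procUptime()
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	hours := d.Truncate(time.Hour).Hours()
    	seconds := d.Truncate(time.Second).Seconds()
    	fmt.Printf("%.0fh %.0fm %.2fs\n", hours, math.Mod(seconds, 3600)/60, math.Mod(seconds, 60))
    }
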
diff --git a/libpod/kube.go b/libpod/kube.go
index 5a5fe9d35..3cb0489b3 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"math/rand"
"os"
@@ -14,6 +15,7 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/env"
v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1"
@@ -26,7 +28,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -42,8 +43,8 @@ func GenerateForKube(ctx context.Context, ctrs []*Container) (*v1.Pod, error) {
func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, error) {
// Generate the v1.Pod yaml description
var (
- ports []v1.ContainerPort //nolint
- servicePorts []v1.ServicePort //nolint
+ ports []v1.ContainerPort
+ servicePorts []v1.ServicePort
)
allContainers, err := p.allContainers()
@@ -52,11 +53,11 @@ func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, e
}
// If the pod has no containers, no sense to generate YAML
if len(allContainers) == 0 {
- return nil, servicePorts, errors.Errorf("pod %s has no containers", p.ID())
+ return nil, servicePorts, fmt.Errorf("pod %s has no containers", p.ID())
}
// If only an infra container is present, makes no sense to generate YAML
if len(allContainers) == 1 && p.HasInfraContainer() {
- return nil, servicePorts, errors.Errorf("pod %s only has an infra container", p.ID())
+ return nil, servicePorts, fmt.Errorf("pod %s only has an infra container", p.ID())
}
extraHost := make([]v1.HostAlias, 0)
@@ -515,7 +516,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
podDNS.Nameservers = make([]string, 0)
}
for _, s := range servers {
- if !util.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist
+ if !cutil.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist
podDNS.Nameservers = append(podDNS.Nameservers, s)
}
}
@@ -526,7 +527,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
podDNS.Searches = make([]string, 0)
}
for _, d := range domains {
- if !util.StringInSlice(d, podDNS.Searches) { // only append if it does not exist
+ if !cutil.StringInSlice(d, podDNS.Searches) { // only append if it does not exist
podDNS.Searches = append(podDNS.Searches, d)
}
}
@@ -543,7 +544,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
podName := removeUnderscores(ctrs[0].Name())
// Check if the pod name and container name will end up conflicting
// Append -pod if so
- if util.StringInSlice(podName, ctrNames) {
+ if cutil.StringInSlice(podName, ctrNames) {
podName += "-pod"
}
@@ -572,7 +573,7 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
if !c.Privileged() && len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.config.Spec.Linux.Devices)
- return kubeContainer, kubeVolumes, nil, annotations, errors.Wrapf(define.ErrNotImplemented, "linux devices")
+ return kubeContainer, kubeVolumes, nil, annotations, fmt.Errorf("linux devices: %w", define.ErrNotImplemented)
}
if len(c.config.UserVolumes) > 0 {
@@ -742,7 +743,7 @@ func portMappingToContainerPort(portMappings []types.PortMapping) ([]v1.Containe
case "SCTP":
protocol = v1.ProtocolSCTP
default:
- return containerPorts, errors.Errorf("unknown network protocol %s", p.Protocol)
+ return containerPorts, fmt.Errorf("unknown network protocol %s", p.Protocol)
}
for i := uint16(0); i < p.Range; i++ {
cp := v1.ContainerPort{
@@ -771,7 +772,7 @@ func libpodEnvVarsToKubeEnvVars(envs []string, imageEnvs []string) ([]v1.EnvVar,
for _, e := range envs {
split := strings.SplitN(e, "=", 2)
if len(split) != 2 {
- return envVars, errors.Errorf("environment variable %s is malformed; should be key=value", e)
+ return envVars, fmt.Errorf("environment variable %s is malformed; should be key=value", e)
}
if defaultEnv[split[0]] == split[1] {
continue
@@ -824,7 +825,7 @@ func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume
// generateKubePersistentVolumeClaim converts a ContainerNamedVolume to a Kubernetes PersistentVolumeClaim
func generateKubePersistentVolumeClaim(v *ContainerNamedVolume) (v1.VolumeMount, v1.Volume) {
- ro := util.StringInSlice("ro", v.Options)
+ ro := cutil.StringInSlice("ro", v.Options)
// To avoid naming conflicts with any host path mounts, add a unique suffix to the volume's name.
name := v.Name + "-pvc"
@@ -857,7 +858,7 @@ func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) {
name += "-host"
vm.Name = name
vm.MountPath = m.Destination
- if util.StringInSlice("ro", m.Options) {
+ if cutil.StringInSlice("ro", m.Options) {
vm.ReadOnly = true
}
@@ -891,11 +892,11 @@ func isHostPathDirectory(hostPathSource string) (bool, error) {
func convertVolumePathToName(hostSourcePath string) (string, error) {
if len(hostSourcePath) == 0 {
- return "", errors.Errorf("hostSourcePath must be specified to generate volume name")
+ return "", errors.New("hostSourcePath must be specified to generate volume name")
}
if len(hostSourcePath) == 1 {
if hostSourcePath != "/" {
- return "", errors.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
+ return "", fmt.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
}
// add special case name
return "root", nil
@@ -915,7 +916,7 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
// Find caps in the defaultCaps but not in the container's
// those indicate a dropped cap
for _, capability := range defaultCaps {
- if !util.StringInSlice(capability, containerCaps) {
+ if !cutil.StringInSlice(capability, containerCaps) {
if _, ok := dedupDrop[capability]; !ok {
drop = append(drop, v1.Capability(capability))
dedupDrop[capability] = true
@@ -925,7 +926,7 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
// Find caps in the container but not in the defaults; those indicate
// an added cap
for _, capability := range containerCaps {
- if !util.StringInSlice(capability, defaultCaps) {
+ if !cutil.StringInSlice(capability, defaultCaps) {
if _, ok := dedupAdd[capability]; !ok {
add = append(add, v1.Capability(capability))
dedupAdd[capability] = true
@@ -1024,7 +1025,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
defer c.lock.Unlock()
}
if err := c.syncContainer(); err != nil {
- return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
+ return nil, fmt.Errorf("unable to sync container during YAML generation: %w", err)
}
mountpoint := c.state.Mountpoint
@@ -1032,7 +1033,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
var err error
mountpoint, err = c.mount()
if err != nil {
- return nil, errors.Wrapf(err, "failed to mount %s mountpoint", c.ID())
+ return nil, fmt.Errorf("failed to mount %s mountpoint: %w", c.ID(), err)
}
defer func() {
if err := c.unmount(false); err != nil {
diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go
index 4685872b6..1379e690a 100644
--- a/libpod/lock/file/file_lock.go
+++ b/libpod/lock/file/file_lock.go
@@ -1,6 +1,7 @@
package file
import (
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -8,13 +9,12 @@ import (
"syscall"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// FileLocks is a struct enabling POSIX lock locking in a shared memory
// segment.
-type FileLocks struct { // nolint
+type FileLocks struct { //nolint:revive // struct name stutters
lockPath string
valid bool
}
@@ -23,7 +23,7 @@ type FileLocks struct { // nolint
func CreateFileLock(path string) (*FileLocks, error) {
_, err := os.Stat(path)
if err == nil {
- return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path)
+ return nil, fmt.Errorf("directory %s exists: %w", path, syscall.EEXIST)
}
if err := os.MkdirAll(path, 0711); err != nil {
return nil, err
@@ -57,11 +57,11 @@ func OpenFileLock(path string) (*FileLocks, error) {
// Close() is only intended to be used while testing the locks.
func (locks *FileLocks) Close() error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
err := os.RemoveAll(locks.lockPath)
if err != nil {
- return errors.Wrapf(err, "deleting directory %s", locks.lockPath)
+ return fmt.Errorf("deleting directory %s: %w", locks.lockPath, err)
}
return nil
}
@@ -73,7 +73,7 @@ func (locks *FileLocks) getLockPath(lck uint32) string {
// AllocateLock allocates a lock and returns the index of the lock that was allocated.
func (locks *FileLocks) AllocateLock() (uint32, error) {
if !locks.valid {
- return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
id := uint32(0)
@@ -84,7 +84,7 @@ func (locks *FileLocks) AllocateLock() (uint32, error) {
if os.IsExist(err) {
continue
}
- return 0, errors.Wrap(err, "creating lock file")
+ return 0, fmt.Errorf("creating lock file: %w", err)
}
f.Close()
break
@@ -98,12 +98,12 @@ func (locks *FileLocks) AllocateLock() (uint32, error) {
// returned.
func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
- return errors.Wrapf(err, "error creating lock %d", lck)
+ return fmt.Errorf("error creating lock %d: %w", lck, err)
}
f.Close()
@@ -115,10 +115,10 @@ func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
// The given lock must be already allocated, or an error will be returned.
func (locks *FileLocks) DeallocateLock(lck uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
if err := os.Remove(locks.getLockPath(lck)); err != nil {
- return errors.Wrapf(err, "deallocating lock %d", lck)
+ return fmt.Errorf("deallocating lock %d: %w", lck, err)
}
return nil
}
@@ -127,11 +127,11 @@ func (locks *FileLocks) DeallocateLock(lck uint32) error {
// other containers and pods.
func (locks *FileLocks) DeallocateAllLocks() error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
files, err := ioutil.ReadDir(locks.lockPath)
if err != nil {
- return errors.Wrapf(err, "error reading directory %s", locks.lockPath)
+ return fmt.Errorf("error reading directory %s: %w", locks.lockPath, err)
}
var lastErr error
for _, f := range files {
@@ -148,12 +148,12 @@ func (locks *FileLocks) DeallocateAllLocks() error {
// LockFileLock locks the given lock.
func (locks *FileLocks) LockFileLock(lck uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
l, err := storage.GetLockfile(locks.getLockPath(lck))
if err != nil {
- return errors.Wrapf(err, "error acquiring lock")
+ return fmt.Errorf("error acquiring lock: %w", err)
}
l.Lock()
@@ -163,11 +163,11 @@ func (locks *FileLocks) LockFileLock(lck uint32) error {
// UnlockFileLock unlocks the given lock.
func (locks *FileLocks) UnlockFileLock(lck uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
l, err := storage.GetLockfile(locks.getLockPath(lck))
if err != nil {
- return errors.Wrapf(err, "error acquiring lock")
+ return fmt.Errorf("error acquiring lock: %w", err)
}
l.Unlock()
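
The lock code above consistently wraps syscall.EINVAL with %w so callers can still detect the errno with errors.Is after the migration away from errors.Wrapf. A small sketch of that pattern under a stand-in FileLocks type, not libpod's:

    package main

    import (
    	"errors"
    	"fmt"
    	"syscall"
    )

    type FileLocks struct {
    	valid bool
    }

    // Close fails with a wrapped EINVAL once the locks are no longer valid.
    func (l *FileLocks) Close() error {
    	if !l.valid {
    		return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
    	}
    	l.valid = false
    	return nil
    }

    func main() {
    	l := &FileLocks{}
    	err := l.Close()
    	fmt.Println(err)                            // locks have already been closed: invalid argument
    	fmt.Println(errors.Is(err, syscall.EINVAL)) // true
    }
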
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index f7f47760c..f00f01032 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -1,9 +1,9 @@
package lock
import (
+ "errors"
+ "fmt"
"sync"
-
- "github.com/pkg/errors"
)
// Mutex holds a single mutex and whether it has been allocated.
@@ -49,7 +49,7 @@ type InMemoryManager struct {
// of locks.
func NewInMemoryManager(numLocks uint32) (Manager, error) {
if numLocks == 0 {
- return nil, errors.Errorf("must provide a non-zero number of locks")
+ return nil, errors.New("must provide a non-zero number of locks")
}
manager := new(InMemoryManager)
@@ -78,13 +78,13 @@ func (m *InMemoryManager) AllocateLock() (Locker, error) {
}
}
- return nil, errors.Errorf("all locks have been allocated")
+ return nil, errors.New("all locks have been allocated")
}
// RetrieveLock retrieves a lock from the manager.
func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
if id >= m.numLocks {
- return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
+ return nil, fmt.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
}
return m.locks[id], nil
@@ -94,11 +94,11 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
// use) and returns it.
func (m *InMemoryManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
if id >= m.numLocks {
- return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks)
+ return nil, fmt.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks)
}
if m.locks[id].allocated {
- return nil, errors.Errorf("given lock ID %d is already in use, cannot reallocate", id)
+ return nil, fmt.Errorf("given lock ID %d is already in use, cannot reallocate", id)
}
m.locks[id].allocated = true
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index c7f4d1bc5..3334a4018 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -11,11 +11,12 @@ package shm
import "C"
import (
+ "errors"
+ "fmt"
"runtime"
"syscall"
"unsafe"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -28,7 +29,7 @@ var (
// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment.
-type SHMLocks struct { // nolint
+type SHMLocks struct {
lockStruct *C.shm_struct_t
maxLocks uint32
valid bool
@@ -40,7 +41,7 @@ type SHMLocks struct { // nolint
// size used by the underlying implementation.
func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
if numLocks == 0 {
- return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
+ return nil, fmt.Errorf("number of locks must be greater than 0: %w", syscall.EINVAL)
}
locks := new(SHMLocks)
@@ -52,7 +53,7 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
if lockStruct == nil {
// We got a null pointer, so something errored
- return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to create %d locks in %s", numLocks, path)
+ return nil, fmt.Errorf("failed to create %d locks in %s: %w", numLocks, path, syscall.Errno(-1*errCode))
}
locks.lockStruct = lockStruct
@@ -69,7 +70,7 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
// segment was created with.
func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
if numLocks == 0 {
- return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
+ return nil, fmt.Errorf("number of locks must be greater than 0: %w", syscall.EINVAL)
}
locks := new(SHMLocks)
@@ -81,7 +82,7 @@ func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
if lockStruct == nil {
// We got a null pointer, so something errored
- return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to open %d locks in %s", numLocks, path)
+ return nil, fmt.Errorf("failed to open %d locks in %s: %w", numLocks, path, syscall.Errno(-1*errCode))
}
locks.lockStruct = lockStruct
@@ -103,7 +104,7 @@ func (locks *SHMLocks) GetMaxLocks() uint32 {
// Close() is only intended to be used while testing the locks.
func (locks *SHMLocks) Close() error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
locks.valid = false
@@ -124,7 +125,7 @@ func (locks *SHMLocks) Close() error {
// created will result in an error, and no semaphore will be allocated.
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
if !locks.valid {
- return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
// This returns a U64, so we have the full u32 range available for
@@ -138,7 +139,7 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
// that there's no room in the SHM inn for this lock, this tends to send normal people
// down the path of checking disk-space which is not actually their problem.
// Give a clue that it's actually due to num_locks filling up.
- var errFull = errors.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks)
+ var errFull = fmt.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks)
return uint32(retCode), errFull
}
return uint32(retCode), syscall.Errno(-1 * retCode)
@@ -153,7 +154,7 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
// returned.
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
@@ -169,11 +170,11 @@ func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
// The given semaphore must be already allocated, or an error will be returned.
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
if sem > locks.maxLocks {
- return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
}
retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
@@ -189,7 +190,7 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
// other containers and pods.
func (locks *SHMLocks) DeallocateAllSemaphores() error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
retCode := C.deallocate_all_semaphores(locks.lockStruct)
@@ -210,11 +211,11 @@ func (locks *SHMLocks) DeallocateAllSemaphores() error {
// succeed.
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
if sem > locks.maxLocks {
- return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
}
// For pthread mutexes, we have to guarantee lock and unlock happen in
@@ -238,11 +239,11 @@ func (locks *SHMLocks) LockSemaphore(sem uint32) error {
// succeed.
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
if sem > locks.maxLocks {
- return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
}
retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 3076cd864..fa20bc353 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -4,10 +4,10 @@
package lock
import (
+ "fmt"
"syscall"
"github.com/containers/podman/v4/libpod/lock/shm"
- "github.com/pkg/errors"
)
// SHMLockManager manages shared memory locks.
@@ -66,8 +66,8 @@ func (m *SHMLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
lock.manager = m
if id >= m.locks.GetMaxLocks() {
- return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - max lock size is %d",
- id, m.locks.GetMaxLocks()-1)
+ return nil, fmt.Errorf("lock ID %d is too large - max lock size is %d: %w",
+ id, m.locks.GetMaxLocks()-1, syscall.EINVAL)
}
if err := m.locks.AllocateGivenSemaphore(id); err != nil {
@@ -84,8 +84,8 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
lock.manager = m
if id >= m.locks.GetMaxLocks() {
- return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - max lock size is %d",
- id, m.locks.GetMaxLocks()-1)
+ return nil, fmt.Errorf("lock ID %d is too large - max lock size is %d: %w",
+ id, m.locks.GetMaxLocks()-1, syscall.EINVAL)
}
return lock, nil
diff --git a/libpod/logs/log.go b/libpod/logs/log.go
index 4d7d5ac58..43da8d904 100644
--- a/libpod/logs/log.go
+++ b/libpod/logs/log.go
@@ -1,6 +1,7 @@
package logs
import (
+ "errors"
"fmt"
"io"
"os"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/logs/reversereader"
"github.com/nxadm/tail"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -105,7 +105,7 @@ func getTailLog(path string, tail int) ([]*LogLine, error) {
for {
s, err := rr.Read()
if err != nil {
- if errors.Cause(err) == io.EOF {
+ if errors.Is(err, io.EOF) {
inputs <- []string{leftover}
} else {
logrus.Error(err)
@@ -228,11 +228,11 @@ func (l *LogLine) Until(until time.Time) bool {
func NewLogLine(line string) (*LogLine, error) {
splitLine := strings.Split(line, " ")
if len(splitLine) < 4 {
- return nil, errors.Errorf("'%s' is not a valid container log line", line)
+ return nil, fmt.Errorf("'%s' is not a valid container log line", line)
}
logTime, err := time.Parse(LogTimeFormat, splitLine[0])
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
+ return nil, fmt.Errorf("unable to convert time %s from container log: %w", splitLine[0], err)
}
l := LogLine{
Time: logTime,
@@ -249,11 +249,11 @@ func NewLogLine(line string) (*LogLine, error) {
func NewJournaldLogLine(line string, withID bool) (*LogLine, error) {
splitLine := strings.Split(line, " ")
if len(splitLine) < 4 {
- return nil, errors.Errorf("'%s' is not a valid container log line", line)
+ return nil, fmt.Errorf("'%s' is not a valid container log line", line)
}
logTime, err := time.Parse(LogTimeFormat, splitLine[0])
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
+ return nil, fmt.Errorf("unable to convert time %s from container log: %w", splitLine[0], err)
}
var msg, id string
if withID {
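NewLogLine above splits a container log line into a timestamp, a stream name, a partial/full flag and the message, and now wraps time.Parse failures with %w. A rough sketch of the same parse (not part of the patch); the stand-in logTimeFormat constant below is an assumption, since the real LogTimeFormat is defined elsewhere in the logs package, and SplitN is used here so the message stays in one piece:
package main

import (
	"fmt"
	"strings"
	"time"
)

// assumed stand-in for logs.LogTimeFormat, which is defined outside this hunk
const logTimeFormat = time.RFC3339Nano

// parseLogLine expects "<time> <stream> <partial/full flag> <message...>".
func parseLogLine(line string) (time.Time, string, string, string, error) {
	parts := strings.SplitN(line, " ", 4)
	if len(parts) < 4 {
		return time.Time{}, "", "", "", fmt.Errorf("%q is not a valid container log line", line)
	}
	t, err := time.Parse(logTimeFormat, parts[0])
	if err != nil {
		return time.Time{}, "", "", "", fmt.Errorf("unable to convert time %s from container log: %w", parts[0], err)
	}
	return t, parts[1], parts[2], parts[3], nil
}

func main() {
	t, stream, flag, msg, err := parseLogLine("2022-06-01T10:00:00.000000000Z stdout F hello world")
	fmt.Println(t.UTC(), stream, flag, msg, err)
}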
diff --git a/libpod/logs/reversereader/reversereader.go b/libpod/logs/reversereader/reversereader.go
index 4fa1a3f88..f2e71fb61 100644
--- a/libpod/logs/reversereader/reversereader.go
+++ b/libpod/logs/reversereader/reversereader.go
@@ -1,10 +1,10 @@
package reversereader
import (
+ "errors"
+ "fmt"
"io"
"os"
-
- "github.com/pkg/errors"
)
// ReverseReader structure for reading a file backwards
@@ -49,12 +49,12 @@ func NewReverseReader(reader *os.File) (*ReverseReader, error) {
// then sets the new offset one pagesize less than the previous read.
func (r *ReverseReader) Read() (string, error) {
if r.offset < 0 {
- return "", errors.Wrap(io.EOF, "at beginning of file")
+ return "", fmt.Errorf("at beginning of file: %w", io.EOF)
}
// Read from given offset
b := make([]byte, r.readSize)
n, err := r.reader.ReadAt(b, r.offset)
- if err != nil && errors.Cause(err) != io.EOF {
+ if err != nil && !errors.Is(err, io.EOF) {
return "", err
}
if int64(n) < r.readSize {
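ReverseReader reads a file backwards one page-sized chunk at a time via ReadAt, and the change above makes its EOF handling use errors.Is so wrapped EOFs still match. A hedged, standard-library-only sketch of that backwards-chunking idea (not part of the patch), with a hypothetical small chunk size for illustration:
package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// readBackwards yields chunks of f from the end towards the start,
// mirroring the ReverseReader idea with a fixed chunk size.
func readBackwards(f *os.File, chunk int64) ([]string, error) {
	info, err := f.Stat()
	if err != nil {
		return nil, err
	}
	var out []string
	offset := info.Size() - chunk
	if offset < 0 {
		offset = 0
	}
	size := chunk
	for {
		buf := make([]byte, size)
		n, err := f.ReadAt(buf, offset)
		if err != nil && !errors.Is(err, io.EOF) {
			return nil, err
		}
		out = append(out, string(buf[:n]))
		if offset == 0 {
			return out, nil
		}
		offset -= chunk
		if offset < 0 {
			size = chunk + offset // only the remaining head of the file is left
			offset = 0
		}
	}
}

func main() {
	f, err := os.Open("/etc/hostname") // any readable file works here
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	chunks, err := readBackwards(f, 16)
	fmt.Println(len(chunks), err)
}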
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 0c124cf0b..c05796768 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -6,6 +6,7 @@ package libpod
import (
"crypto/rand"
"crypto/sha256"
+ "errors"
"fmt"
"io/ioutil"
"net"
@@ -21,22 +22,21 @@ import (
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/common/libnetwork/etchosts"
+ "github.com/containers/common/libnetwork/resolvconf"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/machine"
"github.com/containers/common/pkg/netns"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/namespaces"
- "github.com/containers/podman/v4/pkg/resolvconf"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/containers/podman/v4/pkg/util"
"github.com/containers/podman/v4/utils"
"github.com/containers/storage/pkg/lockfile"
- spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
@@ -109,7 +109,7 @@ func (r *RootlessNetNS) getPath(path string) string {
func (r *RootlessNetNS) Do(toRun func() error) error {
err := r.ns.Do(func(_ ns.NetNS) error {
// Before we can run the given function,
- // we have to setup all mounts correctly.
+ // we have to set up all mounts correctly.
// The order of the mounts is IMPORTANT.
// The idea of the extra mount ns is to make /run and /var/lib/cni writeable
@@ -127,19 +127,19 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
// this must happen inside the netns thread.
err := unix.Unshare(unix.CLONE_NEWNS)
if err != nil {
- return errors.Wrapf(err, "cannot create a new mount namespace")
+ return fmt.Errorf("cannot create a new mount namespace: %w", err)
}
xdgRuntimeDir, err := util.GetRuntimeDir()
if err != nil {
- return errors.Wrap(err, "could not get runtime directory")
+ return fmt.Errorf("could not get runtime directory: %w", err)
}
newXDGRuntimeDir := r.getPath(xdgRuntimeDir)
// 1. Mount the netns into the new run to keep them accessible.
// Otherwise cni setup will fail because it cannot access the netns files.
err = unix.Mount(xdgRuntimeDir, newXDGRuntimeDir, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "")
if err != nil {
- return errors.Wrap(err, "failed to mount runtime directory for rootless netns")
+ return fmt.Errorf("failed to mount runtime directory for rootless netns: %w", err)
}
// 2. Also keep /run/systemd if it exists.
@@ -150,7 +150,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
newRunSystemd := r.getPath(runSystemd)
err = unix.Mount(runSystemd, newRunSystemd, "none", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
- return errors.Wrap(err, "failed to mount /run/systemd directory for rootless netns")
+ return fmt.Errorf("failed to mount /run/systemd directory for rootless netns: %w", err)
}
}
@@ -185,7 +185,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
fi, err := os.Lstat(path)
if err != nil {
- return errors.Wrap(err, "failed to stat resolv.conf path")
+ return fmt.Errorf("failed to stat resolv.conf path: %w", err)
}
// no link, just continue
@@ -195,7 +195,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
link, err := os.Readlink(path)
if err != nil {
- return errors.Wrap(err, "failed to read resolv.conf symlink")
+ return fmt.Errorf("failed to read resolv.conf symlink: %w", err)
}
linkCount++
if filepath.IsAbs(link) {
@@ -231,25 +231,25 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
rsr := r.getPath("/run/systemd/resolve")
err = unix.Mount("", rsr, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, "")
if err != nil {
- return errors.Wrapf(err, "failed to mount tmpfs on %q for rootless netns", rsr)
+ return fmt.Errorf("failed to mount tmpfs on %q for rootless netns: %w", rsr, err)
}
}
if strings.HasPrefix(resolvePath, "/run/") {
resolvePath = r.getPath(resolvePath)
err = os.MkdirAll(filepath.Dir(resolvePath), 0700)
if err != nil {
- return errors.Wrap(err, "failed to create rootless-netns resolv.conf directory")
+ return fmt.Errorf("failed to create rootless-netns resolv.conf directory: %w", err)
}
// we want to bind mount on this file so we have to create the file first
_, err = os.OpenFile(resolvePath, os.O_CREATE|os.O_RDONLY, 0700)
if err != nil {
- return errors.Wrap(err, "failed to create rootless-netns resolv.conf file")
+ return fmt.Errorf("failed to create rootless-netns resolv.conf file: %w", err)
}
}
// mount resolv.conf to make use of the host dns
err = unix.Mount(r.getPath("resolv.conf"), resolvePath, "none", unix.MS_BIND, "")
if err != nil {
- return errors.Wrap(err, "failed to mount resolv.conf for rootless netns")
+ return fmt.Errorf("failed to mount resolv.conf for rootless netns: %w", err)
}
// 4. CNI plugins need access to /var/lib/cni and /run
@@ -274,14 +274,14 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
// make sure to mount var first
err = unix.Mount(varDir, varTarget, "none", unix.MS_BIND, "")
if err != nil {
- return errors.Wrapf(err, "failed to mount %s for rootless netns", varTarget)
+ return fmt.Errorf("failed to mount %s for rootless netns: %w", varTarget, err)
}
// 5. Mount the new prepared run dir to /run, it has to be recursive to keep the other bind mounts.
runDir := r.getPath("run")
err = unix.Mount(runDir, "/run", "none", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
- return errors.Wrap(err, "failed to mount /run for rootless netns")
+ return fmt.Errorf("failed to mount /run for rootless netns: %w", err)
}
// run the given function in the correct namespace
@@ -291,7 +291,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
return err
}
-// Cleanup the rootless network namespace if needed.
+// Clean up the rootless network namespace if needed.
// It checks if we have running containers with the bridge network mode.
// Cleanup() expects that r.Lock is locked
func (r *RootlessNetNS) Cleanup(runtime *Runtime) error {
@@ -377,7 +377,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
lfile := filepath.Join(runDir, "rootless-netns.lock")
lock, err := lockfile.GetLockfile(lfile)
if err != nil {
- return nil, errors.Wrap(err, "failed to get rootless-netns lockfile")
+ return nil, fmt.Errorf("failed to get rootless-netns lockfile: %w", err)
}
lock.Lock()
defer func() {
@@ -391,7 +391,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
rootlessNetNsDir := filepath.Join(runDir, rootlessNetNsName)
err = os.MkdirAll(rootlessNetNsDir, 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns directory")
+ return nil, fmt.Errorf("could not create rootless-netns directory: %w", err)
}
nsDir, err := netns.GetNSRunDir()
@@ -411,15 +411,15 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
if err != nil {
if !new {
// return an error if we could not get the namespace and should not create one
- return nil, errors.Wrap(err, "error getting rootless network namespace")
+ return nil, fmt.Errorf("error getting rootless network namespace: %w", err)
}
// create a new namespace
logrus.Debugf("creating rootless network namespace with name %q", netnsName)
ns, err = netns.NewNSWithName(netnsName)
if err != nil {
- return nil, errors.Wrap(err, "error creating rootless network namespace")
+ return nil, fmt.Errorf("error creating rootless network namespace: %w", err)
}
- // setup slirp4netns here
+ // set up slirp4netns here
path := r.config.Engine.NetworkCmdPath
if path == "" {
var err error
@@ -431,7 +431,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
syncR, syncW, err := os.Pipe()
if err != nil {
- return nil, errors.Wrapf(err, "failed to open pipe")
+ return nil, fmt.Errorf("failed to open pipe: %w", err)
}
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
@@ -442,7 +442,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
}
slirpFeatures, err := checkSlirpFlags(path)
if err != nil {
- return nil, errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
+ return nil, fmt.Errorf("error checking slirp4netns binary %s: %q: %w", path, err, err)
}
cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures)
if err != nil {
@@ -470,25 +470,25 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
logPath := filepath.Join(r.config.Engine.TmpDir, "slirp4netns-rootless-netns.log")
logFile, err := os.Create(logPath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
+ return nil, fmt.Errorf("failed to open slirp4netns log file %s: %w", logPath, err)
}
defer logFile.Close()
// Unlink the file immediately so we won't need to worry about cleaning it up later.
// It is still accessible through the open fd logFile.
if err := os.Remove(logPath); err != nil {
- return nil, errors.Wrapf(err, "delete file %s", logPath)
+ return nil, fmt.Errorf("delete file %s: %w", logPath, err)
}
cmd.Stdout = logFile
cmd.Stderr = logFile
if err := cmd.Start(); err != nil {
- return nil, errors.Wrapf(err, "failed to start slirp4netns process")
+ return nil, fmt.Errorf("failed to start slirp4netns process: %w", err)
}
// create pid file for the slirp4netns process
// this is needed to kill the process in the cleanup
pid := strconv.Itoa(cmd.Process.Pid)
err = ioutil.WriteFile(filepath.Join(rootlessNetNsDir, rootlessNetNsSilrp4netnsPidFile), []byte(pid), 0700)
if err != nil {
- return nil, errors.Wrap(err, "unable to write rootless-netns slirp4netns pid file")
+ return nil, fmt.Errorf("unable to write rootless-netns slirp4netns pid file: %w", err)
}
defer func() {
@@ -513,63 +513,59 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
// build a new resolv.conf file which uses the slirp4netns dns server address
resolveIP, err := GetSlirp4netnsDNS(nil)
if err != nil {
- return nil, errors.Wrap(err, "failed to determine default slirp4netns DNS address")
+ return nil, fmt.Errorf("failed to determine default slirp4netns DNS address: %w", err)
}
if netOptions.cidr != "" {
_, cidr, err := net.ParseCIDR(netOptions.cidr)
if err != nil {
- return nil, errors.Wrap(err, "failed to parse slirp4netns cidr")
+ return nil, fmt.Errorf("failed to parse slirp4netns cidr: %w", err)
}
resolveIP, err = GetSlirp4netnsDNS(cidr)
if err != nil {
- return nil, errors.Wrapf(err, "failed to determine slirp4netns DNS address from cidr: %s", cidr.String())
+ return nil, fmt.Errorf("failed to determine slirp4netns DNS address from cidr: %s: %w", cidr.String(), err)
}
}
- conf, err := resolvconf.Get()
- if err != nil {
- return nil, err
- }
- conf, err = resolvconf.FilterResolvDNS(conf.Content, netOptions.enableIPv6, true)
- if err != nil {
- return nil, err
- }
- searchDomains := resolvconf.GetSearchDomains(conf.Content)
- dnsOptions := resolvconf.GetOptions(conf.Content)
- nameServers := resolvconf.GetNameservers(conf.Content)
- _, err = resolvconf.Build(filepath.Join(rootlessNetNsDir, "resolv.conf"), append([]string{resolveIP.String()}, nameServers...), searchDomains, dnsOptions)
- if err != nil {
- return nil, errors.Wrap(err, "failed to create rootless netns resolv.conf")
+ if err := resolvconf.New(&resolvconf.Params{
+ Path: filepath.Join(rootlessNetNsDir, "resolv.conf"),
+ // fake the netns since we want to filter localhost
+ Namespaces: []specs.LinuxNamespace{
+ {Type: specs.NetworkNamespace},
+ },
+ IPv6Enabled: netOptions.enableIPv6,
+ KeepHostServers: true,
+ Nameservers: []string{resolveIP.String()},
+ }); err != nil {
+ return nil, fmt.Errorf("failed to create rootless netns resolv.conf: %w", err)
}
-
// create cni directories to store files
// they will be bind mounted to the correct location in an extra mount ns
err = os.MkdirAll(filepath.Join(rootlessNetNsDir, persistentCNIDir), 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns var directory")
+ return nil, fmt.Errorf("could not create rootless-netns var directory: %w", err)
}
runDir := filepath.Join(rootlessNetNsDir, "run")
err = os.MkdirAll(runDir, 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns run directory")
+ return nil, fmt.Errorf("could not create rootless-netns run directory: %w", err)
}
// relabel the new run directory to the iptables /run label
// this is important, otherwise the iptables command will fail
err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false)
if err != nil {
- return nil, errors.Wrap(err, "could not create relabel rootless-netns run directory")
+ return nil, fmt.Errorf("could not create relabel rootless-netns run directory: %w", err)
}
// create systemd run directory
err = os.MkdirAll(filepath.Join(runDir, "systemd"), 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns systemd directory")
+ return nil, fmt.Errorf("could not create rootless-netns systemd directory: %w", err)
}
// create the directory for the netns files at the same location
// relative to the rootless-netns location
err = os.MkdirAll(filepath.Join(rootlessNetNsDir, nsDir), 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns netns directory")
+ return nil, fmt.Errorf("could not create rootless-netns netns directory: %w", err)
}
}
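The hunk above replaces the manual resolvconf Get/FilterResolvDNS/Build pipeline with a single call to resolvconf.New from containers/common, declaring a fake network namespace so localhost nameservers are filtered out while the host's remaining servers are kept and the slirp4netns DNS address is prepended. A minimal sketch of that call (not part of the patch); it uses only the Params fields visible in the hunk, and the directory and DNS address in main are hypothetical:
package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/common/libnetwork/resolvconf"
	"github.com/opencontainers/runtime-spec/specs-go"
)

func writeRootlessResolvConf(dir string, ipv6 bool, dns string) error {
	// Mirrors the call in the hunk above: declare a network namespace so
	// localhost nameservers are filtered, keep the host's other servers,
	// and put the slirp4netns DNS address first.
	return resolvconf.New(&resolvconf.Params{
		Path: filepath.Join(dir, "resolv.conf"),
		Namespaces: []specs.LinuxNamespace{
			{Type: specs.NetworkNamespace},
		},
		IPv6Enabled:     ipv6,
		KeepHostServers: true,
		Nameservers:     []string{dns},
	})
}

func main() {
	// hypothetical example values
	if err := writeRootlessResolvConf("/tmp/rootless-netns", false, "10.0.2.3"); err != nil {
		fmt.Println(err)
	}
}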
@@ -660,9 +656,9 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (status map[str
return nil, err
}
- // setup rootless port forwarder when rootless with ports and the network status is empty,
+ // set up rootless port forwarder when rootless with ports and the network status is empty,
// if this is called from network reload the network status will not be empty and we should
- // not setup port because they are still active
+ // not set up port because they are still active
if rootless.IsRootless() && len(ctr.config.PortMappings) > 0 && ctr.getNetworkStatus() == nil {
// set up port forwarder for rootless netns
netnsPath := ctrNS.Path()
@@ -679,7 +675,7 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (status map[str
func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q map[string]types.StatusBlock, retErr error) {
ctrNS, err := netns.NewNS()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating network namespace for container %s", ctr.ID())
+ return nil, nil, fmt.Errorf("error creating network namespace for container %s: %w", ctr.ID(), err)
}
defer func() {
if retErr != nil {
@@ -706,7 +702,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
b := make([]byte, 16)
if _, err := rand.Reader.Read(b); err != nil {
- return errors.Wrapf(err, "failed to generate random netns name")
+ return fmt.Errorf("failed to generate random netns name: %w", err)
}
nsPath, err := netns.GetNSRunDir()
if err != nil {
@@ -727,7 +723,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
}
if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
- return errors.Wrapf(err, "cannot mount %s", nsPath)
+ return fmt.Errorf("cannot mount %s: %w", nsPath, err)
}
netNS, err := ns.GetNS(nsPath)
@@ -746,7 +742,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
func joinNetNS(path string) (ns.NetNS, error) {
netNS, err := ns.GetNS(path)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path)
+ return nil, fmt.Errorf("error retrieving network namespace at %s: %w", path, err)
}
return netNS, nil
@@ -762,7 +758,7 @@ func (r *Runtime) closeNetNS(ctr *Container) error {
}
if err := ctr.state.NetNS.Close(); err != nil {
- return errors.Wrapf(err, "error closing network namespace for container %s", ctr.ID())
+ return fmt.Errorf("error closing network namespace for container %s: %w", ctr.ID(), err)
}
ctr.state.NetNS = nil
@@ -778,8 +774,10 @@ func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error {
return err
}
tearDownPod := func() error {
- err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts})
- return errors.Wrapf(err, "error tearing down network namespace configuration for container %s", opts.ContainerID)
+ if err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts}); err != nil {
+ return fmt.Errorf("error tearing down network namespace configuration for container %s: %w", opts.ContainerID, err)
+ }
+ return nil
}
// rootlessNetNS is nil if we are root
@@ -787,7 +785,7 @@ func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error {
// execute the cni setup in the rootless net ns
err = rootlessNetNS.Do(tearDownPod)
if cerr := rootlessNetNS.Cleanup(r); cerr != nil {
- logrus.WithError(err).Error("failed to cleanup rootless netns")
+ logrus.WithError(err).Error("failed to clean up rootless netns")
}
rootlessNetNS.Lock.Unlock()
} else {
@@ -830,12 +828,12 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
// First unmount the namespace
if err := netns.UnmountNS(ctr.state.NetNS); err != nil {
- return errors.Wrapf(err, "error unmounting network namespace for container %s", ctr.ID())
+ return fmt.Errorf("error unmounting network namespace for container %s: %w", ctr.ID(), err)
}
// Now close the open file descriptor
if err := ctr.state.NetNS.Close(); err != nil {
- return errors.Wrapf(err, "error closing network namespace for container %s", ctr.ID())
+ return fmt.Errorf("error closing network namespace for container %s: %w", ctr.ID(), err)
}
ctr.state.NetNS = nil
@@ -868,7 +866,7 @@ func getContainerNetNS(ctr *Container) (string, *Container, error) {
// It returns nil when it is set to bridge and an error otherwise.
func isBridgeNetMode(n namespaces.NetworkMode) error {
if !n.IsBridge() {
- return errors.Wrapf(define.ErrNetworkModeInvalid, "%q is not supported", n)
+ return fmt.Errorf("%q is not supported: %w", n, define.ErrNetworkModeInvalid)
}
return nil
}
@@ -884,7 +882,7 @@ func isBridgeNetMode(n namespaces.NetworkMode) error {
// extend this to stop + restart slirp4netns
func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.StatusBlock, error) {
if ctr.state.NetNS == nil {
- return nil, errors.Wrapf(define.ErrCtrStateInvalid, "container %s network is not configured, refusing to reload", ctr.ID())
+ return nil, fmt.Errorf("container %s network is not configured, refusing to reload: %w", ctr.ID(), define.ErrCtrStateInvalid)
}
if err := isBridgeNetMode(ctr.config.NetMode); err != nil {
return nil, err
@@ -930,6 +928,8 @@ func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.Statu
return r.configureNetNS(ctr, ctr.state.NetNS)
}
+// TODO (5.0): return the statistics per network interface
+// This would allow better compat with docker.
func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) {
var netStats *netlink.LinkStatistics
@@ -943,21 +943,39 @@ func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) {
return nil, nil
}
- // FIXME get the interface from the container netstatus
- dev := "eth0"
netMode := ctr.config.NetMode
+ netStatus := ctr.getNetworkStatus()
if otherCtr != nil {
netMode = otherCtr.config.NetMode
+ netStatus = otherCtr.getNetworkStatus()
}
if netMode.IsSlirp4netns() {
- dev = "tap0"
+ // create a fake status with correct interface name for the logic below
+ netStatus = map[string]types.StatusBlock{
+ "slirp4netns": {
+ Interfaces: map[string]types.NetInterface{"tap0": {}},
+ },
+ }
}
err := ns.WithNetNSPath(netNSPath, func(_ ns.NetNS) error {
- link, err := netlink.LinkByName(dev)
- if err != nil {
- return err
+ for _, status := range netStatus {
+ for dev := range status.Interfaces {
+ link, err := netlink.LinkByName(dev)
+ if err != nil {
+ return err
+ }
+ if netStats == nil {
+ netStats = link.Attrs().Statistics
+ continue
+ }
+ // Currently only Tx/RxBytes are used.
+ // In the future we should return all stats per interface so that
+ // api users have better options.
+ stats := link.Attrs().Statistics
+ netStats.TxBytes += stats.TxBytes
+ netStats.RxBytes += stats.RxBytes
+ }
}
- netStats = link.Attrs().Statistics
return nil
})
return netStats, err
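getContainerNetIO no longer hard-codes eth0 (or tap0 for slirp4netns): it now walks the container's network status and sums TxBytes/RxBytes across every interface inside the netns. A rough sketch of that aggregation (not part of the patch) using the same ns.WithNetNSPath and netlink helpers; it sums over netlink.LinkList instead of the internal status map, which is a simplification, and the netns path in main is hypothetical:
package main

import (
	"fmt"

	"github.com/containernetworking/plugins/pkg/ns"
	"github.com/vishvananda/netlink"
)

// sumNetIO adds up Tx/Rx bytes over every link in the given network
// namespace.
func sumNetIO(netnsPath string) (tx, rx uint64, err error) {
	err = ns.WithNetNSPath(netnsPath, func(_ ns.NetNS) error {
		links, err := netlink.LinkList()
		if err != nil {
			return err
		}
		for _, link := range links {
			if stats := link.Attrs().Statistics; stats != nil {
				tx += stats.TxBytes
				rx += stats.RxBytes
			}
		}
		return nil
	})
	return tx, rx, err
}

func main() {
	// hypothetical netns path for illustration
	tx, rx, err := sumNetIO("/run/netns/example")
	fmt.Println(tx, rx, err)
}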
@@ -1031,7 +1049,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
// If we have networks - handle that here
if len(networks) > 0 {
if len(networks) != len(netStatus) {
- return nil, errors.Wrapf(define.ErrInternal, "network inspection mismatch: asked to join %d network(s) %v, but have information on %d network(s)", len(networks), networks, len(netStatus))
+ return nil, fmt.Errorf("network inspection mismatch: asked to join %d network(s) %v, but have information on %d network(s): %w", len(networks), networks, len(netStatus), define.ErrInternal)
}
settings.Networks = make(map[string]*define.InspectAdditionalNetwork)
@@ -1056,7 +1074,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
// If not joining networks, we should have at most 1 result
if len(netStatus) > 1 {
- return nil, errors.Wrapf(define.ErrInternal, "should have at most 1 network status result if not joining networks, instead got %d", len(netStatus))
+ return nil, fmt.Errorf("should have at most 1 network status result if not joining networks, instead got %d: %w", len(netStatus), define.ErrInternal)
}
if len(netStatus) == 1 {
@@ -1069,7 +1087,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
func (c *Container) joinedNetworkNSPath() string {
for _, namespace := range c.config.Spec.Linux.Namespaces {
- if namespace.Type == spec.NetworkNamespace {
+ if namespace.Type == specs.NetworkNamespace {
return namespace.Path
}
}
@@ -1209,7 +1227,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
_, nameExists := networks[netName]
if !nameExists && len(networks) > 0 {
- return errors.Errorf("container %s is not connected to network %s", nameOrID, netName)
+ return fmt.Errorf("container %s is not connected to network %s", nameOrID, netName)
}
if err := c.syncContainer(); err != nil {
@@ -1228,7 +1246,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
}
if c.state.NetNS == nil {
- return errors.Wrapf(define.ErrNoNetwork, "unable to disconnect %s from %s", nameOrID, netName)
+ return fmt.Errorf("unable to disconnect %s from %s: %w", nameOrID, netName, define.ErrNoNetwork)
}
opts := types.NetworkOptions{
@@ -1346,7 +1364,7 @@ func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNe
return nil
}
if c.state.NetNS == nil {
- return errors.Wrapf(define.ErrNoNetwork, "unable to connect %s to %s", nameOrID, netName)
+ return fmt.Errorf("unable to connect %s to %s: %w", nameOrID, netName, define.ErrNoNetwork)
}
opts := types.NetworkOptions{
diff --git a/libpod/networking_slirp4netns.go b/libpod/networking_slirp4netns.go
index 788834435..4a6462d46 100644
--- a/libpod/networking_slirp4netns.go
+++ b/libpod/networking_slirp4netns.go
@@ -5,6 +5,7 @@ package libpod
import (
"bytes"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -24,7 +25,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/rootlessport"
"github.com/containers/podman/v4/pkg/servicereaper"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -68,7 +68,7 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) {
cmd := exec.Command(path, "--help")
out, err := cmd.CombinedOutput()
if err != nil {
- return nil, errors.Wrapf(err, "slirp4netns %q", out)
+ return nil, fmt.Errorf("slirp4netns %q: %w", out, err)
}
return &slirpFeatures{
HasDisableHostLoopback: strings.Contains(string(out), "--disable-host-loopback"),
@@ -95,14 +95,14 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
for _, o := range slirpOptions {
parts := strings.SplitN(o, "=", 2)
if len(parts) < 2 {
- return nil, errors.Errorf("unknown option for slirp4netns: %q", o)
+ return nil, fmt.Errorf("unknown option for slirp4netns: %q", o)
}
option, value := parts[0], parts[1]
switch option {
case "cidr":
ipv4, _, err := net.ParseCIDR(value)
if err != nil || ipv4.To4() == nil {
- return nil, errors.Errorf("invalid cidr %q", value)
+ return nil, fmt.Errorf("invalid cidr %q", value)
}
slirp4netnsOpts.cidr = value
case "port_handler":
@@ -112,7 +112,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
case "rootlesskit":
slirp4netnsOpts.isSlirpHostForward = false
default:
- return nil, errors.Errorf("unknown port_handler for slirp4netns: %q", value)
+ return nil, fmt.Errorf("unknown port_handler for slirp4netns: %q", value)
}
case "allow_host_loopback":
switch value {
@@ -121,7 +121,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
case "false":
slirp4netnsOpts.disableHostLoopback = true
default:
- return nil, errors.Errorf("invalid value of allow_host_loopback for slirp4netns: %q", value)
+ return nil, fmt.Errorf("invalid value of allow_host_loopback for slirp4netns: %q", value)
}
case "enable_ipv6":
switch value {
@@ -130,14 +130,14 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
case "false":
slirp4netnsOpts.enableIPv6 = false
default:
- return nil, errors.Errorf("invalid value of enable_ipv6 for slirp4netns: %q", value)
+ return nil, fmt.Errorf("invalid value of enable_ipv6 for slirp4netns: %q", value)
}
case "outbound_addr":
ipv4 := net.ParseIP(value)
if ipv4 == nil || ipv4.To4() == nil {
_, err := net.InterfaceByName(value)
if err != nil {
- return nil, errors.Errorf("invalid outbound_addr %q", value)
+ return nil, fmt.Errorf("invalid outbound_addr %q", value)
}
}
slirp4netnsOpts.outboundAddr = value
@@ -146,7 +146,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
if ipv6 == nil || ipv6.To4() != nil {
_, err := net.InterfaceByName(value)
if err != nil {
- return nil, errors.Errorf("invalid outbound_addr6: %q", value)
+ return nil, fmt.Errorf("invalid outbound_addr6: %q", value)
}
}
slirp4netnsOpts.outboundAddr6 = value
@@ -154,10 +154,10 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
var err error
slirp4netnsOpts.mtu, err = strconv.Atoi(value)
if slirp4netnsOpts.mtu < 68 || err != nil {
- return nil, errors.Errorf("invalid mtu %q", value)
+ return nil, fmt.Errorf("invalid mtu %q", value)
}
default:
- return nil, errors.Errorf("unknown option for slirp4netns: %q", o)
+ return nil, fmt.Errorf("unknown option for slirp4netns: %q", o)
}
}
return slirp4netnsOpts, nil
@@ -180,31 +180,31 @@ func createBasicSlirp4netnsCmdArgs(options *slirp4netnsNetworkOptions, features
if options.cidr != "" {
if !features.HasCIDR {
- return nil, errors.Errorf("cidr not supported")
+ return nil, fmt.Errorf("cidr not supported")
}
cmdArgs = append(cmdArgs, fmt.Sprintf("--cidr=%s", options.cidr))
}
if options.enableIPv6 {
if !features.HasIPv6 {
- return nil, errors.Errorf("enable_ipv6 not supported")
+ return nil, fmt.Errorf("enable_ipv6 not supported")
}
cmdArgs = append(cmdArgs, "--enable-ipv6")
}
if options.outboundAddr != "" {
if !features.HasOutboundAddr {
- return nil, errors.Errorf("outbound_addr not supported")
+ return nil, fmt.Errorf("outbound_addr not supported")
}
cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr=%s", options.outboundAddr))
}
if options.outboundAddr6 != "" {
if !features.HasOutboundAddr || !features.HasIPv6 {
- return nil, errors.Errorf("outbound_addr6 not supported")
+ return nil, fmt.Errorf("outbound_addr6 not supported")
}
if !options.enableIPv6 {
- return nil, errors.Errorf("enable_ipv6=true is required for outbound_addr6")
+ return nil, fmt.Errorf("enable_ipv6=true is required for outbound_addr6")
}
cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr6=%s", options.outboundAddr6))
}
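checkSlirpFlags probes the slirp4netns binary's --help output, and createBasicSlirp4netnsCmdArgs only emits a flag once the matching feature has been detected. A small sketch of that probe-then-append pattern (not part of the patch), assuming a slirp4netns binary is on PATH and using only flags that appear in the hunks above:
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// probe runs "<path> --help" and reports whether the given flag is mentioned,
// mirroring the feature detection in checkSlirpFlags.
func probe(path, flag string) (bool, error) {
	out, err := exec.Command(path, "--help").CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("slirp4netns %q: %w", out, err)
	}
	return strings.Contains(string(out), flag), nil
}

func main() {
	path := "slirp4netns" // assumed to be on PATH
	var args []string
	// Only append a flag when the binary advertises it, as the real
	// createBasicSlirp4netnsCmdArgs does for each optional feature.
	for _, flag := range []string{"--disable-host-loopback", "--enable-ipv6"} {
		ok, err := probe(path, flag)
		if err != nil {
			fmt.Println(err)
			return
		}
		if ok {
			args = append(args, flag)
		}
	}
	fmt.Println(args)
}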
@@ -225,7 +225,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
syncR, syncW, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "failed to open pipe")
+ return fmt.Errorf("failed to open pipe: %w", err)
}
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
@@ -243,7 +243,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
}
slirpFeatures, err := checkSlirpFlags(path)
if err != nil {
- return errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
+ return fmt.Errorf("error checking slirp4netns binary %s: %q: %w", path, err, err)
}
cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures)
if err != nil {
@@ -266,7 +266,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
if !ctr.config.PostConfigureNetNS {
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
if err != nil {
- return errors.Wrapf(err, "failed to create rootless network sync pipe")
+ return fmt.Errorf("failed to create rootless network sync pipe: %w", err)
}
netnsPath = netns.Path()
cmdArgs = append(cmdArgs, "--netns-type=path", netnsPath, "tap0")
@@ -295,13 +295,13 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
logFile, err := os.Create(logPath)
if err != nil {
- return errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
+ return fmt.Errorf("failed to open slirp4netns log file %s: %w", logPath, err)
}
defer logFile.Close()
// Unlink the file immediately so we won't need to worry about cleaning it up later.
// It is still accessible through the open fd logFile.
if err := os.Remove(logPath); err != nil {
- return errors.Wrapf(err, "delete file %s", logPath)
+ return fmt.Errorf("delete file %s: %w", logPath, err)
}
cmd.Stdout = logFile
cmd.Stderr = logFile
@@ -357,7 +357,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
if netOptions.enableIPv6 {
slirpReadyWg.Done()
}
- return errors.Wrapf(err, "failed to start slirp4netns process")
+ return fmt.Errorf("failed to start slirp4netns process: %w", err)
}
defer func() {
servicereaper.AddPID(cmd.Process.Pid)
@@ -381,7 +381,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
if netOptions.cidr != "" {
ipv4, ipv4network, err := net.ParseCIDR(netOptions.cidr)
if err != nil || ipv4.To4() == nil {
- return errors.Errorf("invalid cidr %q", netOptions.cidr)
+ return fmt.Errorf("invalid cidr %q", netOptions.cidr)
}
ctr.slirp4netnsSubnet = ipv4network
}
@@ -405,7 +405,7 @@ func GetSlirp4netnsIP(subnet *net.IPNet) (*net.IP, error) {
}
expectedIP, err := addToIP(slirpSubnet, uint32(100))
if err != nil {
- return nil, errors.Wrapf(err, "error calculating expected ip for slirp4netns")
+ return nil, fmt.Errorf("error calculating expected ip for slirp4netns: %w", err)
}
return expectedIP, nil
}
@@ -419,7 +419,7 @@ func GetSlirp4netnsGateway(subnet *net.IPNet) (*net.IP, error) {
}
expectedGatewayIP, err := addToIP(slirpSubnet, uint32(2))
if err != nil {
- return nil, errors.Wrapf(err, "error calculating expected gateway ip for slirp4netns")
+ return nil, fmt.Errorf("error calculating expected gateway ip for slirp4netns: %w", err)
}
return expectedGatewayIP, nil
}
@@ -433,7 +433,7 @@ func GetSlirp4netnsDNS(subnet *net.IPNet) (*net.IP, error) {
}
expectedDNSIP, err := addToIP(slirpSubnet, uint32(3))
if err != nil {
- return nil, errors.Wrapf(err, "error calculating expected dns ip for slirp4netns")
+ return nil, fmt.Errorf("error calculating expected dns ip for slirp4netns: %w", err)
}
return expectedDNSIP, nil
}
@@ -448,11 +448,11 @@ func addToIP(subnet *net.IPNet, offset uint32) (*net.IP, error) {
ipNewRaw := ipInteger + offset
// Avoid overflows
if ipNewRaw < ipInteger {
- return nil, errors.Errorf("integer overflow while calculating ip address offset, %s + %d", ipFixed, offset)
+ return nil, fmt.Errorf("integer overflow while calculating ip address offset, %s + %d", ipFixed, offset)
}
ipNew := net.IPv4(byte(ipNewRaw>>24), byte(ipNewRaw>>16&0xFF), byte(ipNewRaw>>8)&0xFF, byte(ipNewRaw&0xFF))
if !subnet.Contains(ipNew) {
- return nil, errors.Errorf("calculated ip address %s is not within given subnet %s", ipNew.String(), subnet.String())
+ return nil, fmt.Errorf("calculated ip address %s is not within given subnet %s", ipNew.String(), subnet.String())
}
return &ipNew, nil
}
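GetSlirp4netnsGateway and GetSlirp4netnsDNS derive well-known addresses by adding a fixed offset to the subnet base (+2 for the gateway, +3 for DNS, +100 for the expected guest IP), guarding against overflow and out-of-subnet results. With slirp4netns' default 10.0.2.0/24 subnet that yields 10.0.2.2 and 10.0.2.3. A worked sketch of the same arithmetic (not part of the patch) using encoding/binary:
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// addToIP adds offset to the subnet's base address and checks the result
// still falls inside the subnet, like the helper in the hunk above.
func addToIP(subnet *net.IPNet, offset uint32) (net.IP, error) {
	base := subnet.IP.To4()
	if base == nil {
		return nil, fmt.Errorf("%s is not an IPv4 subnet", subnet)
	}
	raw := binary.BigEndian.Uint32(base)
	newRaw := raw + offset
	if newRaw < raw {
		return nil, fmt.Errorf("integer overflow while calculating ip address offset, %s + %d", base, offset)
	}
	ip := make(net.IP, 4)
	binary.BigEndian.PutUint32(ip, newRaw)
	if !subnet.Contains(ip) {
		return nil, fmt.Errorf("calculated ip address %s is not within given subnet %s", ip, subnet)
	}
	return ip, nil
}

func main() {
	// 10.0.2.0/24 is slirp4netns' default subnet.
	_, subnet, _ := net.ParseCIDR("10.0.2.0/24")
	gw, _ := addToIP(subnet, 2)  // 10.0.2.2, the expected gateway
	dns, _ := addToIP(subnet, 3) // 10.0.2.3, the expected DNS server
	fmt.Println(gw, dns)
}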
@@ -465,7 +465,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
b := make([]byte, 16)
for {
if err := syncR.SetDeadline(time.Now().Add(timeout)); err != nil {
- return errors.Wrapf(err, "error setting %s pipe timeout", prog)
+ return fmt.Errorf("error setting %s pipe timeout: %w", prog, err)
}
// FIXME: return err as soon as proc exits, without waiting for timeout
if _, err := syncR.Read(b); err == nil {
@@ -476,7 +476,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
var status syscall.WaitStatus
pid, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
if err != nil {
- return errors.Wrapf(err, "failed to read %s process status", prog)
+ return fmt.Errorf("failed to read %s process status: %w", prog, err)
}
if pid != cmd.Process.Pid {
continue
@@ -488,16 +488,16 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
}
logContent, err := ioutil.ReadAll(logFile)
if err != nil {
- return errors.Wrapf(err, "%s failed", prog)
+ return fmt.Errorf("%s failed: %w", prog, err)
}
- return errors.Errorf("%s failed: %q", prog, logContent)
+ return fmt.Errorf("%s failed: %q", prog, logContent)
}
if status.Signaled() {
- return errors.Errorf("%s killed by signal", prog)
+ return fmt.Errorf("%s killed by signal", prog)
}
continue
}
- return errors.Wrapf(err, "failed to read from %s sync pipe", prog)
+ return fmt.Errorf("failed to read from %s sync pipe: %w", prog, err)
}
}
return nil
@@ -506,7 +506,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath string, netStatus map[string]types.StatusBlock) error {
syncR, syncW, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "failed to open pipe")
+ return fmt.Errorf("failed to open pipe: %w", err)
}
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
@@ -514,19 +514,19 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
logPath := filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("rootlessport-%s.log", ctr.config.ID))
logFile, err := os.Create(logPath)
if err != nil {
- return errors.Wrapf(err, "failed to open rootlessport log file %s", logPath)
+ return fmt.Errorf("failed to open rootlessport log file %s: %w", logPath, err)
}
defer logFile.Close()
// Unlink the file immediately so we won't need to worry about cleaning it up later.
// It is still accessible through the open fd logFile.
if err := os.Remove(logPath); err != nil {
- return errors.Wrapf(err, "delete file %s", logPath)
+ return fmt.Errorf("delete file %s: %w", logPath, err)
}
if !ctr.config.PostConfigureNetNS {
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
if err != nil {
- return errors.Wrapf(err, "failed to create rootless port sync pipe")
+ return fmt.Errorf("failed to create rootless port sync pipe: %w", err)
}
}
@@ -566,7 +566,7 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
Setpgid: true,
}
if err := cmd.Start(); err != nil {
- return errors.Wrapf(err, "failed to start rootlessport process")
+ return fmt.Errorf("failed to start rootlessport process: %w", err)
}
defer func() {
servicereaper.AddPID(cmd.Process.Pid)
@@ -579,7 +579,7 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
if stdoutStr != "" {
// err contains the full debug log and is too verbose, so return stdoutStr
logrus.Debug(err)
- return errors.Errorf("rootlessport " + strings.TrimSuffix(stdoutStr, "\n"))
+ return fmt.Errorf("rootlessport " + strings.TrimSuffix(stdoutStr, "\n"))
}
return err
}
@@ -612,7 +612,7 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd
// wait for the API socket file to appear before trying to use it.
if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout); err != nil {
- return errors.Wrapf(err, "waiting for slirp4nets to create the api socket file %s", apiSocket)
+ return fmt.Errorf("waiting for slirp4nets to create the api socket file %s: %w", apiSocket, err)
}
// for each port we want to add we need to open a connection to the slirp4netns control socket
@@ -639,7 +639,7 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd
func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport uint16) error {
conn, err := net.Dial("unix", apiSocket)
if err != nil {
- return errors.Wrapf(err, "cannot open connection to %s", apiSocket)
+ return fmt.Errorf("cannot open connection to %s: %w", apiSocket, err)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -659,27 +659,27 @@ func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport ui
// to the socket, as requested by slirp4netns.
data, err := json.Marshal(&apiCmd)
if err != nil {
- return errors.Wrapf(err, "cannot marshal JSON for slirp4netns")
+ return fmt.Errorf("cannot marshal JSON for slirp4netns: %w", err)
}
if _, err := conn.Write([]byte(fmt.Sprintf("%s\n", data))); err != nil {
- return errors.Wrapf(err, "cannot write to control socket %s", apiSocket)
+ return fmt.Errorf("cannot write to control socket %s: %w", apiSocket, err)
}
if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
- return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
+ return fmt.Errorf("cannot shutdown the socket %s: %w", apiSocket, err)
}
buf := make([]byte, 2048)
readLength, err := conn.Read(buf)
if err != nil {
- return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
+ return fmt.Errorf("cannot read from control socket %s: %w", apiSocket, err)
}
// if there is no 'error' key in the received JSON data, then the operation was
// successful.
var y map[string]interface{}
if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
- return errors.Wrapf(err, "error parsing error status from slirp4netns")
+ return fmt.Errorf("error parsing error status from slirp4netns: %w", err)
}
if e, found := y["error"]; found {
- return errors.Errorf("from slirp4netns while setting up port redirection: %v", e)
+ return fmt.Errorf("from slirp4netns while setting up port redirection: %v", e)
}
return nil
}
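openSlirp4netnsPort performs one round trip on the slirp4netns control socket: dial the unix socket, write a single newline-terminated JSON command, half-close the write side, then read the JSON reply and fail if it contains an "error" key. A hedged sketch of that wire exchange (not part of the patch); the "add_hostfwd" command name and argument names come from the slirp4netns API rather than from this hunk, so treat the payload as illustrative:
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net"
)

// sendSlirpCommand mirrors the socket dance in openSlirp4netnsPort.
func sendSlirpCommand(apiSocket string, cmd map[string]interface{}) error {
	conn, err := net.Dial("unix", apiSocket)
	if err != nil {
		return fmt.Errorf("cannot open connection to %s: %w", apiSocket, err)
	}
	defer conn.Close()

	data, err := json.Marshal(cmd)
	if err != nil {
		return fmt.Errorf("cannot marshal JSON for slirp4netns: %w", err)
	}
	if _, err := conn.Write(append(data, '\n')); err != nil {
		return fmt.Errorf("cannot write to control socket %s: %w", apiSocket, err)
	}
	if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
		return fmt.Errorf("cannot shutdown the socket %s: %w", apiSocket, err)
	}

	reply, err := io.ReadAll(conn)
	if err != nil {
		return fmt.Errorf("cannot read from control socket %s: %w", apiSocket, err)
	}
	var resp map[string]interface{}
	if err := json.Unmarshal(reply, &resp); err != nil {
		return fmt.Errorf("error parsing error status from slirp4netns: %w", err)
	}
	if e, found := resp["error"]; found {
		return fmt.Errorf("from slirp4netns while setting up port redirection: %v", e)
	}
	return nil
}

func main() {
	// Hypothetical socket path and payload for illustration only.
	err := sendSlirpCommand("/tmp/slirp4netns.sock", map[string]interface{}{
		"execute": "add_hostfwd",
		"arguments": map[string]interface{}{
			"proto": "tcp", "host_port": 8080, "guest_port": 80,
		},
	})
	fmt.Println(err)
}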
@@ -722,21 +722,21 @@ func (c *Container) reloadRootlessRLKPortMapping() error {
conn, err := openUnixSocket(filepath.Join(c.runtime.config.Engine.TmpDir, "rp", c.config.ID))
if err != nil {
- return errors.Wrap(err, "could not reload rootless port mappings, port forwarding may no longer work correctly")
+ return fmt.Errorf("could not reload rootless port mappings, port forwarding may no longer work correctly: %w", err)
}
defer conn.Close()
enc := json.NewEncoder(conn)
err = enc.Encode(childIP)
if err != nil {
- return errors.Wrap(err, "port reloading failed")
+ return fmt.Errorf("port reloading failed: %w", err)
}
b, err := ioutil.ReadAll(conn)
if err != nil {
- return errors.Wrap(err, "port reloading failed")
+ return fmt.Errorf("port reloading failed: %w", err)
}
data := string(b)
if data != "OK" {
- return errors.Errorf("port reloading failed: %s", data)
+ return fmt.Errorf("port reloading failed: %s", data)
}
return nil
}
diff --git a/libpod/oci.go b/libpod/oci.go
index 09f856ac7..70053db1b 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -3,6 +3,7 @@ package libpod
import (
"net/http"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod/define"
)
@@ -12,9 +13,7 @@ import (
// management logic - e.g., we do not expect it to determine on its own that
// calling 'UnpauseContainer()' on a container that is not paused is an error.
// The code calling the OCIRuntime will manage this.
-// TODO: May want to move the Attach() code under this umbrella. It's highly OCI
-// runtime dependent.
-// TODO: May want to move the conmon cleanup code here too - it depends on
+// TODO: May want to move the conmon cleanup code here - it depends on
// Conmon being in use.
type OCIRuntime interface {
// Name returns the name of the runtime.
@@ -52,6 +51,8 @@ type OCIRuntime interface {
// UnpauseContainer unpauses the given container.
UnpauseContainer(ctr *Container) error
+ // Attach to a container.
+ Attach(ctr *Container, params *AttachOptions) error
// HTTPAttach performs an attach intended to be transported over HTTP.
// For terminal attach, the container's output will be directly streamed
// to output; otherwise, STDOUT and STDERR will be multiplexed, with
@@ -66,7 +67,7 @@ type OCIRuntime interface {
// client.
HTTPAttach(ctr *Container, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) error
// AttachResize resizes the terminal in use by the given container.
- AttachResize(ctr *Container, newSize define.TerminalSize) error
+ AttachResize(ctr *Container, newSize resize.TerminalSize) error
// ExecContainer executes a command in a running container.
// Returns an int (PID of exec session), error channel (errors from
@@ -76,7 +77,7 @@ type OCIRuntime interface {
// running, in a goroutine that will return via the chan error in the
// return signature.
// newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
- ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *define.TerminalSize) (int, chan error, error)
+ ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *resize.TerminalSize) (int, chan error, error)
// ExecContainerHTTP executes a command in a running container and
// attaches its standard streams to a provided hijacked HTTP session.
// Maintains the same invariants as ExecContainer (returns on session
@@ -84,14 +85,14 @@ type OCIRuntime interface {
// The HTTP attach itself maintains the same invariants as HTTPAttach.
// newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, r *http.Request, w http.ResponseWriter,
- streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *define.TerminalSize) (int, chan error, error)
+ streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *resize.TerminalSize) (int, chan error, error)
// ExecContainerDetached executes a command in a running container, but
// does not attach to it. Returns the PID of the exec session and an
// error (if starting the exec session failed)
ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error)
// ExecAttachResize resizes the terminal of a running exec session. Only
// allowed with sessions that were created with a TTY.
- ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error
+ ExecAttachResize(ctr *Container, sessionID string, newSize resize.TerminalSize) error
// ExecStopContainer stops a given exec session in a running container.
// SIGTERM with be sent initially, then SIGKILL after the given timeout.
// If timeout is 0, SIGKILL will be sent immediately, and SIGTERM will
@@ -149,6 +150,30 @@ type OCIRuntime interface {
RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeInfo, error)
}
+// AttachOptions are options used when attached to a container or an exec
+// session.
+type AttachOptions struct {
+ // Streams are the streams to attach to.
+ Streams *define.AttachStreams
+ // DetachKeys contains the key combination that will detach from the
+ // attach session. Empty string is assumed as no detach keys - user
+ // detach is impossible. If unset, defaults from containers.conf will be
+ // used.
+ DetachKeys *string
+ // InitialSize is the initial size of the terminal. Set before the
+ // attach begins.
+ InitialSize *resize.TerminalSize
+ // AttachReady signals when the attach has successfully completed and
+ // streaming has begun.
+ AttachReady chan<- bool
+ // Start indicates that the container should be started if it is not
+ // already running.
+ Start bool
+ // Started signals when the container has been successfully started.
+ // Required if Start is true, unused otherwise.
+ Started chan<- bool
+}
+
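The new AttachOptions struct folds the old positional attach parameters into one value and documents its channel contract: Started is required when Start is set, and AttachReady fires once streaming begins. A generic, self-contained sketch of that signaling pattern (not libpod code) showing why the caller must supply Started before asking Attach to start the workload:
package main

import (
	"fmt"
	"sync"
	"time"
)

// attach is a stripped-down stand-in for the Start/Started/AttachReady
// contract: start the workload, signal Started, then signal AttachReady
// once streaming is wired up.
func attach(start bool, started chan<- bool, attachReady chan<- bool) error {
	if start && started == nil {
		return fmt.Errorf("started chan not passed when start set")
	}
	if start {
		time.Sleep(50 * time.Millisecond) // pretend to start the container
		started <- true
	}
	if attachReady != nil {
		attachReady <- true
	}
	time.Sleep(100 * time.Millisecond) // pretend to stream I/O
	return nil
}

func main() {
	started := make(chan bool)
	ready := make(chan bool)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := attach(true, started, ready); err != nil {
			fmt.Println(err)
		}
	}()
	<-started
	fmt.Println("container started")
	<-ready
	fmt.Println("attach is streaming")
	wg.Wait()
}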
// ExecOptions are options passed into ExecContainer. They control the command
// that will be executed and how the exec will proceed.
type ExecOptions struct {
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_conmon_attach_linux.go
index 06f8f8719..aa55aa6f5 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_conmon_attach_linux.go
@@ -4,6 +4,7 @@
package libpod
import (
+ "errors"
"fmt"
"io"
"net"
@@ -12,12 +13,11 @@ import (
"syscall"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/resize"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/containers/podman/v4/pkg/kubeutils"
- "github.com/containers/podman/v4/utils"
"github.com/moby/term"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -38,17 +38,26 @@ func openUnixSocket(path string) (*net.UnixConn, error) {
return net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: fmt.Sprintf("/proc/self/fd/%d", fd), Net: "unixpacket"})
}
-// Attach to the given container
-// Does not check if state is appropriate
-// started is only required if startContainer is true
-func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize, startContainer bool, started chan bool, attachRdy chan<- bool) error {
+// Attach to the given container.
+// Does not check if state is appropriate.
+// started is only required if startContainer is true.
+func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error {
passthrough := c.LogDriver() == define.PassthroughLogging
- if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput && !passthrough {
- return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ if params == nil || params.Streams == nil {
+ return fmt.Errorf("must provide parameters to Attach: %w", define.ErrInternal)
}
- if startContainer && started == nil {
- return errors.Wrapf(define.ErrInternal, "started chan not passed when startContainer set")
+
+ if !params.Streams.AttachOutput && !params.Streams.AttachError && !params.Streams.AttachInput && !passthrough {
+ return fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
+ }
+ if params.Start && params.Started == nil {
+ return fmt.Errorf("started chan not passed when startContainer set: %w", define.ErrInternal)
+ }
+
+ keys := config.DefaultDetachKeys
+ if params.DetachKeys != nil {
+ keys = *params.DetachKeys
}
detachKeys, err := processDetachKeys(keys)
@@ -60,7 +69,12 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
if !passthrough {
logrus.Debugf("Attaching to container %s", c.ID())
- registerResizeFunc(resize, c.bundlePath())
+ // If we have a resize, do it.
+ if params.InitialSize != nil {
+ if err := r.AttachResize(c, *params.InitialSize); err != nil {
+ return err
+ }
+ }
attachSock, err := c.AttachSocketPath()
if err != nil {
@@ -69,7 +83,7 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
conn, err = openUnixSocket(attachSock)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
+ return fmt.Errorf("failed to connect to container's attach socket: %v: %w", attachSock, err)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -80,22 +94,22 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
// If starting was requested, start the container and notify when that's
// done.
- if startContainer {
+ if params.Start {
if err := c.start(); err != nil {
return err
}
- started <- true
+ params.Started <- true
}
if passthrough {
return nil
}
- receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys)
- if attachRdy != nil {
- attachRdy <- true
+ receiveStdoutError, stdinDone := setupStdioChannels(params.Streams, conn, detachKeys)
+ if params.AttachReady != nil {
+ params.AttachReady <- true
}
- return readStdio(conn, streams, receiveStdoutError, stdinDone)
+ return readStdio(conn, params.Streams, receiveStdoutError, stdinDone)
}
// Attach to the given container's exec session
@@ -106,7 +120,7 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
// conmon will then send the exit code of the exec process, or an error in the exec session
// startFd must be the input side of the fd.
// newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
-// conmon will wait to start the exec session until the parent process has setup the console socket.
+// conmon will wait to start the exec session until the parent process has set up the console socket.
// Once attachToExec successfully attaches to the console socket, the child conmon process responsible for calling runtime exec
// will read from the output side of start fd, thus learning to start the child process.
// Thus, the order goes as follows:
@@ -116,12 +130,12 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
// 4. attachToExec sends on startFd, signalling it has attached to the socket and child is ready to go
// 5. child receives on startFd, runs the runtime exec command
// attachToExec is responsible for closing startFd and attachFd
-func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File, newSize *define.TerminalSize) error {
+func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File, newSize *resize.TerminalSize) error {
if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
- return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ return fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
}
if startFd == nil || attachFd == nil {
- return errors.Wrapf(define.ErrInvalidArg, "start sync pipe and attach sync pipe must be defined for exec attach")
+ return fmt.Errorf("start sync pipe and attach sync pipe must be defined for exec attach: %w", define.ErrInvalidArg)
}
defer errorhandling.CloseQuiet(startFd)
@@ -160,7 +174,7 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se
// 2: then attach
conn, err := openUnixSocket(sockPath)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath)
+ return fmt.Errorf("failed to connect to container's attach socket: %v: %w", sockPath, err)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -186,13 +200,13 @@ func processDetachKeys(keys string) ([]byte, error) {
}
detachKeys, err := term.ToBytes(keys)
if err != nil {
- return nil, errors.Wrapf(err, "invalid detach keys")
+ return nil, fmt.Errorf("invalid detach keys: %w", err)
}
return detachKeys, nil
}
-func registerResizeFunc(resize <-chan define.TerminalSize, bundlePath string) {
- kubeutils.HandleResizing(resize, func(size define.TerminalSize) {
+func registerResizeFunc(r <-chan resize.TerminalSize, bundlePath string) {
+ resize.HandleResizing(r, func(size resize.TerminalSize) {
controlPath := filepath.Join(bundlePath, "ctl")
controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
if err != nil {
@@ -218,7 +232,7 @@ func setupStdioChannels(streams *define.AttachStreams, conn *net.UnixConn, detac
go func() {
var err error
if streams.AttachInput {
- _, err = utils.CopyDetachable(conn, streams.InputStream, detachKeys)
+ _, err = util.CopyDetachable(conn, streams.InputStream, detachKeys)
}
stdinDone <- err
}()
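processDetachKeys converts the human-readable detach sequence into raw bytes with term.ToBytes, and the rewritten Attach falls back to config.DefaultDetachKeys when the caller passes no keys. A tiny sketch of that conversion (not part of the patch); the "ctrl-p,ctrl-q" string below is the conventional default and stands in for the config constant:
package main

import (
	"fmt"

	"github.com/moby/term"
)

func main() {
	// Stand-in for containers/common's config.DefaultDetachKeys.
	keys := "ctrl-p,ctrl-q"
	detachKeys, err := term.ToBytes(keys)
	if err != nil {
		fmt.Println(fmt.Errorf("invalid detach keys: %w", err))
		return
	}
	// ctrl-p is byte 16, ctrl-q is byte 17.
	fmt.Printf("%v\n", detachKeys) // [16 17]
}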
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index 70124bec1..16cd7ef9f 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -13,28 +14,28 @@ import (
"github.com/containers/common/pkg/capabilities"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/resize"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/lookup"
"github.com/containers/podman/v4/pkg/util"
- "github.com/containers/podman/v4/utils"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// ExecContainer executes a command in a running container
-func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *define.TerminalSize) (int, chan error, error) {
+func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *resize.TerminalSize) (int, chan error, error) {
if options == nil {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
+ return -1, nil, fmt.Errorf("must provide an ExecOptions struct to ExecContainer: %w", define.ErrInvalidArg)
}
if len(options.Cmd) == 0 {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ return -1, nil, fmt.Errorf("must provide a command to execute: %w", define.ErrInvalidArg)
}
if sessionID == "" {
- return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
+ return -1, nil, fmt.Errorf("must provide a session ID for exec: %w", define.ErrEmptyID)
}
// TODO: Should we default this to false?
@@ -73,7 +74,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
}()
if err := execCmd.Wait(); err != nil {
- return -1, nil, errors.Wrapf(err, "cannot run conmon")
+ return -1, nil, fmt.Errorf("cannot run conmon: %w", err)
}
pid, err := readConmonPipeData(r.name, pipes.syncPipe, ociLog)
@@ -84,15 +85,15 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
// ExecContainerHTTP executes a new command in an existing container and
// forwards its standard streams over an attach
func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, req *http.Request, w http.ResponseWriter,
- streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *define.TerminalSize) (int, chan error, error) {
+ streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *resize.TerminalSize) (int, chan error, error) {
if streams != nil {
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ return -1, nil, fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
}
}
if options == nil {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
+ return -1, nil, fmt.Errorf("must provide exec options to ExecContainerHTTP: %w", define.ErrInvalidArg)
}
detachString := config.DefaultDetachKeys
@@ -156,7 +157,7 @@ type conmonPipeData struct {
// not attach to it.
func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
if options == nil {
- return -1, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
+ return -1, fmt.Errorf("must provide exec options to ExecContainerHTTP: %w", define.ErrInvalidArg)
}
var ociLog string
@@ -187,7 +188,7 @@ func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID strin
// Wait for conmon to succeed before returning.
if err := execCmd.Wait(); err != nil {
- return -1, errors.Wrapf(err, "cannot run conmon")
+ return -1, fmt.Errorf("cannot run conmon: %w", err)
}
pid, err := readConmonPipeData(r.name, pipes.syncPipe, ociLog)
@@ -196,7 +197,7 @@ func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID strin
}
// ExecAttachResize resizes the TTY of the given exec session.
-func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error {
+func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize resize.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
if err != nil {
return err
@@ -204,7 +205,7 @@ func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, ne
defer controlFile.Close()
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
- return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ return fmt.Errorf("failed to write to ctl file to resize terminal: %w", err)
}
return nil
@@ -225,7 +226,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
+ return fmt.Errorf("error pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
}
if timeout > 0 {
@@ -235,7 +236,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
+ return fmt.Errorf("error killing container %s exec session %s PID %d with SIGTERM: %w", ctr.ID(), sessionID, pid, err)
}
// Wait for the PID to stop
@@ -253,12 +254,12 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
+ return fmt.Errorf("error killing container %s exec session %s PID %d with SIGKILL: %w", ctr.ID(), sessionID, pid, err)
}
// Wait for the PID to stop
if err := waitPidStop(pid, killContainerTimeout); err != nil {
- return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
+ return fmt.Errorf("timed out waiting for container %s exec session %s PID %d to stop after SIGKILL: %w", ctr.ID(), sessionID, pid, err)
}
return nil
@@ -279,7 +280,7 @@ func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (b
if err == unix.ESRCH {
return false, nil
}
- return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
+ return false, fmt.Errorf("error pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
}
return true, nil
@@ -289,7 +290,7 @@ func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (b
func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
// We don't even use container, so don't validity check it
if sessionID == "" {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
+ return "", fmt.Errorf("must provide a valid session ID to get attach socket path: %w", define.ErrInvalidArg)
}
return filepath.Join(ctr.execBundlePath(sessionID), "attach"), nil
@@ -325,20 +326,20 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
pipes := new(execPipes)
if options == nil {
- return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
+ return nil, nil, fmt.Errorf("must provide an ExecOptions struct to ExecContainer: %w", define.ErrInvalidArg)
}
if len(options.Cmd) == 0 {
- return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ return nil, nil, fmt.Errorf("must provide a command to execute: %w", define.ErrInvalidArg)
}
if sessionID == "" {
- return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
+ return nil, nil, fmt.Errorf("must provide a session ID for exec: %w", define.ErrEmptyID)
}
// create sync pipe to receive the pid
parentSyncPipe, childSyncPipe, err := newPipe()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
}
pipes.syncPipe = parentSyncPipe
@@ -352,7 +353,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
// attachToExec is responsible for closing parentStartPipe
childStartPipe, parentStartPipe, err := newPipe()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
}
pipes.startPipe = parentStartPipe
@@ -362,7 +363,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
// attachToExec is responsible for closing parentAttachPipe
parentAttachPipe, childAttachPipe, err := newPipe()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
}
pipes.attachPipe = parentAttachPipe
@@ -471,7 +472,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
childrenClosed = true
if err != nil {
- return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
+ return nil, nil, fmt.Errorf("cannot start container %s: %w", c.ID(), err)
}
if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
return nil, nil, err
@@ -487,14 +488,14 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
}
// Attach to a container over HTTP
-func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, execCmd *exec.Cmd, conmonPipeDataChan chan<- conmonPipeData, ociLog string, newSize *define.TerminalSize, runtimeName string) (deferredErr error) {
+func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, execCmd *exec.Cmd, conmonPipeDataChan chan<- conmonPipeData, ociLog string, newSize *resize.TerminalSize, runtimeName string) (deferredErr error) {
// NOTE: As you may notice, the attach code is quite complex.
// Many things happen concurrently and yet are interdependent.
// If you ever change this function, make sure to write to the
// conmonPipeDataChan in case of an error.
if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil {
- err := errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach")
+ err := fmt.Errorf("must provide a start and attach pipe to finish an exec attach: %w", define.ErrInvalidArg)
conmonPipeDataChan <- conmonPipeData{-1, err}
return err
}
@@ -537,7 +538,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
conn, err := openUnixSocket(sockPath)
if err != nil {
conmonPipeDataChan <- conmonPipeData{-1, err}
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath)
+ return fmt.Errorf("failed to connect to container's attach socket: %v: %w", sockPath, err)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -558,13 +559,13 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
hijacker, ok := w.(http.Hijacker)
if !ok {
conmonPipeDataChan <- conmonPipeData{-1, err}
- return errors.Errorf("unable to hijack connection")
+ return errors.New("unable to hijack connection")
}
httpCon, httpBuf, err := hijacker.Hijack()
if err != nil {
conmonPipeDataChan <- conmonPipeData{-1, err}
- return errors.Wrapf(err, "error hijacking connection")
+ return fmt.Errorf("error hijacking connection: %w", err)
}
hijackDone <- true
@@ -575,7 +576,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
// Force a flush after the header is written.
if err := httpBuf.Flush(); err != nil {
conmonPipeDataChan <- conmonPipeData{-1, err}
- return errors.Wrapf(err, "error flushing HTTP hijack header")
+ return fmt.Errorf("error flushing HTTP hijack header: %w", err)
}
go func() {
@@ -607,7 +608,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
if attachStdin {
go func() {
logrus.Debugf("Beginning STDIN copy")
- _, err := utils.CopyDetachable(conn, httpBuf, detachKeys)
+ _, err := cutil.CopyDetachable(conn, httpBuf, detachKeys)
logrus.Debugf("STDIN copy completed")
stdinChan <- err
}()
@@ -723,7 +724,7 @@ func prepareProcessExec(c *Container, options *ExecOptions, env []string, sessio
if len(addGroups) > 0 {
sgids, err = lookup.GetContainerGroups(addGroups, c.state.Mountpoint, overrides)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s exec session %s", c.ID(), sessionID)
+ return nil, fmt.Errorf("error looking up supplemental groups for container %s exec session %s: %w", c.ID(), sessionID, err)
}
}
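Editor's note: ExecAttachResize above, and AttachResize in the following file, both forward terminal size changes to conmon by writing a one-line command into the "ctl" FIFO in the bundle directory. A rough sketch of that write, with a local struct standing in for resize.TerminalSize and a plain file standing in for the FIFO (the real code opens the FIFO with unix.O_WRONLY and retries on EINTR/EAGAIN):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// terminalSize mirrors the Height/Width fields used from resize.TerminalSize.
type terminalSize struct {
	Height uint16
	Width  uint16
}

// writeResize emits the control message conmon expects: operation 1 (resize),
// followed by the new height and width, terminated by a newline.
func writeResize(bundlePath string, size terminalSize) error {
	controlPath := filepath.Join(bundlePath, "ctl")
	f, err := os.OpenFile(controlPath, os.O_WRONLY|os.O_CREATE, 0o600)
	if err != nil {
		return fmt.Errorf("could not open ctl file: %w", err)
	}
	defer f.Close()
	if _, err := fmt.Fprintf(f, "%d %d %d\n", 1, size.Height, size.Width); err != nil {
		return fmt.Errorf("failed to write to ctl file to resize terminal: %w", err)
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "bundle")
	defer os.RemoveAll(dir)
	if err := writeResize(dir, terminalSize{Height: 40, Width: 120}); err != nil {
		fmt.Println(err)
	}
}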
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 6aa7ce6dc..0cdfe90e9 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -7,6 +7,7 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -23,8 +24,13 @@ import (
"text/template"
"time"
+ runcconfig "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/devices"
+
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/resize"
+ cutil "github.com/containers/common/pkg/util"
conmonConfig "github.com/containers/conmon/runner/config"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/logs"
@@ -38,7 +44,6 @@ import (
pmount "github.com/containers/storage/pkg/mount"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -78,7 +83,7 @@ type ConmonOCIRuntime struct {
// libpod.
func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeFlags []string, runtimeCfg *config.Config) (OCIRuntime, error) {
if name == "" {
- return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name")
+ return nil, fmt.Errorf("the OCI runtime must be provided a non-empty name: %w", define.ErrInvalidArg)
}
// Make lookup tables for runtime support
@@ -122,7 +127,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
if os.IsNotExist(err) {
continue
}
- return nil, errors.Wrapf(err, "cannot stat OCI runtime %s path", name)
+ return nil, fmt.Errorf("cannot stat OCI runtime %s path: %w", name, err)
}
if !stat.Mode().IsRegular() {
continue
@@ -143,7 +148,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
}
if !foundPath {
- return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name)
+ return nil, fmt.Errorf("no valid executable found for OCI runtime %s: %w", name, define.ErrInvalidArg)
}
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
@@ -152,7 +157,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
- return nil, errors.Wrapf(err, "error creating OCI runtime exit files directory")
+ return nil, fmt.Errorf("error creating OCI runtime exit files directory: %w", err)
}
}
return runtime, nil
@@ -228,7 +233,7 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
// changes are propagated to the host.
err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "")
if err != nil {
- return 0, errors.Wrapf(err, "cannot make /sys slave")
+ return 0, fmt.Errorf("cannot make /sys slave: %w", err)
}
mounts, err := pmount.GetMounts()
@@ -241,7 +246,7 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
}
err = unix.Unmount(m.Mountpoint, 0)
if err != nil && !os.IsNotExist(err) {
- return 0, errors.Wrapf(err, "cannot unmount %s", m.Mountpoint)
+ return 0, fmt.Errorf("cannot unmount %s: %w", m.Mountpoint, err)
}
}
return r.createOCIContainer(ctr, restoreOptions)
@@ -264,11 +269,6 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
// status, but will instead only check for the existence of the conmon exit file
// and update state to stopped if it exists.
func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
- exitFile, err := r.ExitFilePath(ctr)
- if err != nil {
- return err
- }
-
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
@@ -284,17 +284,17 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
outPipe, err := cmd.StdoutPipe()
if err != nil {
- return errors.Wrapf(err, "getting stdout pipe")
+ return fmt.Errorf("getting stdout pipe: %w", err)
}
errPipe, err := cmd.StderrPipe()
if err != nil {
- return errors.Wrapf(err, "getting stderr pipe")
+ return fmt.Errorf("getting stderr pipe: %w", err)
}
if err := cmd.Start(); err != nil {
out, err2 := ioutil.ReadAll(errPipe)
if err2 != nil {
- return errors.Wrapf(err, "error getting container %s state", ctr.ID())
+ return fmt.Errorf("error getting container %s state: %w", ctr.ID(), err)
}
if strings.Contains(string(out), "does not exist") || strings.Contains(string(out), "No such file") {
if err := ctr.removeConmonFiles(); err != nil {
@@ -305,7 +305,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
ctr.state.State = define.ContainerStateExited
return nil
}
- return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
+ return fmt.Errorf("error getting container %s state. stderr/out: %s: %w", ctr.ID(), out, err)
}
defer func() {
_ = cmd.Wait()
@@ -316,10 +316,10 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
}
out, err := ioutil.ReadAll(outPipe)
if err != nil {
- return errors.Wrapf(err, "error reading stdout: %s", ctr.ID())
+ return fmt.Errorf("error reading stdout: %s: %w", ctr.ID(), err)
}
if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil {
- return errors.Wrapf(err, "error decoding container status for container %s", ctr.ID())
+ return fmt.Errorf("error decoding container status for container %s: %w", ctr.ID(), err)
}
ctr.state.PID = state.Pid
@@ -333,29 +333,17 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
case "stopped":
ctr.state.State = define.ContainerStateStopped
default:
- return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s",
- ctr.ID(), state.Status)
+ return fmt.Errorf("unrecognized status returned by runtime for container %s: %s: %w",
+ ctr.ID(), state.Status, define.ErrInternal)
}
// Only grab exit status if we were not already stopped
// If we were, it should already be in the database
if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped {
- var fi os.FileInfo
- chWait := make(chan error)
- defer close(chWait)
-
- _, err := WaitForFile(exitFile, chWait, time.Second*5)
- if err == nil {
- fi, err = os.Stat(exitFile)
- }
- if err != nil {
- ctr.state.ExitCode = -1
- ctr.state.FinishedTime = time.Now()
- logrus.Errorf("No exit file for container %s found: %v", ctr.ID(), err)
- return nil
+ if _, err := ctr.Wait(context.Background()); err != nil {
+ logrus.Errorf("Waiting for container %s to exit: %v", ctr.ID(), err)
}
-
- return ctr.handleExitFile(exitFile, fi)
+ return nil
}
// Handle ContainerStateStopping - keep it unless the container
@@ -411,10 +399,10 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool)
if err2 := r.UpdateContainerStatus(ctr); err2 != nil {
logrus.Infof("Error updating status for container %s: %v", ctr.ID(), err2)
}
- if ctr.state.State == define.ContainerStateExited {
- return nil
+ if ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
+ return define.ErrCtrStateInvalid
}
- return errors.Wrapf(err, "error sending signal to container %s", ctr.ID())
+ return fmt.Errorf("error sending signal to container %s: %w", ctr.ID(), err)
}
return nil
@@ -471,7 +459,7 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool)
return nil
}
- return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID())
+ return fmt.Errorf("error sending SIGKILL to container %s: %w", ctr.ID(), err)
}
// Give runtime a few seconds to make it happen
@@ -528,7 +516,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
if streams != nil {
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
- return errors.Wrapf(define.ErrInvalidArg, "must specify at least one stream to attach to")
+ return fmt.Errorf("must specify at least one stream to attach to: %w", define.ErrInvalidArg)
}
}
@@ -541,7 +529,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
if streamAttach {
newConn, err := openUnixSocket(attachSock)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
+ return fmt.Errorf("failed to connect to container's attach socket: %v: %w", attachSock, err)
}
conn = newConn
defer func() {
@@ -576,12 +564,12 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// Alright, let's hijack.
hijacker, ok := w.(http.Hijacker)
if !ok {
- return errors.Errorf("unable to hijack connection")
+ return fmt.Errorf("unable to hijack connection")
}
httpCon, httpBuf, err := hijacker.Hijack()
if err != nil {
- return errors.Wrapf(err, "error hijacking connection")
+ return fmt.Errorf("error hijacking connection: %w", err)
}
hijackDone <- true
@@ -590,7 +578,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// Force a flush after the header is written.
if err := httpBuf.Flush(); err != nil {
- return errors.Wrapf(err, "error flushing HTTP hijack header")
+ return fmt.Errorf("error flushing HTTP hijack header: %w", err)
}
defer func() {
@@ -705,7 +693,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// Next, STDIN. Avoid entirely if attachStdin unset.
if attachStdin {
go func() {
- _, err := utils.CopyDetachable(conn, httpBuf, detach)
+ _, err := cutil.CopyDetachable(conn, httpBuf, detach)
logrus.Debugf("STDIN copy completed")
stdinChan <- err
}()
@@ -736,7 +724,8 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// isRetryable returns whether the error was caused by a blocked syscall or the
// specified operation on a non blocking file descriptor wasn't ready for completion.
func isRetryable(err error) bool {
- if errno, isErrno := errors.Cause(err).(syscall.Errno); isErrno {
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
return errno == syscall.EINTR || errno == syscall.EAGAIN
}
return false
@@ -751,15 +740,15 @@ func openControlFile(ctr *Container, parentDir string) (*os.File, error) {
return controlFile, nil
}
if !isRetryable(err) {
- return nil, errors.Wrapf(err, "could not open ctl file for terminal resize for container %s", ctr.ID())
+ return nil, fmt.Errorf("could not open ctl file for terminal resize for container %s: %w", ctr.ID(), err)
}
time.Sleep(time.Second / 10)
}
- return nil, errors.Errorf("timeout waiting for %q", controlPath)
+ return nil, fmt.Errorf("timeout waiting for %q", controlPath)
}
// AttachResize resizes the terminal used by the given container.
-func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize define.TerminalSize) error {
+func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize resize.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.bundlePath())
if err != nil {
return err
@@ -768,7 +757,7 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize define.TerminalS
logrus.Debugf("Received a resize event for container %s: %+v", ctr.ID(), newSize)
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
- return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ return fmt.Errorf("failed to write to ctl file to resize terminal: %w", err)
}
return nil
@@ -876,7 +865,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
if err == unix.ESRCH {
return false, nil
}
- return false, errors.Wrapf(err, "error pinging container %s conmon with signal 0", ctr.ID())
+ return false, fmt.Errorf("error pinging container %s conmon with signal 0: %w", ctr.ID(), err)
}
return true, nil
}
@@ -908,7 +897,7 @@ func (r *ConmonOCIRuntime) SupportsKVM() bool {
// AttachSocketPath is the path to a single container's attach socket.
func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
if ctr == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get attach socket path")
+ return "", fmt.Errorf("must provide a valid container to get attach socket path: %w", define.ErrInvalidArg)
}
return filepath.Join(ctr.bundlePath(), "attach"), nil
@@ -917,7 +906,7 @@ func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
// ExitFilePath is the path to a container's exit file.
func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) {
if ctr == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path")
+ return "", fmt.Errorf("must provide a valid container to get exit file path: %w", define.ErrInvalidArg)
}
return filepath.Join(r.exitsDir, ctr.ID()), nil
}
@@ -928,11 +917,11 @@ func (r *ConmonOCIRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntime
conmonPackage := packageVersion(r.conmonPath)
runtimeVersion, err := r.getOCIRuntimeVersion()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error getting version of OCI runtime %s", r.name)
+ return nil, nil, fmt.Errorf("error getting version of OCI runtime %s: %w", r.name, err)
}
conmonVersion, err := r.getConmonVersion()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error getting conmon version")
+ return nil, nil, fmt.Errorf("error getting conmon version: %w", err)
}
conmon := define.ConmonInfo{
@@ -1002,7 +991,7 @@ func waitPidStop(pid int, timeout time.Duration) error {
return nil
case <-time.After(timeout):
close(chControl)
- return errors.Errorf("given PIDs did not die within timeout")
+ return fmt.Errorf("given PIDs did not die within timeout")
}
}
@@ -1014,11 +1003,11 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) {
data, err := ctr.inspectLocked(false)
if err != nil {
// FIXME: this error should probably be returned
- return "", nil // nolint: nilerr
+ return "", nil //nolint: nilerr
}
tmpl, err := template.New("container").Parse(logTag)
if err != nil {
- return "", errors.Wrapf(err, "template parsing error %s", logTag)
+ return "", fmt.Errorf("template parsing error %s: %w", logTag, err)
}
var b bytes.Buffer
err = tmpl.Execute(&b, data)
@@ -1039,13 +1028,13 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
parentSyncPipe, childSyncPipe, err := newPipe()
if err != nil {
- return 0, errors.Wrapf(err, "error creating socket pair")
+ return 0, fmt.Errorf("error creating socket pair: %w", err)
}
defer errorhandling.CloseQuiet(parentSyncPipe)
childStartPipe, parentStartPipe, err := newPipe()
if err != nil {
- return 0, errors.Wrapf(err, "error creating socket pair for start pipe")
+ return 0, fmt.Errorf("error creating socket pair for start pipe: %w", err)
}
defer errorhandling.CloseQuiet(parentStartPipe)
@@ -1166,7 +1155,6 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
}).Debugf("running conmon: %s", r.conmonPath)
cmd := exec.Command(r.conmonPath, args...)
- cmd.Dir = ctr.bundlePath()
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
@@ -1217,12 +1205,12 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
if havePortMapping {
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
if err != nil {
- return 0, errors.Wrapf(err, "failed to create rootless port sync pipe")
+ return 0, fmt.Errorf("failed to create rootless port sync pipe: %w", err)
}
}
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
if err != nil {
- return 0, errors.Wrapf(err, "failed to create rootless network sync pipe")
+ return 0, fmt.Errorf("failed to create rootless network sync pipe: %w", err)
}
} else {
if ctr.rootlessSlirpSyncR != nil {
@@ -1354,8 +1342,6 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
logDriverArg = define.NoLogging
case define.PassthroughLogging:
logDriverArg = define.PassthroughLogging
- case define.JSONLogging:
- fallthrough
//lint:ignore ST1015 the default case has to be here
default: //nolint:stylecheck,gocritic
// No case here should happen except JSONLogging, but keep this here in case the options are extended
@@ -1365,6 +1351,8 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
// to get here, either a user would specify `--log-driver ""`, or this came from another place in libpod
// since the former case is obscure, and the latter case isn't an error, let's silently fallthrough
fallthrough
+ case define.JSONLogging:
+ fallthrough
case define.KubernetesLogging:
logDriverArg = fmt.Sprintf("%s:%s", define.KubernetesLogging, logPath)
}
@@ -1435,7 +1423,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
}
// $INVOCATION_ID is set by systemd when running as a service.
- if os.Getenv("INVOCATION_ID") != "" {
+ if ctr.runtime.RemoteURI() == "" && os.Getenv("INVOCATION_ID") != "" {
mustCreateCgroup = false
}
@@ -1451,9 +1439,14 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
// TODO: This should be a switch - we are not guaranteed that
// there are only 2 valid cgroup managers
cgroupParent := ctr.CgroupParent()
+ cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
+ Resource := ctr.Spec().Linux.Resources
+ cgroupResources, err := GetLimits(Resource)
+ if err != nil {
+ logrus.StandardLogger().Log(logLevel, "Could not get ctr resources")
+ }
if ctr.CgroupManager() == config.SystemdCgroupsManager {
unitName := createUnitName("libpod-conmon", ctr.ID())
-
realCgroupParent := cgroupParent
splitParent := strings.Split(cgroupParent, "/")
if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 {
@@ -1465,8 +1458,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to systemd sandbox cgroup: %v", err)
}
} else {
- cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
- control, err := cgroups.New(cgroupPath, &spec.LinuxResources{})
+ control, err := cgroups.New(cgroupPath, &cgroupResources)
if err != nil {
logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
} else if err := control.AddPid(cmd.Process.Pid); err != nil {
@@ -1555,7 +1547,7 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
}
}
}
- return -1, errors.Wrapf(ss.err, "container create failed (no logs from conmon)")
+ return -1, fmt.Errorf("container create failed (no logs from conmon): %w", ss.err)
}
logrus.Debugf("Received: %d", ss.si.Data)
if ss.si.Data < 0 {
@@ -1572,11 +1564,11 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
if ss.si.Message != "" {
return ss.si.Data, getOCIRuntimeError(runtimeName, ss.si.Message)
}
- return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed")
+ return ss.si.Data, fmt.Errorf("container create failed: %w", define.ErrInternal)
}
data = ss.si.Data
case <-time.After(define.ContainerCreateTimeout):
- return -1, errors.Wrapf(define.ErrInternal, "container creation timeout")
+ return -1, fmt.Errorf("container creation timeout: %w", define.ErrInternal)
}
return data, nil
}
@@ -1748,3 +1740,191 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter,
}
}
}
+
+// GetLimits converts spec resource limits to cgroup consumable limits
+func GetLimits(resource *spec.LinuxResources) (runcconfig.Resources, error) {
+ if resource == nil {
+ resource = &spec.LinuxResources{}
+ }
+ final := &runcconfig.Resources{}
+ devs := []*devices.Rule{}
+
+ // Devices
+ for _, entry := range resource.Devices {
+ if entry.Major == nil || entry.Minor == nil {
+ continue
+ }
+ runeType := 'a'
+ switch entry.Type {
+ case "b":
+ runeType = 'b'
+ case "c":
+ runeType = 'c'
+ }
+
+ devs = append(devs, &devices.Rule{
+ Type: devices.Type(runeType),
+ Major: *entry.Major,
+ Minor: *entry.Minor,
+ Permissions: devices.Permissions(entry.Access),
+ Allow: entry.Allow,
+ })
+ }
+ final.Devices = devs
+
+ // HugepageLimits
+ pageLimits := []*runcconfig.HugepageLimit{}
+ for _, entry := range resource.HugepageLimits {
+ pageLimits = append(pageLimits, &runcconfig.HugepageLimit{
+ Pagesize: entry.Pagesize,
+ Limit: entry.Limit,
+ })
+ }
+ final.HugetlbLimit = pageLimits
+
+ // Networking
+ netPriorities := []*runcconfig.IfPrioMap{}
+ if resource.Network != nil {
+ for _, entry := range resource.Network.Priorities {
+ netPriorities = append(netPriorities, &runcconfig.IfPrioMap{
+ Interface: entry.Name,
+ Priority: int64(entry.Priority),
+ })
+ }
+ }
+ final.NetPrioIfpriomap = netPriorities
+ rdma := make(map[string]runcconfig.LinuxRdma)
+ for name, entry := range resource.Rdma {
+ rdma[name] = runcconfig.LinuxRdma{HcaHandles: entry.HcaHandles, HcaObjects: entry.HcaObjects}
+ }
+ final.Rdma = rdma
+
+ // Memory
+ if resource.Memory != nil {
+ if resource.Memory.Limit != nil {
+ final.Memory = *resource.Memory.Limit
+ }
+ if resource.Memory.Reservation != nil {
+ final.MemoryReservation = *resource.Memory.Reservation
+ }
+ if resource.Memory.Swap != nil {
+ final.MemorySwap = *resource.Memory.Swap
+ }
+ if resource.Memory.Swappiness != nil {
+ final.MemorySwappiness = resource.Memory.Swappiness
+ }
+ }
+
+ // CPU
+ if resource.CPU != nil {
+ if resource.CPU.Period != nil {
+ final.CpuPeriod = *resource.CPU.Period
+ }
+ if resource.CPU.Quota != nil {
+ final.CpuQuota = *resource.CPU.Quota
+ }
+ if resource.CPU.RealtimePeriod != nil {
+ final.CpuRtPeriod = *resource.CPU.RealtimePeriod
+ }
+ if resource.CPU.RealtimeRuntime != nil {
+ final.CpuRtRuntime = *resource.CPU.RealtimeRuntime
+ }
+ if resource.CPU.Shares != nil {
+ final.CpuShares = *resource.CPU.Shares
+ }
+ final.CpusetCpus = resource.CPU.Cpus
+ final.CpusetMems = resource.CPU.Mems
+ }
+
+ // BlkIO
+ if resource.BlockIO != nil {
+ if len(resource.BlockIO.ThrottleReadBpsDevice) > 0 {
+ for _, entry := range resource.BlockIO.ThrottleReadBpsDevice {
+ throttle := &runcconfig.ThrottleDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ throttle.BlockIODevice = *dev
+ throttle.Rate = entry.Rate
+ final.BlkioThrottleReadBpsDevice = append(final.BlkioThrottleReadBpsDevice, throttle)
+ }
+ }
+ if len(resource.BlockIO.ThrottleWriteBpsDevice) > 0 {
+ for _, entry := range resource.BlockIO.ThrottleWriteBpsDevice {
+ throttle := &runcconfig.ThrottleDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ throttle.BlockIODevice = *dev
+ throttle.Rate = entry.Rate
+ final.BlkioThrottleWriteBpsDevice = append(final.BlkioThrottleWriteBpsDevice, throttle)
+ }
+ }
+ if len(resource.BlockIO.ThrottleReadIOPSDevice) > 0 {
+ for _, entry := range resource.BlockIO.ThrottleReadIOPSDevice {
+ throttle := &runcconfig.ThrottleDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ throttle.BlockIODevice = *dev
+ throttle.Rate = entry.Rate
+ final.BlkioThrottleReadIOPSDevice = append(final.BlkioThrottleReadIOPSDevice, throttle)
+ }
+ }
+ if len(resource.BlockIO.ThrottleWriteIOPSDevice) > 0 {
+ for _, entry := range resource.BlockIO.ThrottleWriteIOPSDevice {
+ throttle := &runcconfig.ThrottleDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ throttle.BlockIODevice = *dev
+ throttle.Rate = entry.Rate
+ final.BlkioThrottleWriteIOPSDevice = append(final.BlkioThrottleWriteIOPSDevice, throttle)
+ }
+ }
+ if resource.BlockIO.LeafWeight != nil {
+ final.BlkioLeafWeight = *resource.BlockIO.LeafWeight
+ }
+ if resource.BlockIO.Weight != nil {
+ final.BlkioWeight = *resource.BlockIO.Weight
+ }
+ if len(resource.BlockIO.WeightDevice) > 0 {
+ for _, entry := range resource.BlockIO.WeightDevice {
+ weight := &runcconfig.WeightDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ if entry.Weight != nil {
+ weight.Weight = *entry.Weight
+ }
+ if entry.LeafWeight != nil {
+ weight.LeafWeight = *entry.LeafWeight
+ }
+ weight.BlockIODevice = *dev
+ final.BlkioWeightDevice = append(final.BlkioWeightDevice, weight)
+ }
+ }
+ }
+
+ // Pids
+ if resource.Pids != nil {
+ final.PidsLimit = resource.Pids.Limit
+ }
+
+ // Networking
+ if resource.Network != nil {
+ if resource.Network.ClassID != nil {
+ final.NetClsClassid = *resource.Network.ClassID
+ }
+ }
+
+ // Unified state
+ final.Unified = resource.Unified
+
+ return *final, nil
+}
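Editor's note: the new GetLimits translates an OCI spec.LinuxResources block into the runc configs.Resources form that cgroups.New now receives for the conmon cgroup. The sketch below is a condensed re-implementation of just the memory and CPU branches, with a small local struct standing in for runc's configs.Resources so it compiles without the runc dependency; the real function also covers devices, hugepages, RDMA, block I/O, pids, and unified settings.

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// miniResources stands in for the handful of configs.Resources fields that
// the memory and CPU branches of GetLimits fill in.
type miniResources struct {
	Memory     int64
	CpuShares  uint64
	CpusetCpus string
}

// getLimits mirrors those two branches of the function added above.
func getLimits(resource *spec.LinuxResources) miniResources {
	var final miniResources
	if resource == nil {
		return final
	}
	if resource.Memory != nil && resource.Memory.Limit != nil {
		final.Memory = *resource.Memory.Limit
	}
	if resource.CPU != nil {
		if resource.CPU.Shares != nil {
			final.CpuShares = *resource.CPU.Shares
		}
		final.CpusetCpus = resource.CPU.Cpus
	}
	return final
}

func main() {
	memLimit := int64(512 * 1024 * 1024)
	shares := uint64(1024)
	res := &spec.LinuxResources{
		Memory: &spec.LinuxMemory{Limit: &memLimit},
		CPU:    &spec.LinuxCPU{Shares: &shares, Cpus: "0-1"},
	}
	limits := getLimits(res)
	fmt.Printf("memory=%d shares=%d cpuset=%s\n", limits.Memory, limits.CpuShares, limits.CpusetCpus)
}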
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 86f54c02e..2ab2b4577 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -6,8 +6,8 @@ import (
"path/filepath"
"sync"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -108,24 +108,29 @@ func (r *MissingRuntime) UnpauseContainer(ctr *Container) error {
return r.printError()
}
+// Attach is not available as the runtime is missing
+func (r *MissingRuntime) Attach(ctr *Container, params *AttachOptions) error {
+ return r.printError()
+}
+
// HTTPAttach is not available as the runtime is missing
func (r *MissingRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) error {
return r.printError()
}
// AttachResize is not available as the runtime is missing
-func (r *MissingRuntime) AttachResize(ctr *Container, newSize define.TerminalSize) error {
+func (r *MissingRuntime) AttachResize(ctr *Container, newSize resize.TerminalSize) error {
return r.printError()
}
// ExecContainer is not available as the runtime is missing
-func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *define.TerminalSize) (int, chan error, error) {
+func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *resize.TerminalSize) (int, chan error, error) {
return -1, nil, r.printError()
}
// ExecContainerHTTP is not available as the runtime is missing
func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, req *http.Request, w http.ResponseWriter,
- streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *define.TerminalSize) (int, chan error, error) {
+ streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *resize.TerminalSize) (int, chan error, error) {
return -1, nil, r.printError()
}
@@ -135,7 +140,7 @@ func (r *MissingRuntime) ExecContainerDetached(ctr *Container, sessionID string,
}
// ExecAttachResize is not available as the runtime is missing.
-func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error {
+func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize resize.TerminalSize) error {
return r.printError()
}
@@ -204,7 +209,7 @@ func (r *MissingRuntime) ExecAttachSocketPath(ctr *Container, sessionID string)
// the container, but Conmon should still place an exit file for it.
func (r *MissingRuntime) ExitFilePath(ctr *Container) (string, error) {
if ctr == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path")
+ return "", fmt.Errorf("must provide a valid container to get exit file path: %w", define.ErrInvalidArg)
}
return filepath.Join(r.exitsDir, ctr.ID()), nil
}
@@ -222,5 +227,5 @@ func (r *MissingRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeIn
// Return an error indicating the runtime is missing
func (r *MissingRuntime) printError() error {
- return errors.Wrapf(define.ErrOCIRuntimeNotFound, "runtime %s is missing", r.name)
+ return fmt.Errorf("runtime %s is missing: %w", r.name, define.ErrOCIRuntimeNotFound)
}
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index 64edfdef2..e118348fa 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -70,7 +69,7 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
addr, err = net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", hostIP, port))
}
if err != nil {
- return nil, errors.Wrapf(err, "cannot resolve the UDP address")
+ return nil, fmt.Errorf("cannot resolve the UDP address: %w", err)
}
proto := "udp4"
@@ -79,11 +78,11 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
}
server, err := net.ListenUDP(proto, addr)
if err != nil {
- return nil, errors.Wrapf(err, "cannot listen on the UDP port")
+ return nil, fmt.Errorf("cannot listen on the UDP port: %w", err)
}
file, err = server.File()
if err != nil {
- return nil, errors.Wrapf(err, "cannot get file for UDP socket")
+ return nil, fmt.Errorf("cannot get file for UDP socket: %w", err)
}
// close the listener
// note that this does not affect the fd, see the godoc for server.File()
@@ -103,7 +102,7 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
addr, err = net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", hostIP, port))
}
if err != nil {
- return nil, errors.Wrapf(err, "cannot resolve the TCP address")
+ return nil, fmt.Errorf("cannot resolve the TCP address: %w", err)
}
proto := "tcp4"
@@ -112,11 +111,11 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
}
server, err := net.ListenTCP(proto, addr)
if err != nil {
- return nil, errors.Wrapf(err, "cannot listen on the TCP port")
+ return nil, fmt.Errorf("cannot listen on the TCP port: %w", err)
}
file, err = server.File()
if err != nil {
- return nil, errors.Wrapf(err, "cannot get file for TCP socket")
+ return nil, fmt.Errorf("cannot get file for TCP socket: %w", err)
}
// close the listener
// note that this does not affect the fd, see the godoc for server.File()
@@ -144,14 +143,14 @@ func getOCIRuntimeError(name, runtimeMsg string) error {
if includeFullOutput {
errStr = runtimeMsg
}
- return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrOCIRuntimePermissionDenied)
}
if match := regexp.MustCompile("(?i).*executable file not found in.*|.*no such file or directory.*").FindString(runtimeMsg); match != "" {
errStr := match
if includeFullOutput {
errStr = runtimeMsg
}
- return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrOCIRuntimeNotFound)
}
if match := regexp.MustCompile("`/proc/[a-z0-9-].+/attr.*`").FindString(runtimeMsg); match != "" {
errStr := match
@@ -159,11 +158,11 @@ func getOCIRuntimeError(name, runtimeMsg string) error {
errStr = runtimeMsg
}
if strings.HasSuffix(match, "/exec`") {
- return errors.Wrapf(define.ErrSetSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrSetSecurityAttribute)
} else if strings.HasSuffix(match, "/current`") {
- return errors.Wrapf(define.ErrGetSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrGetSecurityAttribute)
}
- return errors.Wrapf(define.ErrSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrSecurityAttribute)
}
- return errors.Wrapf(define.ErrOCIRuntime, "%s: %s", name, strings.Trim(runtimeMsg, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(runtimeMsg, "\n"), define.ErrOCIRuntime)
}
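Editor's note: with getOCIRuntimeError now wrapping the define sentinels via %w, callers classify failures with errors.Is rather than errors.Cause, and the rewritten isRetryable in oci_conmon_linux.go does the equivalent for syscall errnos with errors.As. A self-contained illustration of both checks, using a local stand-in for the sentinel:

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// errOCIRuntimeNotFound stands in for define.ErrOCIRuntimeNotFound.
var errOCIRuntimeNotFound = errors.New("OCI runtime command not found error")

func classify(err error) string {
	if errors.Is(err, errOCIRuntimeNotFound) {
		return "runtime binary missing"
	}
	var errno syscall.Errno
	// Mirrors the rewritten isRetryable: unwrap to a syscall.Errno if one is in the chain.
	if errors.As(err, &errno) && (errno == syscall.EINTR || errno == syscall.EAGAIN) {
		return "retryable syscall error"
	}
	return "other error"
}

func main() {
	wrapped := fmt.Errorf("crun: executable file not found in $PATH: %w", errOCIRuntimeNotFound)
	fmt.Println(classify(wrapped))                                    // runtime binary missing
	fmt.Println(classify(fmt.Errorf("open ctl: %w", syscall.EAGAIN))) // retryable syscall error
	fmt.Println(classify(errors.New("boom")))                         // other error
}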
diff --git a/libpod/options.go b/libpod/options.go
index feb89510f..f03980017 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"net"
"os"
@@ -12,6 +13,7 @@ import (
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/secrets"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v4/libpod/define"
@@ -24,7 +26,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -140,7 +141,7 @@ func WithOCIRuntime(runtime string) RuntimeOption {
}
if runtime == "" {
- return errors.Wrapf(define.ErrInvalidArg, "must provide a valid path")
+ return fmt.Errorf("must provide a valid path: %w", define.ErrInvalidArg)
}
rt.config.Engine.OCIRuntime = runtime
@@ -158,7 +159,7 @@ func WithConmonPath(path string) RuntimeOption {
}
if path == "" {
- return errors.Wrapf(define.ErrInvalidArg, "must provide a valid path")
+ return fmt.Errorf("must provide a valid path: %w", define.ErrInvalidArg)
}
rt.config.Engine.ConmonPath = []string{path}
@@ -218,8 +219,8 @@ func WithCgroupManager(manager string) RuntimeOption {
}
if manager != config.CgroupfsCgroupsManager && manager != config.SystemdCgroupsManager {
- return errors.Wrapf(define.ErrInvalidArg, "Cgroup manager must be one of %s and %s",
- config.CgroupfsCgroupsManager, config.SystemdCgroupsManager)
+ return fmt.Errorf("cgroup manager must be one of %s and %s: %w",
+ config.CgroupfsCgroupsManager, config.SystemdCgroupsManager, define.ErrInvalidArg)
}
rt.config.Engine.CgroupManager = manager
@@ -249,7 +250,7 @@ func WithRegistriesConf(path string) RuntimeOption {
logrus.Debugf("Setting custom registries.conf: %q", path)
return func(rt *Runtime) error {
if _, err := os.Stat(path); err != nil {
- return errors.Wrap(err, "locating specified registries.conf")
+ return fmt.Errorf("locating specified registries.conf: %w", err)
}
if rt.imageContext == nil {
rt.imageContext = &types.SystemContext{
@@ -271,7 +272,7 @@ func WithHooksDir(hooksDirs ...string) RuntimeOption {
for _, hooksDir := range hooksDirs {
if hooksDir == "" {
- return errors.Wrap(define.ErrInvalidArg, "empty-string hook directories are not supported")
+ return fmt.Errorf("empty-string hook directories are not supported: %w", define.ErrInvalidArg)
}
}
@@ -434,6 +435,21 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
}
}
+// WithReset instructs libpod to reset all storage to factory defaults.
+// All containers, pods, volumes, images, and networks will be removed.
+// All directories created by Libpod will be removed.
+func WithReset() RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return define.ErrRuntimeFinalized
+ }
+
+ rt.doReset = true
+
+ return nil
+ }
+}
+
// WithRenumber instructs libpod to perform a lock renumbering while
// initializing. This will handle migrations from early versions of libpod with
// file locks to newer versions with SHM locking, as well as changes in the
@@ -478,7 +494,7 @@ func WithMigrateRuntime(requestedRuntime string) RuntimeOption {
}
if requestedRuntime == "" {
- return errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty name for new runtime")
+ return fmt.Errorf("must provide a non-empty name for new runtime: %w", define.ErrInvalidArg)
}
rt.migrateRuntime = requestedRuntime
@@ -497,7 +513,7 @@ func WithEventsLogger(logger string) RuntimeOption {
}
if !events.IsValidEventer(logger) {
- return errors.Wrapf(define.ErrInvalidArg, "%q is not a valid events backend", logger)
+ return fmt.Errorf("%q is not a valid events backend: %w", logger, define.ErrInvalidArg)
}
rt.config.Engine.EventsLogger = logger
@@ -605,8 +621,8 @@ func WithSdNotifyMode(mode string) CtrCreateOption {
}
// verify values
- if len(mode) > 0 && !util.StringInSlice(strings.ToLower(mode), SdNotifyModeValues) {
- return errors.Wrapf(define.ErrInvalidArg, "--sdnotify values must be one of %q", strings.Join(SdNotifyModeValues, ", "))
+ if len(mode) > 0 && !cutil.StringInSlice(strings.ToLower(mode), SdNotifyModeValues) {
+ return fmt.Errorf("--sdnotify values must be one of %q: %w", strings.Join(SdNotifyModeValues, ", "), define.ErrInvalidArg)
}
ctr.config.SdNotifyMode = mode
@@ -754,9 +770,9 @@ func WithStopSignal(signal syscall.Signal) CtrCreateOption {
}
if signal == 0 {
- return errors.Wrapf(define.ErrInvalidArg, "stop signal cannot be 0")
+ return fmt.Errorf("stop signal cannot be 0: %w", define.ErrInvalidArg)
} else if signal > 64 {
- return errors.Wrapf(define.ErrInvalidArg, "stop signal cannot be greater than 64 (SIGRTMAX)")
+ return fmt.Errorf("stop signal cannot be greater than 64 (SIGRTMAX): %w", define.ErrInvalidArg)
}
ctr.config.StopSignal = uint(signal)
@@ -1064,11 +1080,11 @@ func WithLogDriver(driver string) CtrCreateOption {
}
switch driver {
case "":
- return errors.Wrapf(define.ErrInvalidArg, "log driver must be set")
+ return fmt.Errorf("log driver must be set: %w", define.ErrInvalidArg)
case define.JournaldLogging, define.KubernetesLogging, define.JSONLogging, define.NoLogging, define.PassthroughLogging:
break
default:
- return errors.Wrapf(define.ErrInvalidArg, "invalid log driver")
+ return fmt.Errorf("invalid log driver: %w", define.ErrInvalidArg)
}
ctr.config.LogDriver = driver
@@ -1084,7 +1100,7 @@ func WithLogPath(path string) CtrCreateOption {
return define.ErrCtrFinalized
}
if path == "" {
- return errors.Wrapf(define.ErrInvalidArg, "log path must be set")
+ return fmt.Errorf("log path must be set: %w", define.ErrInvalidArg)
}
ctr.config.LogPath = path
@@ -1100,7 +1116,7 @@ func WithLogTag(tag string) CtrCreateOption {
return define.ErrCtrFinalized
}
if tag == "" {
- return errors.Wrapf(define.ErrInvalidArg, "log tag must be set")
+ return fmt.Errorf("log tag must be set: %w", define.ErrInvalidArg)
}
ctr.config.LogTag = tag
@@ -1123,7 +1139,7 @@ func WithCgroupsMode(mode string) CtrCreateOption {
case "enabled", "no-conmon", cgroupSplit:
ctr.config.CgroupsMode = mode
default:
- return errors.Wrapf(define.ErrInvalidArg, "Invalid cgroup mode %q", mode)
+ return fmt.Errorf("invalid cgroup mode %q: %w", mode, define.ErrInvalidArg)
}
return nil
@@ -1138,7 +1154,7 @@ func WithCgroupParent(parent string) CtrCreateOption {
}
if parent == "" {
- return errors.Wrapf(define.ErrInvalidArg, "cgroup parent cannot be empty")
+ return fmt.Errorf("cgroup parent cannot be empty: %w", define.ErrInvalidArg)
}
ctr.config.CgroupParent = parent
@@ -1168,7 +1184,7 @@ func WithDNS(dnsServers []string) CtrCreateOption {
for _, i := range dnsServers {
result := net.ParseIP(i)
if result == nil {
- return errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i)
+ return fmt.Errorf("invalid IP address %s: %w", i, define.ErrInvalidArg)
}
dns = append(dns, result)
}
@@ -1185,7 +1201,7 @@ func WithDNSOption(dnsOptions []string) CtrCreateOption {
return define.ErrCtrFinalized
}
if ctr.config.UseImageResolvConf {
- return errors.Wrapf(define.ErrInvalidArg, "cannot add DNS options if container will not create /etc/resolv.conf")
+ return fmt.Errorf("cannot add DNS options if container will not create /etc/resolv.conf: %w", define.ErrInvalidArg)
}
ctr.config.DNSOption = append(ctr.config.DNSOption, dnsOptions...)
return nil
@@ -1359,7 +1375,7 @@ func WithRestartPolicy(policy string) CtrCreateOption {
case define.RestartPolicyNone, define.RestartPolicyNo, define.RestartPolicyOnFailure, define.RestartPolicyAlways, define.RestartPolicyUnlessStopped:
ctr.config.RestartPolicy = policy
default:
- return errors.Wrapf(define.ErrInvalidArg, "%q is not a valid restart policy", policy)
+ return fmt.Errorf("%q is not a valid restart policy: %w", policy, define.ErrInvalidArg)
}
return nil
@@ -1391,7 +1407,7 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
for _, vol := range volumes {
mountOpts, err := util.ProcessOptions(vol.Options, false, "")
if err != nil {
- return errors.Wrapf(err, "processing options for named volume %q mounted at %q", vol.Name, vol.Dest)
+ return fmt.Errorf("processing options for named volume %q mounted at %q: %w", vol.Name, vol.Dest, err)
}
ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{
@@ -1677,6 +1693,18 @@ func withSetAnon() VolumeCreateOption {
}
}
+// WithVolumeDriverTimeout sets the volume creation timeout period
+func WithVolumeDriverTimeout(timeout int) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return define.ErrVolumeFinalized
+ }
+
+ volume.config.Timeout = timeout
+ return nil
+ }
+}
+
// WithTimezone sets the timezone in the container
func WithTimezone(path string) CtrCreateOption {
return func(ctr *Container) error {
@@ -1692,7 +1720,7 @@ func WithTimezone(path string) CtrCreateOption {
}
// We don't want to mount a timezone directory
if file.IsDir() {
- return errors.New("Invalid timezone: is a directory")
+ return errors.New("invalid timezone: is a directory")
}
}
@@ -1708,7 +1736,7 @@ func WithUmask(umask string) CtrCreateOption {
return define.ErrCtrFinalized
}
if !define.UmaskRegex.MatchString(umask) {
- return errors.Wrapf(define.ErrInvalidArg, "Invalid umask string %s", umask)
+ return fmt.Errorf("invalid umask string %s: %w", umask, define.ErrInvalidArg)
}
ctr.config.Umask = umask
return nil
@@ -1781,7 +1809,7 @@ func WithInitCtrType(containerType string) CtrCreateOption {
ctr.config.InitContainerType = containerType
return nil
}
- return errors.Errorf("%s is invalid init container type", containerType)
+ return fmt.Errorf("%s is invalid init container type", containerType)
}
}
@@ -1796,7 +1824,7 @@ func WithHostDevice(dev []specs.LinuxDevice) CtrCreateOption {
}
}
-// WithSelectedPasswordManagement makes it so that the container either does or does not setup /etc/passwd or /etc/group
+// WithSelectedPasswordManagement makes it so that the container either does or does not set up /etc/passwd or /etc/group
func WithSelectedPasswordManagement(passwd *bool) CtrCreateOption {
return func(c *Container) error {
if c.valid {
@@ -1815,12 +1843,12 @@ func WithInfraConfig(compatibleOptions InfraInherit) CtrCreateOption {
}
compatMarshal, err := json.Marshal(compatibleOptions)
if err != nil {
- return errors.New("Could not marshal compatible options")
+ return errors.New("could not marshal compatible options")
}
err = json.Unmarshal(compatMarshal, ctr.config)
if err != nil {
- return errors.New("Could not unmarshal compatible options into contrainer config")
+ return errors.New("could not unmarshal compatible options into contrainer config")
}
return nil
}
@@ -2158,3 +2186,17 @@ func WithPasswdEntry(passwdEntry string) CtrCreateOption {
return nil
}
}
+
+// WithMountAllDevices sets the option to mount all of a privileged container's
+// host devices
+func WithMountAllDevices() CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ ctr.config.MountAllDevices = true
+
+ return nil
+ }
+}
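Editor's note: the new WithReset, WithVolumeDriverTimeout, and WithMountAllDevices helpers follow libpod's existing functional-option pattern, where each option returns a closure that validates state and mutates the target's config. A stripped-down sketch of that pattern with stand-in types (not libpod's real structs):

package main

import (
	"errors"
	"fmt"
)

var errVolumeFinalized = errors.New("volume has already been finalized")

type volumeConfig struct {
	Timeout int
}

type volume struct {
	valid  bool
	config volumeConfig
}

// volumeCreateOption mirrors the shape of libpod's VolumeCreateOption.
type volumeCreateOption func(*volume) error

// withVolumeDriverTimeout mirrors the WithVolumeDriverTimeout option added above.
func withVolumeDriverTimeout(timeout int) volumeCreateOption {
	return func(v *volume) error {
		if v.valid {
			return errVolumeFinalized
		}
		v.config.Timeout = timeout
		return nil
	}
}

// newVolume applies each option in order, then marks the volume finalized.
func newVolume(opts ...volumeCreateOption) (*volume, error) {
	v := &volume{}
	for _, opt := range opts {
		if err := opt(v); err != nil {
			return nil, err
		}
	}
	v.valid = true
	return v, nil
}

func main() {
	v, err := newVolume(withVolumeDriverTimeout(10))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("volume driver timeout:", v.config.Timeout)
}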
diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go
index a6d66a034..0a5eaae53 100644
--- a/libpod/plugin/volume_api.go
+++ b/libpod/plugin/volume_api.go
@@ -3,6 +3,7 @@ package plugin
import (
"bytes"
"context"
+ "fmt"
"io/ioutil"
"net"
"net/http"
@@ -12,19 +13,17 @@ import (
"sync"
"time"
+ "errors"
+
"github.com/containers/podman/v4/libpod/define"
"github.com/docker/go-plugins-helpers/sdk"
"github.com/docker/go-plugins-helpers/volume"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
-// TODO: We should add syntax for specifying plugins to containers.conf, and
-// support for loading based on that.
-
// Copied from docker/go-plugins-helpers/volume/api.go - not exported, so we
// need to do this to get at them.
// These are well-established paths that should not change unless the plugin API
@@ -38,8 +37,6 @@ var (
hostVirtualPath = "/VolumeDriver.Path"
mountPath = "/VolumeDriver.Mount"
unmountPath = "/VolumeDriver.Unmount"
- // nolint
- capabilitiesPath = "/VolumeDriver.Capabilities"
)
const (
@@ -80,7 +77,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
// Hit the Activate endpoint to find out if it is, and if so what kind
req, err := http.NewRequest("POST", "http://plugin"+activatePath, nil)
if err != nil {
- return errors.Wrapf(err, "error making request to volume plugin %s activation endpoint", newPlugin.Name)
+ return fmt.Errorf("error making request to volume plugin %s activation endpoint: %w", newPlugin.Name, err)
}
req.Header.Set("Host", newPlugin.getURI())
@@ -88,25 +85,25 @@ func validatePlugin(newPlugin *VolumePlugin) error {
resp, err := newPlugin.Client.Do(req)
if err != nil {
- return errors.Wrapf(err, "error sending request to plugin %s activation endpoint", newPlugin.Name)
+ return fmt.Errorf("error sending request to plugin %s activation endpoint: %w", newPlugin.Name, err)
}
defer resp.Body.Close()
// Response code MUST be 200. Anything else, we have to assume it's not
// a valid plugin.
if resp.StatusCode != 200 {
- return errors.Wrapf(ErrNotPlugin, "got status code %d from activation endpoint for plugin %s", resp.StatusCode, newPlugin.Name)
+ return fmt.Errorf("got status code %d from activation endpoint for plugin %s: %w", resp.StatusCode, newPlugin.Name, ErrNotPlugin)
}
// Read and decode the body so we can tell if this is a volume plugin.
respBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return errors.Wrapf(err, "error reading activation response body from plugin %s", newPlugin.Name)
+ return fmt.Errorf("error reading activation response body from plugin %s: %w", newPlugin.Name, err)
}
respStruct := new(activateResponse)
if err := json.Unmarshal(respBytes, respStruct); err != nil {
- return errors.Wrapf(err, "error unmarshalling plugin %s activation response", newPlugin.Name)
+ return fmt.Errorf("error unmarshalling plugin %s activation response: %w", newPlugin.Name, err)
}
foundVolume := false
@@ -118,7 +115,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
}
if !foundVolume {
- return errors.Wrapf(ErrNotVolumePlugin, "plugin %s does not implement volume plugin, instead provides %s", newPlugin.Name, strings.Join(respStruct.Implements, ", "))
+ return fmt.Errorf("plugin %s does not implement volume plugin, instead provides %s: %w", newPlugin.Name, strings.Join(respStruct.Implements, ", "), ErrNotVolumePlugin)
}
if plugins == nil {
@@ -132,7 +129,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
// GetVolumePlugin gets a single volume plugin, with the given name, at the
// given path.
-func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
+func GetVolumePlugin(name string, path string, timeout int) (*VolumePlugin, error) {
pluginsLock.Lock()
defer pluginsLock.Unlock()
@@ -140,7 +137,7 @@ func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
if exists {
// This shouldn't be possible, but just in case...
if plugin.SocketPath != filepath.Clean(path) {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested path %q for volume plugin %s does not match pre-existing path for plugin, %q", path, name, plugin.SocketPath)
+ return nil, fmt.Errorf("requested path %q for volume plugin %s does not match pre-existing path for plugin, %q: %w", path, name, plugin.SocketPath, define.ErrInvalidArg)
}
return plugin, nil
@@ -156,6 +153,13 @@ func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
// And since we can reuse it, might as well cache it.
client := new(http.Client)
client.Timeout = defaultTimeout
+ // If the user specified a non-zero timeout, use their value. Otherwise, keep the default.
+ if timeout != 0 {
+ if time.Duration(timeout)*time.Second < defaultTimeout {
+ logrus.Warnf("the default timeout for volume creation is %d seconds, setting a time less than that may break this feature.", defaultTimeout)
+ }
+ client.Timeout = time.Duration(timeout) * time.Second
+ }
// This bit borrowed from pkg/bindings/connection.go
client.Transport = &http.Transport{
DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
@@ -167,10 +171,10 @@ func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
stat, err := os.Stat(newPlugin.SocketPath)
if err != nil {
- return nil, errors.Wrapf(err, "cannot access plugin %s socket %q", name, newPlugin.SocketPath)
+ return nil, fmt.Errorf("cannot access plugin %s socket %q: %w", name, newPlugin.SocketPath, err)
}
if stat.Mode()&os.ModeSocket == 0 {
- return nil, errors.Wrapf(ErrNotPlugin, "volume %s path %q is not a unix socket", name, newPlugin.SocketPath)
+ return nil, fmt.Errorf("volume %s path %q is not a unix socket: %w", name, newPlugin.SocketPath, ErrNotPlugin)
}
if err := validatePlugin(newPlugin); err != nil {
@@ -185,40 +189,39 @@ func (p *VolumePlugin) getURI() string {
}
// Verify the plugin is still available.
-// TODO: Do we want to ping with an HTTP request? There's no ping endpoint so
-// we'd need to hit Activate or Capabilities?
+// Does not actually ping the API, just verifies that the socket still exists.
func (p *VolumePlugin) verifyReachable() error {
if _, err := os.Stat(p.SocketPath); err != nil {
if os.IsNotExist(err) {
pluginsLock.Lock()
defer pluginsLock.Unlock()
delete(plugins, p.Name)
- return errors.Wrapf(ErrPluginRemoved, p.Name)
+ return fmt.Errorf("%s: %w", p.Name, ErrPluginRemoved)
}
- return errors.Wrapf(err, "error accessing plugin %s", p.Name)
+ return fmt.Errorf("error accessing plugin %s: %w", p.Name, err)
}
return nil
}
// Send a request to the volume plugin for handling.
// Callers *MUST* close the response when they are done.
-func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint string) (*http.Response, error) {
+func (p *VolumePlugin) sendRequest(toJSON interface{}, endpoint string) (*http.Response, error) {
var (
reqJSON []byte
err error
)
- if hasBody {
+ if toJSON != nil {
reqJSON, err = json.Marshal(toJSON)
if err != nil {
- return nil, errors.Wrapf(err, "error marshalling request JSON for volume plugin %s endpoint %s", p.Name, endpoint)
+ return nil, fmt.Errorf("error marshalling request JSON for volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
}
}
req, err := http.NewRequest("POST", "http://plugin"+endpoint, bytes.NewReader(reqJSON))
if err != nil {
- return nil, errors.Wrapf(err, "error making request to volume plugin %s endpoint %s", p.Name, endpoint)
+ return nil, fmt.Errorf("error making request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
}
req.Header.Set("Host", p.getURI())
@@ -226,7 +229,7 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st
resp, err := p.Client.Do(req)
if err != nil {
- return nil, errors.Wrapf(err, "error sending request to volume plugin %s endpoint %s", p.Name, endpoint)
+ return nil, fmt.Errorf("error sending request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
}
// We are *deliberately not closing* response here. It is the
// responsibility of the caller to do so after reading the response.
@@ -240,9 +243,9 @@ func (p *VolumePlugin) makeErrorResponse(err, endpoint, volName string) error {
err = "empty error from plugin"
}
if volName != "" {
- return errors.Wrapf(errors.New(err), "error on %s on volume %s in volume plugin %s", endpoint, volName, p.Name)
+ return fmt.Errorf("error on %s on volume %s in volume plugin %s: %w", endpoint, volName, p.Name, errors.New(err))
}
- return errors.Wrapf(errors.New(err), "error on %s in volume plugin %s", endpoint, p.Name)
+ return fmt.Errorf("error on %s in volume plugin %s: %w", endpoint, p.Name, errors.New(err))
}
// Handle error responses from plugin
@@ -254,12 +257,12 @@ func (p *VolumePlugin) handleErrorResponse(resp *http.Response, endpoint, volNam
if resp.StatusCode != 200 {
errResp, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
errStruct := new(volume.ErrorResponse)
if err := json.Unmarshal(errResp, errStruct); err != nil {
- return errors.Wrapf(err, "error unmarshalling JSON response from volume plugin %s", p.Name)
+ return fmt.Errorf("error unmarshalling JSON response from volume plugin %s: %w", p.Name, err)
}
return p.makeErrorResponse(errStruct.Err, endpoint, volName)
@@ -271,7 +274,7 @@ func (p *VolumePlugin) handleErrorResponse(resp *http.Response, endpoint, volNam
// CreateVolume creates a volume in the plugin.
func (p *VolumePlugin) CreateVolume(req *volume.CreateRequest) error {
if req == nil {
- return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to CreateVolume")
+ return fmt.Errorf("must provide non-nil request to CreateVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -280,7 +283,7 @@ func (p *VolumePlugin) CreateVolume(req *volume.CreateRequest) error {
logrus.Infof("Creating volume %s using plugin %s", req.Name, p.Name)
- resp, err := p.sendRequest(req, true, createPath)
+ resp, err := p.sendRequest(req, createPath)
if err != nil {
return err
}
@@ -297,7 +300,7 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
logrus.Infof("Listing volumes using plugin %s", p.Name)
- resp, err := p.sendRequest(nil, false, listPath)
+ resp, err := p.sendRequest(nil, listPath)
if err != nil {
return nil, err
}
@@ -307,15 +310,14 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
return nil, err
}
- // TODO: Can probably unify response reading under a helper
volumeRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return nil, errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return nil, fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
volumeResp := new(volume.ListResponse)
if err := json.Unmarshal(volumeRespBytes, volumeResp); err != nil {
- return nil, errors.Wrapf(err, "error unmarshalling volume plugin %s list response", p.Name)
+ return nil, fmt.Errorf("error unmarshalling volume plugin %s list response: %w", p.Name, err)
}
return volumeResp.Volumes, nil
@@ -324,7 +326,7 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
// GetVolume gets a single volume from the plugin.
func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error) {
if req == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to GetVolume")
+ return nil, fmt.Errorf("must provide non-nil request to GetVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -333,7 +335,7 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
logrus.Infof("Getting volume %s using plugin %s", req.Name, p.Name)
- resp, err := p.sendRequest(req, true, getPath)
+ resp, err := p.sendRequest(req, getPath)
if err != nil {
return nil, err
}
@@ -345,12 +347,12 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
getRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return nil, errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return nil, fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
getResp := new(volume.GetResponse)
if err := json.Unmarshal(getRespBytes, getResp); err != nil {
- return nil, errors.Wrapf(err, "error unmarshalling volume plugin %s get response", p.Name)
+ return nil, fmt.Errorf("error unmarshalling volume plugin %s get response: %w", p.Name, err)
}
return getResp.Volume, nil
@@ -359,7 +361,7 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
// RemoveVolume removes a single volume from the plugin.
func (p *VolumePlugin) RemoveVolume(req *volume.RemoveRequest) error {
if req == nil {
- return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to RemoveVolume")
+ return fmt.Errorf("must provide non-nil request to RemoveVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -368,7 +370,7 @@ func (p *VolumePlugin) RemoveVolume(req *volume.RemoveRequest) error {
logrus.Infof("Removing volume %s using plugin %s", req.Name, p.Name)
- resp, err := p.sendRequest(req, true, removePath)
+ resp, err := p.sendRequest(req, removePath)
if err != nil {
return err
}
@@ -380,7 +382,7 @@ func (p *VolumePlugin) RemoveVolume(req *volume.RemoveRequest) error {
// GetVolumePath gets the path the given volume is mounted at.
func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
if req == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to GetVolumePath")
+ return "", fmt.Errorf("must provide non-nil request to GetVolumePath: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -389,7 +391,7 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
logrus.Infof("Getting volume %s path using plugin %s", req.Name, p.Name)
- resp, err := p.sendRequest(req, true, hostVirtualPath)
+ resp, err := p.sendRequest(req, hostVirtualPath)
if err != nil {
return "", err
}
@@ -401,12 +403,12 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
pathRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return "", errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return "", fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
pathResp := new(volume.PathResponse)
if err := json.Unmarshal(pathRespBytes, pathResp); err != nil {
- return "", errors.Wrapf(err, "error unmarshalling volume plugin %s path response", p.Name)
+ return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err)
}
return pathResp.Mountpoint, nil
@@ -417,7 +419,7 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
// the path the volume has been mounted at.
func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
if req == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to MountVolume")
+ return "", fmt.Errorf("must provide non-nil request to MountVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -426,7 +428,7 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
logrus.Infof("Mounting volume %s using plugin %s for container %s", req.Name, p.Name, req.ID)
- resp, err := p.sendRequest(req, true, mountPath)
+ resp, err := p.sendRequest(req, mountPath)
if err != nil {
return "", err
}
@@ -438,12 +440,12 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
mountRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return "", errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return "", fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
mountResp := new(volume.MountResponse)
if err := json.Unmarshal(mountRespBytes, mountResp); err != nil {
- return "", errors.Wrapf(err, "error unmarshalling volume plugin %s path response", p.Name)
+ return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err)
}
return mountResp.Mountpoint, nil
@@ -453,7 +455,7 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
// container that is unmounting, used for internal record-keeping by the plugin.
func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {
if req == nil {
- return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to UnmountVolume")
+ return fmt.Errorf("must provide non-nil request to UnmountVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -462,7 +464,7 @@ func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {
logrus.Infof("Unmounting volume %s using plugin %s for container %s", req.Name, p.Name, req.ID)
- resp, err := p.sendRequest(req, true, unmountPath)
+ resp, err := p.sendRequest(req, unmountPath)
if err != nil {
return err
}
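
GetVolumePlugin now threads a per-volume timeout into the HTTP client it builds for the plugin socket, and sendRequest drops the hasBody flag in favor of a nil check on the request payload. The sketch below is not from the patch; newPluginClient and the socket path are hypothetical, and the real dialer lives in GetVolumePlugin. It only shows the client construction pattern: keep the package default unless the caller supplies a non-zero timeout in seconds.

package main

import (
	"context"
	"net"
	"net/http"
	"time"
)

// Assumed default; the real constant is defined in volume_api.go.
const defaultTimeout = 5 * time.Second

// newPluginClient builds an HTTP client that dials a unix socket and honors a
// caller-supplied timeout, mirroring the GetVolumePlugin change.
func newPluginClient(socketPath string, timeout int) *http.Client {
	client := &http.Client{Timeout: defaultTimeout}
	if timeout != 0 {
		// A value below the default may break volume creation, as the patch warns.
		client.Timeout = time.Duration(timeout) * time.Second
	}
	client.Transport = &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
		},
	}
	return client
}

func main() {
	// Hypothetical socket path, for illustration only.
	_ = newPluginClient("/run/podman/plugins/example.sock", 10)
}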
diff --git a/libpod/pod.go b/libpod/pod.go
index 3c8dc43d4..e059c9416 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"sort"
"strings"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/lock"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// Pod represents a group of containers that are managed together.
@@ -169,46 +169,42 @@ func (p *Pod) CPUQuota() int64 {
return 0
}
-// NetworkMode returns the Network mode given by the user ex: pod, private...
-func (p *Pod) NetworkMode() string {
+// MemoryLimit returns the pod Memory Limit
+func (p *Pod) MemoryLimit() uint64 {
+ if p.state.InfraContainerID == "" {
+ return 0
+ }
infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
if err != nil {
- return ""
+ return 0
}
- return infra.NetworkMode()
+ conf := infra.config.Spec
+ if conf != nil && conf.Linux != nil && conf.Linux.Resources != nil && conf.Linux.Resources.Memory != nil && conf.Linux.Resources.Memory.Limit != nil {
+ val := *conf.Linux.Resources.Memory.Limit
+ return uint64(val)
+ }
+ return 0
}
-// PidMode returns the PID mode given by the user ex: pod, private...
-func (p *Pod) PidMode() string {
+// NetworkMode returns the Network mode given by the user ex: pod, private...
+func (p *Pod) NetworkMode() string {
infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
if err != nil {
return ""
}
- ctrSpec := infra.config.Spec
- if ctrSpec != nil && ctrSpec.Linux != nil {
- for _, ns := range ctrSpec.Linux.Namespaces {
- if ns.Type == specs.PIDNamespace {
- if ns.Path != "" {
- return fmt.Sprintf("ns:%s", ns.Path)
- }
- return "private"
- }
- }
- return "host"
- }
- return ""
+ return infra.NetworkMode()
}
-// PidMode returns the PID mode given by the user ex: pod, private...
-func (p *Pod) UserNSMode() string {
- infra, err := p.infraContainer()
+// NamespaceMode returns the given namespace mode provided by the user ex: host, private...
+func (p *Pod) NamespaceMode(kind specs.LinuxNamespaceType) string {
+ infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
if err != nil {
return ""
}
ctrSpec := infra.config.Spec
if ctrSpec != nil && ctrSpec.Linux != nil {
for _, ns := range ctrSpec.Linux.Namespaces {
- if ns.Type == specs.UserNamespace {
+ if ns.Type == kind {
if ns.Path != "" {
return fmt.Sprintf("ns:%s", ns.Path)
}
@@ -316,7 +312,7 @@ func (p *Pod) CgroupPath() (string, error) {
return "", err
}
if p.state.InfraContainerID == "" {
- return "", errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
+ return "", fmt.Errorf("pod has no infra container: %w", define.ErrNoSuchCtr)
}
return p.state.CgroupPath, nil
}
@@ -390,7 +386,7 @@ func (p *Pod) infraContainer() (*Container, error) {
return nil, err
}
if id == "" {
- return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
+ return nil, fmt.Errorf("pod has no infra container: %w", define.ErrNoSuchCtr)
}
return p.runtime.state.Container(id)
@@ -430,7 +426,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerSta
newStats, err := c.GetContainerStats(previousContainerStats[c.ID()])
// If the container wasn't running, don't include it
// but also suppress the error
- if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid {
+ if err != nil && !errors.Is(err, define.ErrCtrStateInvalid) {
return nil, err
}
if err == nil {
@@ -471,3 +467,14 @@ func (p *Pod) initContainers() ([]*Container, error) {
}
return initCons, nil
}
+
+func (p *Pod) Config() (*PodConfig, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ conf := &PodConfig{}
+
+ err := JSONDeepCopy(p.config, conf)
+
+ return conf, err
+}
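
pod.go collapses the old PidMode and UserNSMode helpers into a single NamespaceMode keyed by specs.LinuxNamespaceType, and adds MemoryLimit and Config accessors. The sketch below is not part of the patch; it restates the namespace lookup as a standalone function over an OCI spec to show why one parameterized helper can replace the per-namespace copies.

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// namespaceMode reports how a namespace of the given kind is configured:
// joined via a path, private, or (absent from the spec) shared with the host.
func namespaceMode(s *spec.Spec, kind spec.LinuxNamespaceType) string {
	if s == nil || s.Linux == nil {
		return ""
	}
	for _, ns := range s.Linux.Namespaces {
		if ns.Type == kind {
			if ns.Path != "" {
				return fmt.Sprintf("ns:%s", ns.Path)
			}
			return "private"
		}
	}
	return "host"
}

func main() {
	s := &spec.Spec{Linux: &spec.Linux{Namespaces: []spec.LinuxNamespace{{Type: spec.PIDNamespace}}}}
	fmt.Println(namespaceMode(s, spec.PIDNamespace)) // private
	fmt.Println(namespaceMode(s, spec.UTSNamespace)) // host
}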
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index eede896a9..c1d54d55e 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"github.com/containers/common/pkg/cgroups"
@@ -9,7 +10,7 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/parallel"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/pkg/errors"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
@@ -31,7 +32,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
return err
}
if rc != 0 {
- return errors.Errorf("init container %s exited with code %d", initCon.ID(), rc)
+ return fmt.Errorf("init container %s exited with code %d", initCon.ID(), rc)
}
// If the container is a once init container, we need to remove it
// after it runs
@@ -41,7 +42,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
var time *uint
if err := p.runtime.removeContainer(ctx, initCon, false, false, true, time); err != nil {
icLock.Unlock()
- return errors.Wrapf(err, "failed to remove once init container %s", initCon.ID())
+ return fmt.Errorf("failed to remove once init container %s: %w", initCon.ID(), err)
}
// Removing a container this way requires an explicit call to clean up the db
if err := p.runtime.state.RemoveContainerFromPod(p, initCon); err != nil {
@@ -91,12 +92,12 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
// Build a dependency graph of containers in the pod
graph, err := BuildContainerGraph(allCtrs)
if err != nil {
- return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
+ return nil, fmt.Errorf("error generating dependency graph for pod %s: %w", p.ID(), err)
}
// If there are no containers without dependencies, we can't start
// Error out
if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ return nil, fmt.Errorf("no containers in pod %s have no dependencies, cannot start pod: %w", p.ID(), define.ErrNoSuchCtr)
}
ctrErrors := make(map[string]error)
@@ -108,7 +109,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error starting some containers")
+ return ctrErrors, fmt.Errorf("error starting some containers: %w", define.ErrPodPartialFail)
}
defer p.newPodEvent(events.Start)
return nil, nil
@@ -152,8 +153,8 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
return nil, err
}
- // TODO: There may be cases where it makes sense to order stops based on
- // dependencies. Should we bother with this?
+ // Stopping pods is not ordered by dependency. We haven't seen any case
+ // where this would actually matter.
ctrErrChan := make(map[string]<-chan error)
@@ -162,8 +163,9 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
c := ctr
logrus.Debugf("Adding parallel job to stop container %s", c.ID())
retChan := parallel.Enqueue(ctx, func() error {
- // TODO: Might be better to batch stop and cleanup
- // together?
+ // Can't batch these without forcing Stop() to hold the
+ // lock for the full duration of the timeout.
+ // We probably don't want to do that.
if timeout > -1 {
if err := c.StopWithTimeout(uint(timeout)); err != nil {
return err
@@ -191,7 +193,7 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -199,7 +201,7 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
+ return ctrErrors, fmt.Errorf("error stopping some containers: %w", define.ErrPodPartialFail)
}
if err := p.maybeStopServiceContainer(); err != nil {
@@ -295,7 +297,7 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -303,7 +305,7 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
+ return ctrErrors, fmt.Errorf("error cleaning up some containers: %w", define.ErrPodPartialFail)
}
if err := p.maybeStopServiceContainer(); err != nil {
@@ -336,10 +338,10 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
if rootless.IsRootless() {
cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
- return nil, errors.Wrap(err, "failed to determine cgroupversion")
+ return nil, fmt.Errorf("failed to determine cgroupversion: %w", err)
}
if !cgroupv2 {
- return nil, errors.Wrap(define.ErrNoCgroups, "can not pause pods containing rootless containers with cgroup V1")
+ return nil, fmt.Errorf("can not pause pods containing rootless containers with cgroup V1: %w", define.ErrNoCgroups)
}
}
@@ -366,7 +368,7 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -374,7 +376,7 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error pausing some containers")
+ return ctrErrors, fmt.Errorf("error pausing some containers: %w", define.ErrPodPartialFail)
}
return nil, nil
}
@@ -422,7 +424,7 @@ func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) {
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -430,7 +432,7 @@ func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error unpausing some containers")
+ return ctrErrors, fmt.Errorf("error unpausing some containers: %w", define.ErrPodPartialFail)
}
return nil, nil
}
@@ -468,7 +470,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
// Build a dependency graph of containers in the pod
graph, err := BuildContainerGraph(allCtrs)
if err != nil {
- return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
+ return nil, fmt.Errorf("error generating dependency graph for pod %s: %w", p.ID(), err)
}
ctrErrors := make(map[string]error)
@@ -477,7 +479,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
// If there are no containers without dependencies, we can't start
// Error out
if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ return nil, fmt.Errorf("no containers in pod %s have no dependencies, cannot start pod: %w", p.ID(), define.ErrNoSuchCtr)
}
// Traverse the graph beginning at nodes with no dependencies
@@ -486,7 +488,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
+ return ctrErrors, fmt.Errorf("error stopping some containers: %w", define.ErrPodPartialFail)
}
p.newPodEvent(events.Stop)
p.newPodEvent(events.Start)
@@ -537,7 +539,7 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -545,7 +547,7 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
+ return ctrErrors, fmt.Errorf("error killing some containers: %w", define.ErrPodPartialFail)
}
if err := p.maybeStopServiceContainer(); err != nil {
@@ -672,8 +674,9 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
infraConfig.CPUPeriod = p.CPUPeriod()
infraConfig.CPUQuota = p.CPUQuota()
infraConfig.CPUSetCPUs = p.ResourceLim().CPU.Cpus
- infraConfig.PidNS = p.PidMode()
- infraConfig.UserNS = p.UserNSMode()
+ infraConfig.PidNS = p.NamespaceMode(specs.PIDNamespace)
+ infraConfig.UserNS = p.NamespaceMode(specs.UserNamespace)
+ infraConfig.UtsNS = p.NamespaceMode(specs.UTSNamespace)
namedVolumes, mounts := infra.SortUserVolumes(infra.config.Spec)
inspectMounts, err = infra.GetMounts(namedVolumes, infra.config.ImageVolumes, mounts)
infraSecurity = infra.GetSecurityOptions()
@@ -749,6 +752,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
CPUSetCPUs: p.ResourceLim().CPU.Cpus,
CPUPeriod: p.CPUPeriod(),
CPUQuota: p.CPUQuota(),
+ MemoryLimit: p.MemoryLimit(),
Mounts: inspectMounts,
Devices: devices,
BlkioDeviceReadBps: deviceLimits,
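
The pod lifecycle methods (Stop, Cleanup, Pause, Unpause, Kill) all share the same error-collection shape: per-container errors are gathered, expected state errors are skipped with errors.Is, and a wrapped ErrPodPartialFail is returned when anything remains. This standalone sketch is not from the patch and flattens the parallel result channels into a map purely to show that pattern; the sentinel names are stand-ins for the define package errors.

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for define.ErrPodPartialFail and define.ErrCtrStateInvalid.
var (
	errPodPartialFail  = errors.New("some containers failed")
	errCtrStateInvalid = errors.New("container state improper")
)

// collectErrors skips expected state errors, records the rest per container,
// and wraps the partial-fail sentinel if anything was recorded.
func collectErrors(results map[string]error) (map[string]error, error) {
	ctrErrors := make(map[string]error)
	for id, err := range results {
		if err != nil && !errors.Is(err, errCtrStateInvalid) {
			ctrErrors[id] = err
		}
	}
	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("error stopping some containers: %w", errPodPartialFail)
	}
	return nil, nil
}

func main() {
	out, err := collectErrors(map[string]error{
		"ctr-a": fmt.Errorf("stop: %w", errCtrStateInvalid), // skipped
		"ctr-b": errors.New("runtime exploded"),             // recorded
	})
	fmt.Println(len(out), errors.Is(err, errPodPartialFail)) // 1 true
}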
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 41f745e6c..a86cd6d21 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -9,7 +9,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/storage/pkg/stringid"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -39,7 +38,7 @@ func (p *Pod) updatePod() error {
// Save pod state to database
func (p *Pod) save() error {
if err := p.runtime.state.SavePod(p); err != nil {
- return errors.Wrapf(err, "error saving pod %s state", p.ID())
+ return fmt.Errorf("error saving pod %s state: %w", p.ID(), err)
}
return nil
@@ -61,7 +60,7 @@ func (p *Pod) refresh() error {
// Retrieve the pod's lock
lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock %d for pod %s", p.config.LockID, p.ID())
+ return fmt.Errorf("error retrieving lock %d for pod %s: %w", p.config.LockID, p.ID(), err)
}
p.lock = lock
@@ -69,7 +68,7 @@ func (p *Pod) refresh() error {
if p.config.UsePodCgroup {
switch p.runtime.config.Engine.CgroupManager {
case config.SystemdCgroupsManager:
- cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()))
+ cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()), p.ResourceLim())
if err != nil {
logrus.Errorf("Creating Cgroup for pod %s: %v", p.ID(), err)
}
@@ -81,7 +80,7 @@ func (p *Pod) refresh() error {
logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
}
default:
- return errors.Wrapf(define.ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.Engine.CgroupManager)
+ return fmt.Errorf("unknown cgroups manager %s specified: %w", p.runtime.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}
diff --git a/libpod/pod_top_linux.go b/libpod/pod_top_linux.go
index 83a070807..544126dcd 100644
--- a/libpod/pod_top_linux.go
+++ b/libpod/pod_top_linux.go
@@ -53,7 +53,7 @@ func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) {
}
}
- // TODO: psgo returns a [][]string to give users the ability to apply
+ // NOTE: psgo returns a [][]string to give users the ability to apply
// filters on the data. We need to change the API here to return
// a [][]string if we want to make use of filtering.
opts := psgo.JoinNamespaceOpts{FillMappings: rootless.IsRootless()}
diff --git a/libpod/reset.go b/libpod/reset.go
index 28d0ee3f6..b3ece03bf 100644
--- a/libpod/reset.go
+++ b/libpod/reset.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -13,12 +14,81 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+// removeAllDirs removes all Podman storage directories. It is intended to be
+// used as a backup for reset() when that function cannot be used due to
+// failures in initializing libpod.
+// It does not expect that all the directories match what is in use by Podman,
+// as this is a common failure point for `system reset`. As such, our ability to
+// interface with containers and pods is somewhat limited.
+// This function assumes that we do not have a working c/storage store.
+func (r *Runtime) removeAllDirs() error {
+ var lastErr error
+
+ // Grab the runtime alive lock.
+ // This ensures that no other Podman process can run while we are doing
+ // a reset, so no race conditions with containers/pods/etc being created
+ // while we are resetting storage.
+ // TODO: maybe want a helper for getting the path? This is duped from
+ // runtime.go
+ runtimeAliveLock := filepath.Join(r.config.Engine.TmpDir, "alive.lck")
+ aliveLock, err := storage.GetLockfile(runtimeAliveLock)
+ if err != nil {
+ logrus.Errorf("Lock runtime alive lock %s: %v", runtimeAliveLock, err)
+ } else {
+ aliveLock.Lock()
+ defer aliveLock.Unlock()
+ }
+
+ // We do not have a store - so we can't really try and remove containers
+ // or pods or volumes...
+ // Try and remove the directories, in hopes that they are unmounted.
+ // This is likely to fail but it's the best we can do.
+
+ // Volume path
+ if err := os.RemoveAll(r.config.Engine.VolumePath); err != nil {
+ lastErr = fmt.Errorf("removing volume path: %w", err)
+ }
+
+ // Tmpdir
+ if err := os.RemoveAll(r.config.Engine.TmpDir); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Reset: %v", lastErr)
+ }
+ lastErr = fmt.Errorf("removing tmp dir: %w", err)
+ }
+
+ // Runroot
+ if err := os.RemoveAll(r.storageConfig.RunRoot); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Reset: %v", lastErr)
+ }
+ lastErr = fmt.Errorf("removing run root: %w", err)
+ }
+
+ // Static dir
+ if err := os.RemoveAll(r.config.Engine.StaticDir); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Reset: %v", lastErr)
+ }
+ lastErr = fmt.Errorf("removing static dir: %w", err)
+ }
+
+ // Graph root
+ if err := os.RemoveAll(r.storageConfig.GraphRoot); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Reset: %v", lastErr)
+ }
+ lastErr = fmt.Errorf("removing graph root: %w", err)
+ }
+
+ return lastErr
+}
+
// Reset removes all storage
-func (r *Runtime) Reset(ctx context.Context) error {
+func (r *Runtime) reset(ctx context.Context) error {
var timeout *uint
pods, err := r.GetAllPods()
if err != nil {
@@ -26,7 +96,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
}
for _, p := range pods {
if err := r.RemovePod(ctx, p, true, true, timeout); err != nil {
- if errors.Cause(err) == define.ErrNoSuchPod {
+ if errors.Is(err, define.ErrNoSuchPod) {
continue
}
logrus.Errorf("Removing Pod %s: %v", p.ID(), err)
@@ -41,7 +111,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
for _, c := range ctrs {
if err := r.RemoveContainer(ctx, c, true, true, timeout); err != nil {
if err := r.RemoveStorageContainer(c.ID(), true); err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
continue
}
logrus.Errorf("Removing container %s: %v", c.ID(), err)
@@ -64,7 +134,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
}
for _, v := range volumes {
if err := r.RemoveVolume(ctx, v, true, timeout); err != nil {
- if errors.Cause(err) == define.ErrNoSuchVolume {
+ if errors.Is(err, define.ErrNoSuchVolume) {
continue
}
logrus.Errorf("Removing volume %s: %v", v.config.Name, err)
@@ -94,7 +164,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
if prevError != nil {
logrus.Error(prevError)
}
- prevError = errors.Errorf("failed to remove runtime graph root dir %s, since it is the same as XDG_RUNTIME_DIR", graphRoot)
+ prevError = fmt.Errorf("failed to remove runtime graph root dir %s, since it is the same as XDG_RUNTIME_DIR", graphRoot)
} else {
if err := os.RemoveAll(graphRoot); err != nil {
if prevError != nil {
@@ -108,7 +178,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
if prevError != nil {
logrus.Error(prevError)
}
- prevError = errors.Errorf("failed to remove runtime root dir %s, since it is the same as XDG_RUNTIME_DIR", runRoot)
+ prevError = fmt.Errorf("failed to remove runtime root dir %s, since it is the same as XDG_RUNTIME_DIR", runRoot)
} else {
if err := os.RemoveAll(runRoot); err != nil {
if prevError != nil {
@@ -129,7 +199,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
if prevError != nil {
logrus.Error(prevError)
}
- prevError = errors.Errorf("failed to remove runtime tmpdir %s, since it is the same as XDG_RUNTIME_DIR", tempDir)
+ prevError = fmt.Errorf("failed to remove runtime tmpdir %s, since it is the same as XDG_RUNTIME_DIR", tempDir)
} else {
if err := os.RemoveAll(tempDir); err != nil {
if prevError != nil {
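
removeAllDirs deliberately keeps going after a failure: each earlier error is logged just before it is superseded, and only the most recent one is returned. The sketch below is not part of the patch; removeDirs and the example paths are hypothetical, and it only demonstrates that log-and-continue accumulation pattern.

package main

import (
	"fmt"
	"log"
	"os"
)

// removeDirs attempts every removal, logging any error that is about to be
// replaced and returning only the last one, like removeAllDirs does.
func removeDirs(paths ...string) error {
	var lastErr error
	for _, p := range paths {
		if err := os.RemoveAll(p); err != nil {
			if lastErr != nil {
				log.Printf("Reset: %v", lastErr)
			}
			lastErr = fmt.Errorf("removing %s: %w", p, err)
		}
	}
	return lastErr
}

func main() {
	// Hypothetical paths; in libpod these come from the runtime and storage configs.
	if err := removeDirs("/tmp/example-volumes", "/tmp/example-runroot"); err != nil {
		log.Println(err)
	}
}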
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4efa7b8e8..ea4b34954 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
"fmt"
"os"
"os/exec"
@@ -11,6 +12,7 @@ import (
"regexp"
"strconv"
"strings"
+ "sync"
"syscall"
"time"
@@ -39,7 +41,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/docker/docker/pkg/namesgenerator"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -87,14 +88,18 @@ type Runtime struct {
lockManager lock.Manager
// Worker
- workerShutdown chan bool
- workerChannel chan func()
+ workerChannel chan func()
+ workerGroup sync.WaitGroup
// syslog describes whenever logrus should log to the syslog as well.
// Note that the syslog hook will be enabled early in cmd/podman/syslog_linux.go
// This bool is just needed so that we can set it for netavark interface.
syslog bool
+ // doReset indicates that the runtime should perform a system reset.
+ // All Podman files will be removed.
+ doReset bool
+
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
// Once the runtime has been initialized and returned, this variable is
@@ -130,7 +135,7 @@ func SetXdgDirs() error {
return nil
}
- // Setup XDG_RUNTIME_DIR
+ // Set up XDG_RUNTIME_DIR
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir == "" {
@@ -141,7 +146,7 @@ func SetXdgDirs() error {
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
+ return fmt.Errorf("cannot set XDG_RUNTIME_DIR: %w", err)
}
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
@@ -151,14 +156,14 @@ func SetXdgDirs() error {
}
}
- // Setup XDG_CONFIG_HOME
+ // Set up XDG_CONFIG_HOME
if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
if err != nil {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
+ return fmt.Errorf("cannot set XDG_CONFIG_HOME: %w", err)
}
}
return nil
@@ -209,7 +214,7 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
// Overwrite config with user-given configuration options
for _, opt := range options {
if err := opt(runtime); err != nil {
- return nil, errors.Wrapf(err, "error configuring runtime")
+ return nil, fmt.Errorf("error configuring runtime: %w", err)
}
}
@@ -220,12 +225,12 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
}
os.Exit(1)
return nil
- }); err != nil && errors.Cause(err) != shutdown.ErrHandlerExists {
+ }); err != nil && !errors.Is(err, shutdown.ErrHandlerExists) {
logrus.Errorf("Registering shutdown handler for libpod: %v", err)
}
if err := shutdown.Start(); err != nil {
- return nil, errors.Wrapf(err, "error starting shutdown signal handler")
+ return nil, fmt.Errorf("error starting shutdown signal handler: %w", err)
}
if err := makeRuntime(runtime); err != nil {
@@ -234,6 +239,11 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
runtime.config.CheckCgroupsAndAdjustConfig()
+ // If resetting storage, do *not* return a runtime.
+ if runtime.doReset {
+ return nil, nil
+ }
+
return runtime, nil
}
@@ -246,10 +256,10 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
manager, err = lock.NewFileLockManager(lockPath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get new file lock manager")
+ return nil, fmt.Errorf("failed to get new file lock manager: %w", err)
}
} else {
return nil, err
@@ -265,19 +275,19 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
switch {
- case os.IsNotExist(errors.Cause(err)):
+ case errors.Is(err, os.ErrNotExist):
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get new shm lock manager")
+ return nil, fmt.Errorf("failed to get new shm lock manager: %w", err)
}
- case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber:
+ case errors.Is(err, syscall.ERANGE) && runtime.doRenumber:
logrus.Debugf("Number of locks does not match - removing old locks")
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
- return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ return nil, fmt.Errorf("error removing libpod locks file %s: %w", lockPath, err)
}
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
@@ -289,7 +299,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
}
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType)
+ return nil, fmt.Errorf("unknown lock type %s: %w", runtime.config.Engine.LockType, define.ErrInvalidArg)
}
return manager, nil
}
@@ -304,11 +314,18 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
runtime.conmonPath = cPath
+ if runtime.noStore && runtime.doReset {
+ return fmt.Errorf("cannot perform system reset if runtime is not creating a store: %w", define.ErrInvalidArg)
+ }
+ if runtime.doReset && runtime.doRenumber {
+ return fmt.Errorf("cannot perform system reset while renumbering locks: %w", define.ErrInvalidArg)
+ }
+
// Make the static files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating runtime static files directory")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating runtime static files directory: %w", err)
}
}
@@ -320,9 +337,9 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// package.
switch runtime.config.Engine.StateType {
case config.InMemoryStateStore:
- return errors.Wrapf(define.ErrInvalidArg, "in-memory state is currently disabled")
+ return fmt.Errorf("in-memory state is currently disabled: %w", define.ErrInvalidArg)
case config.SQLiteStateStore:
- return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
+ return fmt.Errorf("SQLite state is currently disabled: %w", define.ErrInvalidArg)
case config.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")
@@ -332,13 +349,27 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
runtime.state = state
default:
- return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType)
+ return fmt.Errorf("unrecognized state type passed (%v): %w", runtime.config.Engine.StateType, define.ErrInvalidArg)
}
// Grab config from the database so we can reset some defaults
dbConfig, err := runtime.state.GetDBConfig()
if err != nil {
- return errors.Wrapf(err, "error retrieving runtime configuration from database")
+ if runtime.doReset {
+ // We can at least delete the DB and the static files
+ // directory.
+ // Can't safely touch anything else because we aren't
+ // sure of other directories.
+ if err := runtime.state.Close(); err != nil {
+ logrus.Errorf("Closing database connection: %v", err)
+ } else {
+ if err := os.RemoveAll(runtime.config.Engine.StaticDir); err != nil {
+ logrus.Errorf("Removing static files directory %v: %v", runtime.config.Engine.StaticDir, err)
+ }
+ }
+ }
+
+ return fmt.Errorf("error retrieving runtime configuration from database: %w", err)
}
runtime.mergeDBConfig(dbConfig)
@@ -371,11 +402,17 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Validate our config against the database, now that we've set our
// final storage configuration
if err := runtime.state.ValidateDBConfig(runtime); err != nil {
- return err
+ // If we are performing a storage reset: continue on with a
+ // warning. Otherwise we can't `system reset` after a change to
+ // the core paths.
+ if !runtime.doReset {
+ return err
+ }
+ logrus.Errorf("Runtime paths differ from those stored in database, storage reset may not remove all files")
}
if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
- return errors.Wrapf(err, "error setting libpod namespace in state")
+ return fmt.Errorf("error setting libpod namespace in state: %w", err)
}
logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)
@@ -393,6 +430,14 @@ func makeRuntime(runtime *Runtime) (retErr error) {
} else if runtime.noStore {
logrus.Debug("No store required. Not opening container store.")
} else if err := runtime.configureStore(); err != nil {
+ // Make a best-effort attempt to clean up if performing a
+ // storage reset.
+ if runtime.doReset {
+ if err := runtime.removeAllDirs(); err != nil {
+ logrus.Errorf("Removing libpod directories: %v", err)
+ }
+ }
+
return err
}
defer func() {
@@ -405,13 +450,12 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
}()
- // Setup the eventer
+ // Set up the eventer
eventer, err := runtime.newEventer()
if err != nil {
return err
}
runtime.eventer = eventer
- // TODO: events for libimage
// Set up containers/image
if runtime.imageContext == nil {
@@ -424,16 +468,16 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating tmpdir")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating tmpdir: %w", err)
}
}
// Create events log dir
if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating events dirs")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating events dirs: %w", err)
}
}
@@ -470,7 +514,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
} else {
ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
if !ok {
- return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime)
+ return fmt.Errorf("default OCI runtime %q not found: %w", runtime.config.Engine.OCIRuntime, define.ErrInvalidArg)
}
runtime.defaultOCIRuntime = ociRuntime
}
@@ -479,23 +523,23 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Do we have at least one valid OCI runtime?
if len(runtime.ociRuntimes) == 0 {
- return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
+ return fmt.Errorf("no OCI runtime has been configured: %w", define.ErrInvalidArg)
}
// Do we have a default runtime?
if runtime.defaultOCIRuntime == nil {
- return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
+ return fmt.Errorf("no default OCI runtime was configured: %w", define.ErrInvalidArg)
}
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating runtime temporary files directory")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating runtime temporary files directory: %w", err)
}
}
- // the store is only setup when we are in the userns so we do the same for the network interface
+ // the store is only set up when we are in the userns so we do the same for the network interface
if !needsUserns {
netBackend, netInterface, err := network.NetworkBackend(runtime.store, runtime.config, runtime.syslog)
if err != nil {
@@ -512,12 +556,10 @@ func makeRuntime(runtime *Runtime) (retErr error) {
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
aliveLock, err := storage.GetLockfile(runtimeAliveLock)
if err != nil {
- return errors.Wrapf(err, "error acquiring runtime init lock")
+ return fmt.Errorf("error acquiring runtime init lock: %w", err)
}
// Acquire the lock and hold it until we return
// This ensures that no two processes will be in runtime.refresh at once
- // TODO: we can't close the FD in this lock, so we should keep it around
- // and use it to lock important operations
aliveLock.Lock()
doRefresh := false
defer func() {
@@ -544,7 +586,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
pausePid, err := util.GetRootlessPauseProcessPidPathGivenDir(runtime.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
if err != nil {
@@ -565,10 +607,10 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// This will trigger on first use as well, but refreshing an
// empty state only creates a single file
// As such, it's not really a performance concern
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
doRefresh = true
} else {
- return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
+ return fmt.Errorf("error reading runtime status file %s: %w", runtimeAliveFile, err)
}
}
@@ -577,6 +619,18 @@ func makeRuntime(runtime *Runtime) (retErr error) {
return err
}
+ // If we're resetting storage, do it now.
+ // We will not return a valid runtime.
+ // TODO: Plumb this context out so it can be set.
+ if runtime.doReset {
+ // Mark the runtime as valid, so normal functionality "mostly"
+ // works and we can use regular functions to remove
+ // ctrs/pods/etc
+ runtime.valid = true
+
+ return runtime.reset(context.Background())
+ }
+
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
// runtime.
@@ -650,14 +704,14 @@ func findConmon(conmonPaths []string) (string, error) {
}
if foundOutdatedConmon {
- return "", errors.Wrapf(define.ErrConmonOutdated,
- "please update to v%d.%d.%d or later",
- conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion)
+ return "", fmt.Errorf(
+ "please update to v%d.%d.%d or later: %w",
+ conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion, define.ErrConmonOutdated)
}
- return "", errors.Wrapf(define.ErrInvalidArg,
- "could not find a working conmon binary (configured options: %v)",
- conmonPaths)
+ return "", fmt.Errorf(
+ "could not find a working conmon binary (configured options: %v): %w",
+ conmonPaths, define.ErrInvalidArg)
}
// probeConmon calls conmon --version and verifies it is a new enough version for
@@ -674,11 +728,11 @@ func probeConmon(conmonBinary string) error {
matches := r.FindStringSubmatch(out.String())
if len(matches) != 4 {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
major, err := strconv.Atoi(matches[1])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if major < conmonMinMajorVersion {
return define.ErrConmonOutdated
@@ -689,7 +743,7 @@ func probeConmon(conmonBinary string) error {
minor, err := strconv.Atoi(matches[2])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if minor < conmonMinMinorVersion {
return define.ErrConmonOutdated
@@ -700,7 +754,7 @@ func probeConmon(conmonBinary string) error {
patch, err := strconv.Atoi(matches[3])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if patch < conmonMinPatchVersion {
return define.ErrConmonOutdated
@@ -744,7 +798,7 @@ func (r *Runtime) GetConfig() (*config.Config, error) {
// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(rtConfig, config); err != nil {
- return nil, errors.Wrapf(err, "error copying config")
+ return nil, fmt.Errorf("error copying config: %w", err)
}
return config, nil
@@ -820,15 +874,12 @@ func (r *Runtime) DeferredShutdown(force bool) {
// still containers running or mounted
func (r *Runtime) Shutdown(force bool) error {
if !r.valid {
- return define.ErrRuntimeStopped
+ return nil
}
- if r.workerShutdown != nil {
- // Signal the worker routine to shutdown. The routine will
- // process all pending work items and then read from the
- // channel; we're blocked until all work items have been
- // processed.
- r.workerShutdown <- true
+ if r.workerChannel != nil {
+ r.workerGroup.Wait()
+ close(r.workerChannel)
}
r.valid = false
@@ -858,7 +909,7 @@ func (r *Runtime) Shutdown(force bool) error {
// Note that the libimage runtime shuts down the store.
if err := r.libimageRuntime.Shutdown(force); err != nil {
- lastError = errors.Wrapf(err, "error shutting down container storage")
+ lastError = fmt.Errorf("error shutting down container storage: %w", err)
}
}
if err := r.state.Close(); err != nil {
@@ -890,15 +941,15 @@ func (r *Runtime) refresh(alivePath string) error {
// Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
- return errors.Wrapf(err, "error retrieving all containers from state")
+ return fmt.Errorf("error retrieving all containers from state: %w", err)
}
pods, err := r.state.AllPods()
if err != nil {
- return errors.Wrapf(err, "error retrieving all pods from state")
+ return fmt.Errorf("error retrieving all pods from state: %w", err)
}
vols, err := r.state.AllVolumes()
if err != nil {
- return errors.Wrapf(err, "error retrieving all volumes from state")
+ return fmt.Errorf("error retrieving all volumes from state: %w", err)
}
// No locks are taken during pod, volume, and container refresh.
// Furthermore, the pod/volume/container refresh() functions are not
@@ -926,7 +977,7 @@ func (r *Runtime) refresh(alivePath string) error {
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
- return errors.Wrap(err, "error creating runtime status file")
+ return fmt.Errorf("error creating runtime status file: %w", err)
}
defer file.Close()
@@ -947,13 +998,13 @@ func (r *Runtime) generateName() (string, error) {
// Make sure container with this name does not exist
if _, err := r.state.LookupContainer(name); err == nil {
continue
- } else if errors.Cause(err) != define.ErrNoSuchCtr {
+ } else if !errors.Is(err, define.ErrNoSuchCtr) {
return "", err
}
// Make sure pod with this name does not exist
if _, err := r.state.LookupPod(name); err == nil {
continue
- } else if errors.Cause(err) != define.ErrNoSuchPod {
+ } else if !errors.Is(err, define.ErrNoSuchPod) {
return "", err
}
return name, nil
@@ -1072,7 +1123,7 @@ func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" {
if r.storageConfig.GraphDriverName != dbConfig.GraphDriver &&
r.storageConfig.GraphDriverName != "" {
- logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
+ logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve. May prevent use of images created by other tools",
r.storageConfig.GraphDriverName, dbConfig.GraphDriver)
}
r.storageConfig.GraphDriverName = dbConfig.GraphDriver
@@ -1143,19 +1194,21 @@ func (r *Runtime) reloadStorageConf() error {
return nil
}
-// getVolumePlugin gets a specific volume plugin given its name.
-func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) {
+// getVolumePlugin gets a specific volume plugin.
+func (r *Runtime) getVolumePlugin(volConfig *VolumeConfig) (*plugin.VolumePlugin, error) {
// There is no plugin for local.
+ name := volConfig.Driver
+ timeout := volConfig.Timeout
if name == define.VolumeDriverLocal || name == "" {
return nil, nil
}
pluginPath, ok := r.config.Engine.VolumePlugins[name]
if !ok {
- return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name)
+ return nil, fmt.Errorf("no volume plugin with name %s available: %w", name, define.ErrMissingPlugin)
}
- return plugin.GetVolumePlugin(name, pluginPath)
+ return plugin.GetVolumePlugin(name, pluginPath, timeout)
}
// GetSecretsStorageDir returns the directory that the secrets manager should take
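getVolumePlugin now takes the whole volume config so the per-volume timeout can be forwarded to plugin.GetVolumePlugin. A small sketch of the lookup shape, with stand-in types and a stand-in sentinel instead of the real libpod ones:

package main

import (
    "errors"
    "fmt"
)

// errMissingPlugin stands in for define.ErrMissingPlugin.
var errMissingPlugin = errors.New("required plugin missing")

// volumeConfig is an illustrative subset of the real VolumeConfig.
type volumeConfig struct {
    Driver  string
    Timeout uint
}

func getVolumePluginPath(plugins map[string]string, cfg *volumeConfig) (string, error) {
    // "local" (or empty) means no plugin at all.
    if cfg.Driver == "" || cfg.Driver == "local" {
        return "", nil
    }
    path, ok := plugins[cfg.Driver]
    if !ok {
        return "", fmt.Errorf("no volume plugin with name %s available: %w", cfg.Driver, errMissingPlugin)
    }
    // The real code also forwards cfg.Timeout to the plugin client here.
    return path, nil
}

func main() {
    plugins := map[string]string{"my-driver": "/run/podman/plugins/my-driver.sock"}
    fmt.Println(getVolumePluginPath(plugins, &volumeConfig{Driver: "my-driver", Timeout: 5}))
    _, err := getVolumePluginPath(plugins, &volumeConfig{Driver: "other"})
    fmt.Println(errors.Is(err, errMissingPlugin)) // true
}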
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index 1c528e1b8..047375628 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -1,11 +1,12 @@
package libpod
import (
+ "errors"
+ "fmt"
"time"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,7 +39,7 @@ func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) {
// Look up if container is in state
hasCtr, err := r.state.HasContainer(ctr.ID)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container %s in state", ctr.ID)
+ return nil, fmt.Errorf("error looking up container %s in state: %w", ctr.ID, err)
}
storageCtr.PresentInLibpod = hasCtr
@@ -60,20 +61,20 @@ func (r *Runtime) StorageContainer(idOrName string) (*storage.Container, error)
func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
targetID, err := r.store.Lookup(idOrName)
if err != nil {
- if errors.Cause(err) == storage.ErrLayerUnknown {
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID or name %q found", idOrName)
+ if errors.Is(err, storage.ErrLayerUnknown) {
+ return fmt.Errorf("no container with ID or name %q found: %w", idOrName, define.ErrNoSuchCtr)
}
- return errors.Wrapf(err, "error looking up container %q", idOrName)
+ return fmt.Errorf("error looking up container %q: %w", idOrName, err)
}
// Lookup returns an ID but it's not guaranteed to be a container ID.
// So we can still error here.
ctr, err := r.store.Container(targetID)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
- return errors.Wrapf(define.ErrNoSuchCtr, "%q does not refer to a container", idOrName)
+ if errors.Is(err, storage.ErrContainerUnknown) {
+ return fmt.Errorf("%q does not refer to a container: %w", idOrName, define.ErrNoSuchCtr)
}
- return errors.Wrapf(err, "error retrieving container %q", idOrName)
+ return fmt.Errorf("error retrieving container %q: %w", idOrName, err)
}
// Error out if the container exists in libpod
@@ -82,13 +83,13 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
return err
}
if exists {
- return errors.Wrapf(define.ErrCtrExists, "refusing to remove %q as it exists in libpod as container %s", idOrName, ctr.ID)
+ return fmt.Errorf("refusing to remove %q as it exists in libpod as container %s: %w", idOrName, ctr.ID, define.ErrCtrExists)
}
if !force {
timesMounted, err := r.store.Mounted(ctr.ID)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
// Container was removed from under us.
// It's gone, so don't bother erroring.
logrus.Infof("Storage for container %s already removed", ctr.ID)
@@ -97,7 +98,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
logrus.Warnf("Checking if container %q is mounted, attempting to delete: %v", idOrName, err)
}
if timesMounted > 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName)
+ return fmt.Errorf("container %q is mounted and cannot be removed without using force: %w", idOrName, define.ErrCtrStateInvalid)
}
} else if _, err := r.store.Unmount(ctr.ID, true); err != nil {
if errors.Is(err, storage.ErrContainerUnknown) {
@@ -109,12 +110,12 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
}
if err := r.store.DeleteContainer(ctr.ID); err != nil {
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
// Container again gone, no error
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
}
- return errors.Wrapf(err, "error removing storage for container %q", idOrName)
+ return fmt.Errorf("error removing storage for container %q: %w", idOrName, err)
}
return nil
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 2eaa77572..ce0fd869d 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -13,6 +14,7 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/libpod/shutdown"
@@ -25,7 +27,6 @@ import (
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -85,7 +86,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
ctr, err := r.initContainerVariables(rSpec, config)
if err != nil {
- return nil, errors.Wrapf(err, "error initializing container variables")
+ return nil, fmt.Errorf("error initializing container variables: %w", err)
}
// For an imported checkpoint no one has ever set the StartedTime. Set it now.
ctr.state.StartedTime = time.Now()
@@ -125,7 +126,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
// the config was re-written.
newConf, err := r.state.GetContainerConfig(ctr.ID())
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", ctr.ID())
+ return nil, fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", ctr.ID(), err)
}
ctr.config = newConf
@@ -142,7 +143,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
// Set config back to the old name to reflect what is actually
// present in the DB.
ctr.config.Name = oldName
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ return nil, fmt.Errorf("error renaming container %s: %w", ctr.ID(), err)
}
// Step 3: rename the container in c/storage.
@@ -161,7 +162,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
if rSpec == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container")
+ return nil, fmt.Errorf("must provide a valid runtime spec to create container: %w", define.ErrInvalidArg)
}
ctr := new(Container)
ctr.config = new(ContainerConfig)
@@ -171,7 +172,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
ctr.config.ID = stringid.GenerateNonCryptoID()
size, err := units.FromHumanSize(r.config.Containers.ShmSize)
if err != nil {
- return nil, errors.Wrapf(err, "converting containers.conf ShmSize %s to an int", r.config.Containers.ShmSize)
+ return nil, fmt.Errorf("converting containers.conf ShmSize %s to an int: %w", r.config.Containers.ShmSize, err)
}
ctr.config.ShmSize = size
ctr.config.NoShm = false
@@ -183,7 +184,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
// This is a restore from an imported checkpoint
ctr.restoreFromCheckpoint = true
if err := JSONDeepCopy(config, ctr.config); err != nil {
- return nil, errors.Wrapf(err, "error copying container config for restore")
+ return nil, fmt.Errorf("error copying container config for restore: %w", err)
}
// If the ID is empty a new name for the restored container was requested
if ctr.config.ID == "" {
@@ -223,12 +224,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr, err = r.initContainerVariables(rSpec, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error initializing container variables")
+ return nil, fmt.Errorf("error initializing container variables: %w", err)
}
for _, option := range options {
if err := option(ctr); err != nil {
- return nil, errors.Wrapf(err, "error running container create option")
+ return nil, fmt.Errorf("error running container create option: %w", err)
}
}
@@ -246,8 +247,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
for _, opts := range ctr.config.Networks {
if opts.InterfaceName != "" {
// check that no name is assigned to more than one network
- if util.StringInSlice(opts.InterfaceName, usedIfNames) {
- return nil, errors.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
+ if cutil.StringInSlice(opts.InterfaceName, usedIfNames) {
+ return nil, fmt.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
}
usedIfNames = append(usedIfNames, opts.InterfaceName)
}
@@ -262,7 +263,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if opts.InterfaceName == "" {
for i < 100000 {
ifName := fmt.Sprintf("eth%d", i)
- if !util.StringInSlice(ifName, usedIfNames) {
+ if !cutil.StringInSlice(ifName, usedIfNames) {
opts.InterfaceName = ifName
usedIfNames = append(usedIfNames, ifName)
break
@@ -295,7 +296,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Allocate a lock for the container
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new container")
+ return nil, fmt.Errorf("error allocating lock for new container: %w", err)
}
ctr.lock = lock
ctr.config.LockID = ctr.lock.ID()
@@ -318,7 +319,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
} else {
ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime]
if !ok {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime)
+ return nil, fmt.Errorf("requested OCI runtime %s is not available: %w", ctr.config.OCIRuntime, define.ErrInvalidArg)
}
ctr.ociRuntime = ociRuntime
}
@@ -326,7 +327,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Check NoCgroups support
if ctr.config.NoCgroups {
if !ctr.ociRuntime.SupportsNoCgroups() {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not compatible with NoCgroups", ctr.ociRuntime.Name())
+ return nil, fmt.Errorf("requested OCI runtime %s is not compatible with NoCgroups: %w", ctr.ociRuntime.Name(), define.ErrInvalidArg)
}
}
@@ -335,7 +336,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Get the pod from state
pod, err = r.state.Pod(ctr.config.Pod)
if err != nil {
- return nil, errors.Wrapf(err, "cannot add container %s to pod %s", ctr.ID(), ctr.config.Pod)
+ return nil, fmt.Errorf("cannot add container %s to pod %s: %w", ctr.ID(), ctr.config.Pod, err)
}
}
@@ -349,14 +350,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra() {
podCgroup, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
+ return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
}
expectPodCgroup, err := ctr.expectPodCgroup()
if err != nil {
return nil, err
}
if expectPodCgroup && podCgroup == "" {
- return nil, errors.Wrapf(define.ErrInternal, "pod %s cgroup is not set", pod.ID())
+ return nil, fmt.Errorf("pod %s cgroup is not set: %w", pod.ID(), define.ErrInternal)
}
canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(podCgroup)
if canUseCgroup {
@@ -366,7 +367,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.CgroupParent = CgroupfsDefaultCgroupParent
}
} else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
}
case config.SystemdCgroupsManager:
if ctr.config.CgroupParent == "" {
@@ -374,7 +375,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
case pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra():
podCgroup, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
+ return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
}
ctr.config.CgroupParent = podCgroup
case rootless.IsRootless() && ctr.config.CgroupsMode != cgroupSplit:
@@ -383,10 +384,10 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
+ return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}
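The validation above applies opposite rules per cgroup manager: cgroupfs rejects a systemd slice as the cgroup parent, while the systemd manager requires one. A condensed, self-contained sketch of that rule, using illustrative manager names rather than the real config constants:

package main

import (
    "errors"
    "fmt"
    "path"
    "strings"
)

func validateCgroupParent(manager, parent string) error {
    isSlice := strings.HasSuffix(path.Base(parent), ".slice")
    switch manager {
    case "cgroupfs":
        if isSlice {
            return errors.New("systemd slice received as cgroup parent when using cgroupfs")
        }
    case "systemd":
        if parent != "" && (len(parent) < 6 || !isSlice) {
            return errors.New("did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
        }
    default:
        return fmt.Errorf("unsupported cgroup manager: %s", manager)
    }
    return nil
}

func main() {
    fmt.Println(validateCgroupParent("systemd", "machine.slice")) // <nil>
    fmt.Println(validateCgroupParent("cgroupfs", "machine.slice")) // error
}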
@@ -469,8 +470,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
// The volume exists, we're good
continue
- } else if errors.Cause(err) != define.ErrNoSuchVolume {
- return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
+ } else if !errors.Is(err, define.ErrNoSuchVolume) {
+ return nil, fmt.Errorf("error retrieving named volume %s for new container: %w", vol.Name, err)
}
}
@@ -501,9 +502,9 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
volOptions = append(volOptions, parsedOptions...)
}
}
- newVol, err := r.newVolume(volOptions...)
+ newVol, err := r.newVolume(false, volOptions...)
if err != nil {
- return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name)
+ return nil, fmt.Errorf("error creating named volume %q: %w", vol.Name, err)
}
ctrNamedVolumes = append(ctrNamedVolumes, newVol)
@@ -526,7 +527,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.ShmDir = filepath.Join(ctr.bundlePath(), "shm")
if err := os.MkdirAll(ctr.config.ShmDir, 0700); err != nil {
if !os.IsExist(err) {
- return nil, errors.Wrap(err, "unable to create shm dir")
+ return nil, fmt.Errorf("unable to create shm dir: %w", err)
}
}
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
@@ -595,7 +596,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// exist once we're done.
newConf, err := r.state.GetContainerConfig(c.ID())
if err != nil {
- return errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", c.ID())
+ return fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", c.ID(), err)
}
c.config = newConf
@@ -610,12 +611,12 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if c.config.Pod != "" && !removePod {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), pod.ID())
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
}
// Lock the pod while we're removing container
if pod.config.LockID == c.config.LockID {
- return errors.Wrapf(define.ErrWillDeadlock, "container %s and pod %s share lock ID %d", c.ID(), pod.ID(), c.config.LockID)
+ return fmt.Errorf("container %s and pod %s share lock ID %d: %w", c.ID(), pod.ID(), c.config.LockID, define.ErrWillDeadlock)
}
pod.lock.Lock()
defer pod.lock.Unlock()
@@ -625,7 +626,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
infraID := pod.state.InfraContainerID
if c.ID() == infraID {
- return errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+ return fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
}
}
@@ -663,9 +664,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
if c.state.State == define.ContainerStatePaused {
- if err := c.ociRuntime.KillContainer(c, 9, false); err != nil {
- return err
- }
isV2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
return err
@@ -676,6 +674,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
return err
}
}
+ if err := c.ociRuntime.KillContainer(c, 9, false); err != nil {
+ return err
+ }
// Need to update container state to make sure we know it's stopped
if err := c.waitForExitFileAndSync(); err != nil {
return err
@@ -692,7 +693,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
- return errors.Wrapf(define.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
+ return fmt.Errorf("container %s has dependent containers which must be removed before it: %s: %w", c.ID(), depsStr, define.ErrCtrExists)
}
}
@@ -704,8 +705,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
// Ignore ErrConmonDead - we couldn't retrieve the container's
// exit code properly, but it's still stopped.
- if err := c.stop(time); err != nil && errors.Cause(err) != define.ErrConmonDead {
- return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
+ if err := c.stop(time); err != nil && !errors.Is(err, define.ErrConmonDead) {
+ return fmt.Errorf("cannot remove container %s as it could not be stopped: %w", c.ID(), err)
}
// We unlocked as part of stop() above - there's a chance someone
@@ -714,6 +715,10 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Do a quick ping of the database to check if the container
// still exists.
if ok, _ := r.state.HasContainer(c.ID()); !ok {
+ // When the container has already been removed, the OCI runtime directory remains.
+ if err := c.cleanupRuntime(ctx); err != nil {
+ return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err)
+ }
return nil
}
}
@@ -724,7 +729,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Do this before we set ContainerStateRemoving, to ensure that we can
// actually remove from the OCI runtime.
if err := c.cleanup(ctx); err != nil {
- cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
+ cleanupErr = fmt.Errorf("error cleaning up container %s: %w", c.ID(), err)
}
// Set ContainerStateRemoving
@@ -734,7 +739,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr != nil {
logrus.Errorf(err.Error())
}
- return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID())
+ return fmt.Errorf("unable to set container %s removing state in database: %w", c.ID(), err)
}
// Remove all active exec sessions
@@ -754,7 +759,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr == nil {
cleanupErr = err
} else {
- logrus.Errorf("Cleanup storage: %v", err)
+ logrus.Errorf("Cleaning up storage: %v", err)
}
}
@@ -784,7 +789,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Deallocate the container's lock
if err := c.lock.Free(); err != nil {
if cleanupErr == nil {
- cleanupErr = errors.Wrapf(err, "error freeing lock for container %s", c.ID())
+ cleanupErr = fmt.Errorf("error freeing lock for container %s: %w", c.ID(), err)
} else {
logrus.Errorf("Free container lock: %v", err)
}
@@ -804,16 +809,16 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if !volume.Anonymous() {
continue
}
- if err := runtime.removeVolume(ctx, volume, false, timeout); err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
- if errors.Cause(err) == define.ErrVolumeBeingUsed {
+ if err := runtime.removeVolume(ctx, volume, false, timeout, false); err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
// Ignore error, since podman will report original error
volumesFrom, _ := c.volumesFrom()
if len(volumesFrom) > 0 {
- logrus.Debugf("Cleanup volume not possible since volume is in use (%s)", v)
+ logrus.Debugf("Cleaning up volume not possible since volume is in use (%s)", v)
continue
}
}
- logrus.Errorf("Cleanup volume (%s): %v", v, err)
+ logrus.Errorf("Cleaning up volume (%s): %v", v, err)
}
}
}
@@ -886,7 +891,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
c := new(Container)
c.config, err = r.state.GetContainerConfig(id)
if err != nil {
- return id, errors.Wrapf(err, "failed to retrieve config for ctr ID %q", id)
+ return id, fmt.Errorf("failed to retrieve config for ctr ID %q: %w", id, err)
}
c.state = new(ContainerState)
@@ -898,7 +903,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
if c.config.Pod != "" {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
- return id, errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), pod.ID())
+ return id, fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
}
// Lock the pod while we're removing container
@@ -913,7 +918,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
return "", err
}
if c.ID() == infraID {
- return id, errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+ return id, fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
}
}
@@ -962,8 +967,8 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
if !volume.Anonymous() {
continue
}
- if err := r.removeVolume(ctx, volume, false, timeout); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
- logrus.Errorf("Cleanup volume (%s): %v", v, err)
+ if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
+ logrus.Errorf("Cleaning up volume (%s): %v", v, err)
}
}
}
@@ -1110,7 +1115,7 @@ func (r *Runtime) GetContainersByList(containers []string) ([]*Container, error)
for _, inputContainer := range containers {
ctr, err := r.LookupContainer(inputContainer)
if err != nil {
- return ctrs, errors.Wrapf(err, "unable to lookup container %s", inputContainer)
+ return ctrs, fmt.Errorf("unable to look up container %s: %w", inputContainer, err)
}
ctrs = append(ctrs, ctr)
}
@@ -1123,7 +1128,7 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
var lastCreatedTime time.Time
ctrs, err := r.GetAllContainers()
if err != nil {
- return nil, errors.Wrapf(err, "unable to find latest container")
+ return nil, fmt.Errorf("unable to find latest container: %w", err)
}
if len(ctrs) == 0 {
return nil, define.ErrNoSuchCtr
@@ -1204,7 +1209,7 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) ([]*reports.Pru
// MountStorageContainer mounts the storage container's root filesystem
func (r *Runtime) MountStorageContainer(id string) (string, error) {
if _, err := r.GetContainer(id); err == nil {
- return "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
container, err := r.store.Container(id)
if err != nil {
@@ -1212,7 +1217,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
}
mountPoint, err := r.store.Mount(container.ID, "")
if err != nil {
- return "", errors.Wrapf(err, "error mounting storage for container %s", id)
+ return "", fmt.Errorf("error mounting storage for container %s: %w", id, err)
}
return mountPoint, nil
}
@@ -1220,7 +1225,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
// UnmountStorageContainer unmounts the storage container's root filesystem
func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
if _, err := r.GetContainer(id); err == nil {
- return false, errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return false, fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
container, err := r.store.Container(id)
if err != nil {
@@ -1234,7 +1239,7 @@ func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
func (r *Runtime) IsStorageContainerMounted(id string) (bool, string, error) {
var path string
if _, err := r.GetContainer(id); err == nil {
- return false, "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return false, "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
mountCnt, err := r.storageService.MountedContainerImage(id)
@@ -1260,13 +1265,13 @@ func (r *Runtime) StorageContainers() ([]storage.Container, error) {
storeContainers, err := r.store.Containers()
if err != nil {
- return nil, errors.Wrapf(err, "error reading list of all storage containers")
+ return nil, fmt.Errorf("error reading list of all storage containers: %w", err)
}
retCtrs := []storage.Container{}
for _, container := range storeContainers {
exists, err := r.state.HasContainer(container.ID)
if err != nil && err != define.ErrNoSuchCtr {
- return nil, errors.Wrapf(err, "failed to check if %s container exists in database", container.ID)
+ return nil, fmt.Errorf("failed to check if %s container exists in database: %w", container.ID, err)
}
if exists {
continue
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index b13482722..d04607d2e 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -2,6 +2,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -13,7 +15,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -40,14 +41,14 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
if ctr.config.IsInfra {
pod, err := r.state.Pod(ctr.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", ctr.ID(), ctr.config.Pod, err)
}
if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
- return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
+ return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
} else {
if err := r.removeContainer(ctx, ctr, true, false, false, timeout); err != nil {
- return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
+ return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
}
}
@@ -106,7 +107,7 @@ func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions,
func DownloadFromFile(reader *os.File) (string, error) {
outFile, err := ioutil.TempFile(util.Tmpdir(), "import")
if err != nil {
- return "", errors.Wrap(err, "error creating file")
+ return "", fmt.Errorf("error creating file: %w", err)
}
defer outFile.Close()
@@ -114,7 +115,7 @@ func DownloadFromFile(reader *os.File) (string, error) {
_, err = io.Copy(outFile, reader)
if err != nil {
- return "", errors.Wrapf(err, "error saving %s to %s", reader.Name(), outFile.Name())
+ return "", fmt.Errorf("error saving %s to %s: %w", reader.Name(), outFile.Name(), err)
}
return outFile.Name(), nil
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index f56cb83a4..139638a6b 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -14,7 +14,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,21 +21,21 @@ func (r *Runtime) stopPauseProcess() error {
if rootless.IsRootless() {
pausePidPath, err := util.GetRootlessPauseProcessPidPathGivenDir(r.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
data, err := ioutil.ReadFile(pausePidPath)
if err != nil {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrap(err, "cannot read pause process pid file")
+ return fmt.Errorf("cannot read pause process pid file: %w", err)
}
pausePid, err := strconv.Atoi(string(data))
if err != nil {
- return errors.Wrapf(err, "cannot parse pause pid file %s", pausePidPath)
+ return fmt.Errorf("cannot parse pause pid file %s: %w", pausePidPath, err)
}
if err := os.Remove(pausePidPath); err != nil {
- return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath)
+ return fmt.Errorf("cannot delete pause pid file %s: %w", pausePidPath, err)
}
if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil {
return err
@@ -60,7 +59,7 @@ func (r *Runtime) migrate() error {
for _, ctr := range runningContainers {
fmt.Printf("stopped %s\n", ctr.ID())
if err := ctr.Stop(); err != nil {
- return errors.Wrapf(err, "cannot stop container %s", ctr.ID())
+ return fmt.Errorf("cannot stop container %s: %w", ctr.ID(), err)
}
}
@@ -68,7 +67,7 @@ func (r *Runtime) migrate() error {
runtimeChangeRequested := r.migrateRuntime != ""
requestedRuntime, runtimeExists := r.ociRuntimes[r.migrateRuntime]
if !runtimeExists && runtimeChangeRequested {
- return errors.Wrapf(define.ErrInvalidArg, "change to runtime %q requested but no such runtime is defined", r.migrateRuntime)
+ return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", r.migrateRuntime, define.ErrInvalidArg)
}
for _, ctr := range allCtrs {
@@ -93,7 +92,7 @@ func (r *Runtime) migrate() error {
if needsWrite {
if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
- return errors.Wrapf(err, "error rewriting config for container %s", ctr.ID())
+ return fmt.Errorf("error rewriting config for container %s: %w", ctr.ID(), err)
}
}
}
diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go
index dca0ffc8a..25e48de14 100644
--- a/libpod/runtime_pod.go
+++ b/libpod/runtime_pod.go
@@ -2,11 +2,12 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"time"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
// Contains the public Runtime API for pods
@@ -112,7 +113,7 @@ func (r *Runtime) GetLatestPod() (*Pod, error) {
var lastCreatedTime time.Time
pods, err := r.GetAllPods()
if err != nil {
- return nil, errors.Wrapf(err, "unable to get all pods")
+ return nil, fmt.Errorf("unable to get all pods: %w", err)
}
if len(pods) == 0 {
return nil, define.ErrNoSuchPod
@@ -146,7 +147,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
pods = append(pods, c.PodID())
pod, err := r.GetPod(c.PodID())
if err != nil {
- if errors.Cause(err) == define.ErrPodRemoved || errors.Cause(err) == define.ErrNoSuchPod {
+ if errors.Is(err, define.ErrPodRemoved) || errors.Is(err, define.ErrNoSuchPod) {
continue
}
return nil, err
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index dcc3a044f..75ff24e41 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -17,8 +18,7 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/specgen"
- spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
+ runcconfig "github.com/opencontainers/runc/libcontainer/configs"
"github.com/sirupsen/logrus"
)
@@ -38,14 +38,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
for _, option := range options {
if err := option(pod); err != nil {
- return nil, errors.Wrapf(err, "error running pod create option")
+ return nil, fmt.Errorf("error running pod create option: %w", err)
}
}
// Allocate a lock for the pod
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new pod")
+ return nil, fmt.Errorf("error allocating lock for new pod: %w", err)
}
pod.lock = lock
pod.config.LockID = pod.lock.ID()
@@ -66,19 +66,37 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
case config.CgroupfsCgroupsManager:
canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(pod.config.CgroupParent)
if canUseCgroup {
+ // need to actually create parent here
if pod.config.CgroupParent == "" {
pod.config.CgroupParent = CgroupfsDefaultCgroupParent
} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
- // No need to create it with cgroupfs - the first container to
- // launch should do it for us
if pod.config.UsePodCgroup {
pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID())
if p.InfraContainerSpec != nil {
p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath
+ // cgroupfs + rootless = permission denied when creating the cgroup.
+ if !rootless.IsRootless() {
+ res, err := GetLimits(p.InfraContainerSpec.ResourceLimits)
+ if err != nil {
+ return nil, err
+ }
+ // We need to both create and update the cgroup:
+ // rather than adding a new pod-cgroup creation path in c/common,
+ // create the cgroup as if it were a container and then update it,
+ // which populates the resource limits at the pod level.
+ cgc, err := cgroups.New(pod.state.CgroupPath, &res)
+ if err != nil {
+ return nil, err
+ }
+ err = cgc.Update(&res)
+ if err != nil {
+ return nil, err
+ }
+ }
}
}
}
@@ -90,14 +108,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
pod.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
if pod.config.UsePodCgroup {
- cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()))
+ cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.InfraContainerSpec.ResourceLimits)
if err != nil {
- return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID())
+ return nil, fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err)
}
pod.state.CgroupPath = cgroupPath
if p.InfraContainerSpec != nil {
@@ -105,7 +123,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
}
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
+ return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}
@@ -114,7 +132,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
}
if !pod.HasInfraContainer() && pod.SharesNamespaces() {
- return nil, errors.Errorf("Pods must have an infra container to share namespaces")
+ return nil, errors.New("Pods must have an infra container to share namespaces")
}
if pod.HasInfraContainer() && !pod.SharesNamespaces() {
logrus.Infof("Pod has an infra container, but shares no namespaces")
@@ -139,12 +157,12 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
if addPodErr = r.state.AddPod(pod); addPodErr == nil {
return pod, nil
}
- if !generateName || (errors.Cause(addPodErr) != define.ErrPodExists && errors.Cause(addPodErr) != define.ErrCtrExists) {
+ if !generateName || (!errors.Is(addPodErr, define.ErrPodExists) && !errors.Is(addPodErr, define.ErrCtrExists)) {
break
}
}
if addPodErr != nil {
- return nil, errors.Wrapf(addPodErr, "error adding pod to state")
+ return nil, fmt.Errorf("error adding pod to state: %w", addPodErr)
}
return pod, nil
@@ -193,7 +211,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
force = true
}
if !removeCtrs && numCtrs > 0 {
- return errors.Wrapf(define.ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
+ return fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
}
// Go through and lock all containers so we can operate on them all at
@@ -221,7 +239,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Ensure state appropriate for removal
if err := ctr.checkReadyForRemoval(); err != nil {
- return errors.Wrapf(err, "pod %s has containers that are not ready to be removed", p.ID())
+ return fmt.Errorf("pod %s has containers that are not ready to be removed: %w", p.ID(), err)
}
}
@@ -239,9 +257,8 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
// New resource limits
- resLimits := new(spec.LinuxResources)
- resLimits.Pids = new(spec.LinuxPids)
- resLimits.Pids.Limit = 1 // Inhibit forks with very low pids limit
+ resLimits := new(runcconfig.Resources)
+ resLimits.PidsLimit = 1 // Inhibit forks with very low pids limit
// Don't try if we failed to retrieve the cgroup
if err == nil {
@@ -294,15 +311,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
for volName := range ctrNamedVolumes {
volume, err := r.state.Volume(volName)
- if err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
+ if err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
logrus.Errorf("Retrieving volume %s: %v", volName, err)
continue
}
if !volume.Anonymous() {
continue
}
- if err := r.removeVolume(ctx, volume, false, timeout); err != nil {
- if errors.Cause(err) == define.ErrNoSuchVolume || errors.Cause(err) == define.ErrVolumeRemoved {
+ if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil {
+ if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeRemoved) {
continue
}
logrus.Errorf("Removing volume %s: %v", volName, err)
@@ -321,9 +338,9 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
switch p.runtime.config.Engine.CgroupManager {
case config.SystemdCgroupsManager:
- if err := deleteSystemdCgroup(p.state.CgroupPath); err != nil {
+ if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -337,7 +354,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
conmonCgroup, err := cgroups.Load(conmonCgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID())
+ removalErr = fmt.Errorf("error retrieving pod %s conmon cgroup: %w", p.ID(), err)
} else {
logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
@@ -345,7 +362,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if err == nil {
if err = conmonCgroup.Delete(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s conmon cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
@@ -354,7 +371,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
cgroup, err := cgroups.Load(p.state.CgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error retrieving pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -362,7 +379,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if err == nil {
if err := cgroup.Delete(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -373,7 +390,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// keep going so we make sure to evict the pod before
// ending up with an inconsistent state.
if removalErr == nil {
- removalErr = errors.Wrapf(define.ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.Engine.CgroupManager, p.ID())
+ removalErr = fmt.Errorf("unrecognized cgroup manager %s when removing pod %s cgroups: %w", p.runtime.config.Engine.CgroupManager, p.ID(), define.ErrInternal)
} else {
logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.Engine.CgroupManager, p.ID())
}
@@ -399,7 +416,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Deallocate the pod lock
if err := p.lock.Free(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error freeing pod %s lock", p.ID())
+ removalErr = fmt.Errorf("error freeing pod %s lock: %w", p.ID(), err)
} else {
logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err)
}
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index db055f40b..9149dd72f 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -1,8 +1,9 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/events"
- "github.com/pkg/errors"
)
// renumberLocks reassigns lock numbers for all containers and pods in the
@@ -26,7 +27,7 @@ func (r *Runtime) renumberLocks() error {
for _, ctr := range allCtrs {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+ return fmt.Errorf("error allocating lock for container %s: %w", ctr.ID(), err)
}
ctr.config.LockID = lock.ID()
@@ -43,7 +44,7 @@ func (r *Runtime) renumberLocks() error {
for _, pod := range allPods {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
+ return fmt.Errorf("error allocating lock for pod %s: %w", pod.ID(), err)
}
pod.config.LockID = lock.ID()
@@ -60,7 +61,7 @@ func (r *Runtime) renumberLocks() error {
for _, vol := range allVols {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name())
+ return fmt.Errorf("error allocating lock for volume %s: %w", vol.Name(), err)
}
vol.config.LockID = lock.ID()
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index 21bf8aefc..9efb30e03 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -2,11 +2,11 @@ package libpod
import (
"context"
+ "errors"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/domain/entities/reports"
- "github.com/pkg/errors"
)
// Contains the public Runtime API for volumes
@@ -33,7 +33,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool, timeo
return nil
}
}
- return r.removeVolume(ctx, v, force, timeout)
+ return r.removeVolume(ctx, v, force, timeout, false)
}
// GetVolume retrieves a volume given its full name.
@@ -133,7 +133,7 @@ func (r *Runtime) PruneVolumes(ctx context.Context, filterFuncs []VolumeFilter)
report.Id = vol.Name()
var timeout *uint
if err := r.RemoveVolume(ctx, vol, false, timeout); err != nil {
- if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved {
+ if !errors.Is(err, define.ErrVolumeBeingUsed) && !errors.Is(err, define.ErrVolumeRemoved) {
report.Err = err
} else {
// We didn't remove the volume for some reason
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index f8788e183..a751d75d2 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -5,6 +5,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -16,7 +18,6 @@ import (
"github.com/containers/storage/drivers/quota"
"github.com/containers/storage/pkg/stringid"
pluginapi "github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -25,15 +26,17 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
if !r.valid {
return nil, define.ErrRuntimeStopped
}
- return r.newVolume(options...)
+ return r.newVolume(false, options...)
}
-// newVolume creates a new empty volume
-func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredErr error) {
+// newVolume creates a new empty volume with the given options.
+// Setting noCreatePluginVolume to true skips creating the volume in the volume plugin;
+// this is required for the UpdateVolumePlugins() function. If you are not sure, set this to false.
+func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOption) (_ *Volume, deferredErr error) {
volume := newVolume(r)
for _, option := range options {
if err := option(volume); err != nil {
- return nil, errors.Wrapf(err, "running volume create option")
+ return nil, fmt.Errorf("running volume create option: %w", err)
}
}
@@ -48,17 +51,17 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
// Check if volume with given name exists.
exists, err := r.state.HasVolume(volume.config.Name)
if err != nil {
- return nil, errors.Wrapf(err, "checking if volume with name %s exists", volume.config.Name)
+ return nil, fmt.Errorf("checking if volume with name %s exists: %w", volume.config.Name, err)
}
if exists {
- return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name)
+ return nil, fmt.Errorf("volume with name %s already exists: %w", volume.config.Name, define.ErrVolumeExists)
}
// Plugin can be nil if driver is local, but that's OK - superfluous
// assignment doesn't hurt much.
- plugin, err := r.getVolumePlugin(volume.config.Driver)
+ plugin, err := r.getVolumePlugin(volume.config)
if err != nil {
- return nil, errors.Wrapf(err, "volume %s uses volume plugin %s but it could not be retrieved", volume.config.Name, volume.config.Driver)
+ return nil, fmt.Errorf("volume %s uses volume plugin %s but it could not be retrieved: %w", volume.config.Name, volume.config.Driver, err)
}
volume.plugin = plugin
@@ -70,20 +73,20 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
case "device":
if strings.ToLower(volume.config.Options["type"]) == "bind" {
if _, err := os.Stat(val); err != nil {
- return nil, errors.Wrapf(err, "invalid volume option %s for driver 'local'", key)
+ return nil, fmt.Errorf("invalid volume option %s for driver 'local': %w", key, err)
}
}
- case "o", "type", "uid", "gid", "size", "inodes", "noquota":
+ case "o", "type", "uid", "gid", "size", "inodes", "noquota", "copy", "nocopy":
// Do nothing, valid keys
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "invalid mount option %s for driver 'local'", key)
+ return nil, fmt.Errorf("invalid mount option %s for driver 'local': %w", key, define.ErrInvalidArg)
}
}
}
// Now we get conditional: we either need to make the volume in the
// volume plugin, or on disk if not using a plugin.
- if volume.plugin != nil {
+ if volume.plugin != nil && !noCreatePluginVolume {
// We can't chown, or relabel, or similar the path the volume is
// using, because it's not managed by us.
// TODO: reevaluate this once we actually have volume plugins in
@@ -96,17 +99,17 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
// Create the mountpoint of this volume
volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name)
if err := os.MkdirAll(volPathRoot, 0700); err != nil {
- return nil, errors.Wrapf(err, "creating volume directory %q", volPathRoot)
+ return nil, fmt.Errorf("creating volume directory %q: %w", volPathRoot, err)
}
if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
+ return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", volPathRoot, volume.config.UID, volume.config.GID, err)
}
fullVolPath := filepath.Join(volPathRoot, "_data")
if err := os.MkdirAll(fullVolPath, 0755); err != nil {
- return nil, errors.Wrapf(err, "creating volume directory %q", fullVolPath)
+ return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
}
if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID)
+ return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
}
if err := LabelVolumePath(fullVolPath); err != nil {
return nil, err
@@ -131,7 +134,7 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
}
if projectQuotaSupported {
if err := q.SetQuota(fullVolPath, quota); err != nil {
- return nil, errors.Wrapf(err, "failed to set size quota size=%d inodes=%d for volume directory %q", volume.config.Size, volume.config.Inodes, fullVolPath)
+ return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, fullVolPath, err)
}
}
}
@@ -141,7 +144,7 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "allocating lock for new volume")
+ return nil, fmt.Errorf("allocating lock for new volume: %w", err)
}
volume.lock = lock
volume.config.LockID = volume.lock.ID()
@@ -158,12 +161,91 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
// Add the volume to state
if err := r.state.AddVolume(volume); err != nil {
- return nil, errors.Wrapf(err, "adding volume to state")
+ return nil, fmt.Errorf("adding volume to state: %w", err)
}
defer volume.newVolumeEvent(events.Create)
return volume, nil
}
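The internal newVolume call sites in this diff now pass the extra boolean, while the public NewVolume wrapper keeps its signature. A hedged usage sketch of that public API, assuming an already-initialized *libpod.Runtime; the helper below is hypothetical, but WithVolumeName and WithVolumeDriver are the option constructors used later in UpdateVolumePlugins:

package volexample

import (
    "context"
    "fmt"

    "github.com/containers/podman/v4/libpod"
)

// createPluginVolume is a hypothetical helper showing one way the public
// NewVolume API might be called for a plugin-backed volume.
func createPluginVolume(ctx context.Context, rt *libpod.Runtime, name, driver string) error {
    vol, err := rt.NewVolume(ctx, libpod.WithVolumeName(name), libpod.WithVolumeDriver(driver))
    if err != nil {
        return fmt.Errorf("creating volume %s: %w", name, err)
    }
    fmt.Printf("created volume %s using driver %s\n", vol.Name(), driver)
    return nil
}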
+// UpdateVolumePlugins reads all volumes from all configured volume plugins and
+// imports them into the libpod db. It also checks whether existing libpod volumes
+// were removed in the plugin; in that case it tries to remove them from libpod as well.
+// On errors we continue and try to do as much as possible; all errors are
+// returned as a slice in the returned struct.
+// This function is inherently racy: it is best effort and cannot guarantee
+// a perfect state, since plugins can be modified from the outside at any time.
+func (r *Runtime) UpdateVolumePlugins(ctx context.Context) *define.VolumeReload {
+ var (
+ added []string
+ removed []string
+ errs []error
+ allPluginVolumes = map[string]struct{}{}
+ )
+
+ for driverName, socket := range r.config.Engine.VolumePlugins {
+ driver, err := volplugin.GetVolumePlugin(driverName, socket, 0)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ vols, err := driver.ListVolumes()
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to read volumes from plugin %q: %w", driverName, err))
+ continue
+ }
+ for _, vol := range vols {
+ allPluginVolumes[vol.Name] = struct{}{}
+ if _, err := r.newVolume(true, WithVolumeName(vol.Name), WithVolumeDriver(driverName)); err != nil {
+ // If the volume exists this is not an error, just ignore it and log. It is very likely
+ // that the volume from the plugin was already in our db.
+ if !errors.Is(err, define.ErrVolumeExists) {
+ errs = append(errs, err)
+ continue
+ }
+ logrus.Infof("Volume %q already exists: %v", vol.Name, err)
+ continue
+ }
+ added = append(added, vol.Name)
+ }
+ }
+
+ libpodVolumes, err := r.state.AllVolumes()
+ if err != nil {
+ errs = append(errs, fmt.Errorf("cannot delete dangling plugin volumes: failed to read libpod volumes: %w", err))
+ }
+ for _, vol := range libpodVolumes {
+ if vol.UsesVolumeDriver() {
+ if _, ok := allPluginVolumes[vol.Name()]; !ok {
+ // The volume is no longer in the plugin, so remove it from the libpod db.
+ if err := r.removeVolume(ctx, vol, false, nil, true); err != nil {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
+ // Volume is still used by at least one container. This is very bad:
+ // the plugin no longer has this volume, but we still need it.
+ errs = append(errs, fmt.Errorf("volume was removed from the plugin %q but containers still require it: %w", vol.config.Driver, err))
+ continue
+ }
+ if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeRemoved) || errors.Is(err, define.ErrMissingPlugin) {
+ // Volume was already removed; no problem, just ignore it and continue.
+ continue
+ }
+
+ // some other error
+ errs = append(errs, err)
+ continue
+ }
+ // Volume was successfully removed
+ removed = append(removed, vol.Name())
+ }
+ }
+ }
+
+ return &define.VolumeReload{
+ Added: added,
+ Removed: removed,
+ Errors: errs,
+ }
+}
+
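
As a usage sketch (not part of the patch): a hypothetical caller consuming the define.VolumeReload report returned by UpdateVolumePlugins. The helper name and the logging choices are assumptions; only the method and the Added/Removed/Errors fields come from the diff above.

package volumesketch // illustrative only

import (
    "context"

    "github.com/containers/podman/v4/libpod"
    "github.com/sirupsen/logrus"
)

// reloadPluginVolumes shows how the reload report might be consumed.
func reloadPluginVolumes(ctx context.Context, rt *libpod.Runtime) {
    reload := rt.UpdateVolumePlugins(ctx)
    for _, name := range reload.Added {
        logrus.Infof("Imported volume %s from its plugin", name)
    }
    for _, name := range reload.Removed {
        logrus.Infof("Removed volume %s no longer present in its plugin", name)
    }
    for _, err := range reload.Errors {
        logrus.Errorf("Volume plugin reload: %v", err)
    }
}
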
// makeVolumeInPluginIfNotExist makes a volume in the given volume plugin if it
// does not already exist.
func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin *volplugin.VolumePlugin) error {
@@ -190,15 +272,17 @@ func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin
createReq.Name = name
createReq.Options = options
if err := plugin.CreateVolume(createReq); err != nil {
- return errors.Wrapf(err, "creating volume %q in plugin %s", name, plugin.Name)
+ return fmt.Errorf("creating volume %q in plugin %s: %w", name, plugin.Name, err)
}
}
return nil
}
-// removeVolume removes the specified volume from state as well tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeout *uint) error {
+// removeVolume removes the specified volume from state and tears down its mountpoint and storage.
+// ignoreVolumePlugin is used to remove the volume only from the db and not from the plugin;
+// this is required when the volume was already removed from the plugin, as in UpdateVolumePlugins().
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeout *uint, ignoreVolumePlugin bool) error {
if !v.valid {
if ok, _ := r.state.HasVolume(v.Name()); !ok {
return nil
@@ -221,7 +305,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
if !force {
- return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+ return fmt.Errorf("volume %s is being used by the following container(s): %s: %w", v.Name(), depsStr, define.ErrVolumeBeingUsed)
}
// We need to remove all containers using the volume
@@ -230,17 +314,17 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if err != nil {
// If the container's removed, no point in
// erroring.
- if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
continue
}
- return errors.Wrapf(err, "removing container %s that depends on volume %s", dep, v.Name())
+ return fmt.Errorf("removing container %s that depends on volume %s: %w", dep, v.Name(), err)
}
logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())
if err := r.removeContainer(ctx, ctr, force, false, false, timeout); err != nil {
- return errors.Wrapf(err, "removing container %s that depends on volume %s", ctr.ID(), v.Name())
+ return fmt.Errorf("removing container %s that depends on volume %s: %w", ctr.ID(), v.Name(), err)
}
}
}
@@ -253,7 +337,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
// them.
logrus.Errorf("Unmounting volume %s: %v", v.Name(), err)
} else {
- return errors.Wrapf(err, "unmounting volume %s", v.Name())
+ return fmt.Errorf("unmounting volume %s: %w", v.Name(), err)
}
}
@@ -263,13 +347,13 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
var removalErr error
// If we use a volume plugin, we need to remove from the plugin.
- if v.UsesVolumeDriver() {
+ if v.UsesVolumeDriver() && !ignoreVolumePlugin {
canRemove := true
// Do we have a volume driver?
if v.plugin == nil {
canRemove = false
- removalErr = errors.Wrapf(define.ErrMissingPlugin, "cannot remove volume %s from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ removalErr = fmt.Errorf("cannot remove volume %s from plugin %s, but it has been removed from Podman: %w", v.Name(), v.Driver(), define.ErrMissingPlugin)
} else {
// Ping the plugin first to verify the volume still
// exists.
@@ -280,14 +364,14 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
getReq.Name = v.Name()
if _, err := v.plugin.GetVolume(getReq); err != nil {
canRemove = false
- removalErr = errors.Wrapf(err, "volume %s could not be retrieved from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ removalErr = fmt.Errorf("volume %s could not be retrieved from plugin %s, but it has been removed from Podman: %w", v.Name(), v.Driver(), err)
}
}
if canRemove {
req := new(pluginapi.RemoveRequest)
req.Name = v.Name()
if err := v.plugin.RemoveVolume(req); err != nil {
- return errors.Wrapf(err, "volume %s could not be removed from plugin %s", v.Name(), v.Driver())
+ return fmt.Errorf("volume %s could not be removed from plugin %s: %w", v.Name(), v.Driver(), err)
}
}
}
@@ -297,13 +381,13 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if removalErr != nil {
logrus.Errorf("Removing volume %s from plugin %s: %v", v.Name(), v.Driver(), removalErr)
}
- return errors.Wrapf(err, "removing volume %s", v.Name())
+ return fmt.Errorf("removing volume %s: %w", v.Name(), err)
}
// Free the volume's lock
if err := v.lock.Free(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "freeing lock for volume %s", v.Name())
+ removalErr = fmt.Errorf("freeing lock for volume %s: %w", v.Name(), err)
} else {
logrus.Errorf("Freeing lock for volume %q: %v", v.Name(), err)
}
@@ -313,7 +397,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
// from /var/lib/containers/storage/volumes
if err := v.teardownStorage(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "cleaning up volume storage for %q", v.Name())
+ removalErr = fmt.Errorf("cleaning up volume storage for %q: %w", v.Name(), err)
} else {
logrus.Errorf("Cleaning up volume storage for volume %q: %v", v.Name(), err)
}
diff --git a/libpod/runtime_worker.go b/libpod/runtime_worker.go
index ca44a27f7..9d41321b2 100644
--- a/libpod/runtime_worker.go
+++ b/libpod/runtime_worker.go
@@ -1,40 +1,17 @@
package libpod
-import (
- "time"
-)
-
func (r *Runtime) startWorker() {
- if r.workerChannel == nil {
- r.workerChannel = make(chan func(), 1)
- r.workerShutdown = make(chan bool)
- }
+ r.workerChannel = make(chan func(), 10)
go func() {
- for {
- // Make sure to read all workers before
- // checking if we're about to shutdown.
- for len(r.workerChannel) > 0 {
- w := <-r.workerChannel
- w()
- }
-
- select {
- // We'll read from the shutdown channel only when all
- // items above have been processed.
- //
- // (*Runtime).Shutdown() will block until until the
- // item is read.
- case <-r.workerShutdown:
- return
-
- default:
- time.Sleep(100 * time.Millisecond)
- }
+ for w := range r.workerChannel {
+ w()
+ r.workerGroup.Done()
}
}()
}
func (r *Runtime) queueWork(f func()) {
+ r.workerGroup.Add(1)
go func() {
r.workerChannel <- f
}()
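
For context, a standalone sketch of the worker pattern the rewritten runtime_worker.go uses: a buffered channel drained by a single goroutine, paired with a WaitGroup that is incremented in queueWork and decremented after each job runs. The shutdown-by-Wait step at the end is an assumption about how the runtime now drains pending work; the type below is not the real Runtime.

package main // standalone sketch; the real fields live on libpod.Runtime

import (
    "fmt"
    "sync"
)

type worker struct {
    ch chan func()
    wg sync.WaitGroup
}

// start drains the buffered channel in a single goroutine, mirroring startWorker above.
func (w *worker) start() {
    w.ch = make(chan func(), 10)
    go func() {
        for job := range w.ch {
            job()
            w.wg.Done()
        }
    }()
}

// queue registers the job with the WaitGroup before handing it to the worker,
// mirroring queueWork above.
func (w *worker) queue(job func()) {
    w.wg.Add(1)
    go func() { w.ch <- job }()
}

func main() {
    var w worker
    w.start()
    w.queue(func() { fmt.Println("queued work ran") })
    w.wg.Wait() // shutdown presumably waits like this before the runtime exits
}
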
diff --git a/libpod/service.go b/libpod/service.go
index c14f5e51d..a8928277f 100644
--- a/libpod/service.go
+++ b/libpod/service.go
@@ -2,10 +2,10 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -29,7 +29,7 @@ func (p *Pod) hasServiceContainer() bool {
func (p *Pod) serviceContainer() (*Container, error) {
id := p.config.ServiceContainerID
if id == "" {
- return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no service container")
+ return nil, fmt.Errorf("pod has no service container: %w", define.ErrNoSuchCtr)
}
return p.runtime.state.Container(id)
}
diff --git a/libpod/shutdown/handler.go b/libpod/shutdown/handler.go
index 9add05c9c..75e9b4e8a 100644
--- a/libpod/shutdown/handler.go
+++ b/libpod/shutdown/handler.go
@@ -1,18 +1,18 @@
package shutdown
import (
+ "errors"
"os"
"os/signal"
"sync"
"syscall"
"time"
- "github.com/pkg/errors"
logrusImport "github.com/sirupsen/logrus"
)
var (
- ErrHandlerExists error = errors.New("handler with given name already exists")
+ ErrHandlerExists = errors.New("handler with given name already exists")
)
var (
diff --git a/libpod/state.go b/libpod/state.go
index 471023769..4fbd3c302 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -111,6 +111,15 @@ type State interface {
// Return a container config from the database by full ID
GetContainerConfig(id string) (*ContainerConfig, error)
+ // Add the exit code for the specified container to the database.
+ AddContainerExitCode(id string, exitCode int32) error
+
+ // Return the exit code for the specified container.
+ GetContainerExitCode(id string) (int32, error)
+
+ // Remove exit codes older than 5 minutes.
+ PruneContainerExitCodes() error
+
// Add creates a reference to an exec session in the database.
// The container the exec session is attached to will be recorded.
// The container state will not be modified.
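
A minimal in-memory sketch of the semantics the three new exit-code methods imply; the real implementation lives in the database-backed state and is not shown in this diff. The type, constructor, and sentinel error below are illustrative assumptions; the five-minute retention comes from the interface comment above.

package statesketch // illustrative in-memory model, not the real state backend

import (
    "errors"
    "sync"
    "time"
)

var errNoExitCode = errors.New("no exit code recorded") // hypothetical sentinel

type exitCodeStore struct {
    mu    sync.Mutex
    codes map[string]int32
    times map[string]time.Time
}

func newExitCodeStore() *exitCodeStore {
    return &exitCodeStore{codes: map[string]int32{}, times: map[string]time.Time{}}
}

// AddContainerExitCode records the exit code together with its timestamp.
func (s *exitCodeStore) AddContainerExitCode(id string, exitCode int32) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.codes[id] = exitCode
    s.times[id] = time.Now()
    return nil
}

// GetContainerExitCode returns the recorded exit code, if any.
func (s *exitCodeStore) GetContainerExitCode(id string) (int32, error) {
    s.mu.Lock()
    defer s.mu.Unlock()
    code, ok := s.codes[id]
    if !ok {
        return -1, errNoExitCode
    }
    return code, nil
}

// PruneContainerExitCodes drops entries older than five minutes, matching the
// retention described in the interface comment.
func (s *exitCodeStore) PruneContainerExitCodes() error {
    s.mu.Lock()
    defer s.mu.Unlock()
    cutoff := time.Now().Add(-5 * time.Minute)
    for id, t := range s.times {
        if t.Before(cutoff) {
            delete(s.codes, id)
            delete(s.times, id)
        }
    }
    return nil
}
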
diff --git a/libpod/stats.go b/libpod/stats.go
index 25baa378d..c7e9e5128 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -4,14 +4,16 @@
package libpod
import (
+ "fmt"
"math"
"strings"
"syscall"
"time"
+ runccgroup "github.com/opencontainers/runc/libcontainer/cgroups"
+
"github.com/containers/common/pkg/cgroups"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)
// GetContainerStats gets the running stats for a given container.
@@ -23,7 +25,7 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
stats.Name = c.Name()
if c.config.NoCgroups {
- return nil, errors.Wrapf(define.ErrNoCgroups, "cannot run top on container %s as it did not create a cgroup", c.ID())
+ return nil, fmt.Errorf("cannot run top on container %s as it did not create a cgroup: %w", c.ID(), define.ErrNoCgroups)
}
if !c.batched {
@@ -34,8 +36,9 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
}
}
+ // Return stats with the fields' zero values if the container is neither running nor paused.
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
- return stats, define.ErrCtrStateInvalid
+ return stats, nil
}
if previousStats == nil {
@@ -52,13 +55,13 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
}
cgroup, err := cgroups.Load(cgroupPath)
if err != nil {
- return stats, errors.Wrapf(err, "unable to load cgroup at %s", cgroupPath)
+ return stats, fmt.Errorf("unable to load cgroup at %s: %w", cgroupPath, err)
}
// Ubuntu does not have swap memory in cgroups because swap is often not enabled.
cgroupStats, err := cgroup.Stat()
if err != nil {
- return stats, errors.Wrapf(err, "unable to obtain cgroup stats")
+ return stats, fmt.Errorf("unable to obtain cgroup stats: %w", err)
}
conState := c.state.State
netStats, err := getContainerNetIO(c)
@@ -68,29 +71,29 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
// If the current total usage in the cgroup is less than what was previously
// recorded then it means the container was restarted and runs in a new cgroup
- if previousStats.Duration > cgroupStats.CPU.Usage.Total {
+ if previousStats.Duration > cgroupStats.CpuStats.CpuUsage.TotalUsage {
previousStats = &define.ContainerStats{}
}
previousCPU := previousStats.CPUNano
now := uint64(time.Now().UnixNano())
- stats.Duration = cgroupStats.CPU.Usage.Total
+ stats.Duration = cgroupStats.CpuStats.CpuUsage.TotalUsage
stats.UpTime = time.Duration(stats.Duration)
stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, now, previousStats.SystemNano)
// calc the average cpu usage for the time the container is running
stats.AvgCPU = calculateCPUPercent(cgroupStats, 0, now, uint64(c.state.StartedTime.UnixNano()))
- stats.MemUsage = cgroupStats.Memory.Usage.Usage
+ stats.MemUsage = cgroupStats.MemoryStats.Usage.Usage
stats.MemLimit = c.getMemLimit()
stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
stats.PIDs = 0
if conState == define.ContainerStateRunning || conState == define.ContainerStatePaused {
- stats.PIDs = cgroupStats.Pids.Current
+ stats.PIDs = cgroupStats.PidsStats.Current
}
stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats)
- stats.CPUNano = cgroupStats.CPU.Usage.Total
- stats.CPUSystemNano = cgroupStats.CPU.Usage.Kernel
+ stats.CPUNano = cgroupStats.CpuStats.CpuUsage.TotalUsage
+ stats.CPUSystemNano = cgroupStats.CpuStats.CpuUsage.UsageInKernelmode
stats.SystemNano = now
- stats.PerCPU = cgroupStats.CPU.Usage.PerCPU
+ stats.PerCPU = cgroupStats.CpuStats.CpuUsage.PercpuUsage
// Handle case where the container is not in a network namespace
if netStats != nil {
stats.NetInput = netStats.TxBytes
@@ -132,10 +135,10 @@ func (c *Container) getMemLimit() uint64 {
// previousCPU is the last value of stats.CPU.Usage.Total measured at the time previousSystem.
// (now - previousSystem) is the time delta in nanoseconds, between the measurement in previousCPU
// and the updated value in stats.
-func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, now, previousSystem uint64) float64 {
+func calculateCPUPercent(stats *runccgroup.Stats, previousCPU, now, previousSystem uint64) float64 {
var (
cpuPercent = 0.0
- cpuDelta = float64(stats.CPU.Usage.Total - previousCPU)
+ cpuDelta = float64(stats.CpuStats.CpuUsage.TotalUsage - previousCPU)
systemDelta = float64(now - previousSystem)
)
if systemDelta > 0.0 && cpuDelta > 0.0 {
@@ -145,8 +148,8 @@ func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, now, previousSyste
return cpuPercent
}
-func calculateBlockIO(stats *cgroups.Metrics) (read uint64, write uint64) {
- for _, blkIOEntry := range stats.Blkio.IoServiceBytesRecursive {
+func calculateBlockIO(stats *runccgroup.Stats) (read uint64, write uint64) {
+ for _, blkIOEntry := range stats.BlkioStats.IoServiceBytesRecursive {
switch strings.ToLower(blkIOEntry.Op) {
case "read":
read += blkIOEntry.Value
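
To make the field renames concrete, a small sketch of the delta-based CPU calculation against the runc Stats type now used above (CpuStats.CpuUsage.TotalUsage replaces the old CPU.Usage.Total). The ratio-times-100 step is an assumption, since this hunk elides the body of calculateCPUPercent.

package statssketch // illustrative only

import (
    "time"

    runccgroup "github.com/opencontainers/runc/libcontainer/cgroups"
)

// cpuPercentSince computes a CPU percentage from two samples of the runc
// cgroup stats, using the renamed fields shown in the hunk above.
func cpuPercentSince(cur *runccgroup.Stats, prevCPU uint64, prevSample time.Time) float64 {
    now := uint64(time.Now().UnixNano())
    cpuDelta := float64(cur.CpuStats.CpuUsage.TotalUsage - prevCPU)
    systemDelta := float64(now - uint64(prevSample.UnixNano()))
    if systemDelta > 0.0 && cpuDelta > 0.0 {
        return (cpuDelta / systemDelta) * 100.0
    }
    return 0.0
}
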
diff --git a/libpod/storage.go b/libpod/storage.go
index a85348729..dc19a5d4f 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"time"
istorage "github.com/containers/image/v5/storage"
@@ -10,7 +11,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -184,7 +184,7 @@ func (r *storageService) DeleteContainer(idOrName string) error {
}
err = r.store.DeleteContainer(container.ID)
if err != nil {
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
logrus.Infof("Storage for container %s already removed", container.ID)
} else {
logrus.Debugf("Failed to delete container %q: %v", container.ID, err)
@@ -218,7 +218,7 @@ func (r *storageService) GetContainerMetadata(idOrName string) (RuntimeContainer
func (r *storageService) MountContainerImage(idOrName string) (string, error) {
container, err := r.store.Container(idOrName)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
return "", define.ErrNoSuchCtr
}
return "", err
@@ -281,7 +281,7 @@ func (r *storageService) MountedContainerImage(idOrName string) (int, error) {
func (r *storageService) GetMountpoint(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
return "", define.ErrNoSuchCtr
}
return "", err
@@ -297,7 +297,7 @@ func (r *storageService) GetMountpoint(id string) (string, error) {
func (r *storageService) GetWorkDir(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
return "", define.ErrNoSuchCtr
}
return "", err
@@ -308,7 +308,7 @@ func (r *storageService) GetWorkDir(id string) (string, error) {
func (r *storageService) GetRunDir(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
return "", define.ErrNoSuchCtr
}
return "", err
diff --git a/libpod/util.go b/libpod/util.go
index 1753b4f34..a6e6a4f3e 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -20,7 +20,6 @@ import (
"github.com/fsnotify/fsnotify"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -93,7 +92,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
return false, err
}
case <-timeoutChan:
- return false, errors.Wrapf(define.ErrInternal, "timed out waiting for file %s", path)
+ return false, fmt.Errorf("timed out waiting for file %s: %w", path, define.ErrInternal)
}
}
}
@@ -123,15 +122,15 @@ func sortMounts(m []spec.Mount) []spec.Mount {
func validPodNSOption(p *Pod, ctrPod string) error {
if p == nil {
- return errors.Wrapf(define.ErrInvalidArg, "pod passed in was nil. Container may not be associated with a pod")
+ return fmt.Errorf("pod passed in was nil. Container may not be associated with a pod: %w", define.ErrInvalidArg)
}
if ctrPod == "" {
- return errors.Wrapf(define.ErrInvalidArg, "container is not a member of any pod")
+ return fmt.Errorf("container is not a member of any pod: %w", define.ErrInvalidArg)
}
if ctrPod != p.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "pod passed in is not the pod the container is associated with")
+ return fmt.Errorf("pod passed in is not the pod the container is associated with: %w", define.ErrInvalidArg)
}
return nil
}
@@ -232,18 +231,18 @@ func DefaultSeccompPath() (string, error) {
func checkDependencyContainer(depCtr, ctr *Container) error {
state, err := depCtr.State()
if err != nil {
- return errors.Wrapf(err, "error accessing dependency container %s state", depCtr.ID())
+ return fmt.Errorf("error accessing dependency container %s state: %w", depCtr.ID(), err)
}
if state == define.ContainerStateRemoving {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot use container %s as a dependency as it is being removed", depCtr.ID())
+ return fmt.Errorf("cannot use container %s as a dependency as it is being removed: %w", depCtr.ID(), define.ErrCtrStateInvalid)
}
if depCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
+ return fmt.Errorf("must specify another container: %w", define.ErrInvalidArg)
}
if ctr.config.Pod != "" && depCtr.PodID() != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, depCtr.ID())
+ return fmt.Errorf("container has joined pod %s and dependency container %s is not a member of the pod: %w", ctr.config.Pod, depCtr.ID(), define.ErrInvalidArg)
}
return nil
@@ -347,7 +346,7 @@ func makeInspectPortBindings(bindings []types.PortMapping, expose map[uint16][]s
func writeStringToPath(path, contents, mountLabel string, uid, gid int) error {
f, err := os.Create(path)
if err != nil {
- return errors.Wrapf(err, "unable to create %s", path)
+ return fmt.Errorf("unable to create %s: %w", path, err)
}
defer f.Close()
if err := f.Chown(uid, gid); err != nil {
@@ -355,7 +354,7 @@ func writeStringToPath(path, contents, mountLabel string, uid, gid int) error {
}
if _, err := f.WriteString(contents); err != nil {
- return errors.Wrapf(err, "unable to write %s", path)
+ return fmt.Errorf("unable to write %s: %w", path, err)
}
// Relabel runDirResolv for the container
if err := label.Relabel(path, mountLabel, false); err != nil {
diff --git a/libpod/util_linux.go b/libpod/util_linux.go
index fe98056dc..7c79e6ce4 100644
--- a/libpod/util_linux.go
+++ b/libpod/util_linux.go
@@ -11,8 +11,8 @@ import (
"github.com/containers/common/pkg/cgroups"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -20,7 +20,7 @@ import (
// systemdSliceFromPath makes a new systemd slice under the given parent with
// the given name.
// The parent must be a slice. The name must NOT include ".slice"
-func systemdSliceFromPath(parent, name string) (string, error) {
+func systemdSliceFromPath(parent, name string, resources *spec.LinuxResources) (string, error) {
cgroupPath, err := assembleSystemdCgroupName(parent, name)
if err != nil {
return "", err
@@ -28,8 +28,8 @@ func systemdSliceFromPath(parent, name string) (string, error) {
logrus.Debugf("Created cgroup path %s for parent %s and name %s", cgroupPath, parent, name)
- if err := makeSystemdCgroup(cgroupPath); err != nil {
- return "", errors.Wrapf(err, "error creating cgroup %s", cgroupPath)
+ if err := makeSystemdCgroup(cgroupPath, resources); err != nil {
+ return "", fmt.Errorf("error creating cgroup %s: %w", cgroupPath, err)
}
logrus.Debugf("Created cgroup %s", cgroupPath)
@@ -45,8 +45,12 @@ func getDefaultSystemdCgroup() string {
}
// makeSystemdCgroup creates a systemd Cgroup at the given location.
-func makeSystemdCgroup(path string) error {
- controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup())
+func makeSystemdCgroup(path string, resources *spec.LinuxResources) error {
+ res, err := GetLimits(resources)
+ if err != nil {
+ return err
+ }
+ controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup(), &res)
if err != nil {
return err
}
@@ -54,12 +58,20 @@ func makeSystemdCgroup(path string) error {
if rootless.IsRootless() {
return controller.CreateSystemdUserUnit(path, rootless.GetRootlessUID())
}
- return controller.CreateSystemdUnit(path)
+ err = controller.CreateSystemdUnit(path)
+ if err != nil {
+ return err
+ }
+ return nil
}
// deleteSystemdCgroup deletes the systemd cgroup at the given location
-func deleteSystemdCgroup(path string) error {
- controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup())
+func deleteSystemdCgroup(path string, resources *spec.LinuxResources) error {
+ res, err := GetLimits(resources)
+ if err != nil {
+ return err
+ }
+ controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup(), &res)
if err != nil {
return err
}
@@ -82,7 +94,7 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
const sliceSuffix = ".slice"
if !strings.HasSuffix(baseSlice, sliceSuffix) {
- return "", errors.Wrapf(define.ErrInvalidArg, "cannot assemble cgroup path with base %q - must end in .slice", baseSlice)
+ return "", fmt.Errorf("cannot assemble cgroup path with base %q - must end in .slice: %w", baseSlice, define.ErrInvalidArg)
}
noSlice := strings.TrimSuffix(baseSlice, sliceSuffix)
@@ -100,17 +112,17 @@ var lvpReleaseLabel = label.ReleaseLabel
func LabelVolumePath(path string) error {
_, mountLabel, err := lvpInitLabels([]string{})
if err != nil {
- return errors.Wrapf(err, "error getting default mountlabels")
+ return fmt.Errorf("error getting default mountlabels: %w", err)
}
if err := lvpReleaseLabel(mountLabel); err != nil {
- return errors.Wrapf(err, "error releasing label %q", mountLabel)
+ return fmt.Errorf("error releasing label %q: %w", mountLabel, err)
}
if err := lvpRelabel(path, mountLabel, true); err != nil {
if err == syscall.ENOTSUP {
logrus.Debugf("Labeling not supported on %q", path)
} else {
- return errors.Wrapf(err, "error setting selinux label for %s to %q as shared", path, mountLabel)
+ return fmt.Errorf("error setting selinux label for %s to %q as shared: %w", path, mountLabel, err)
}
}
return nil
diff --git a/libpod/volume.go b/libpod/volume.go
index ab461a37f..2e8cd77a5 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -55,6 +55,8 @@ type VolumeConfig struct {
// DisableQuota indicates that the volume should completely disable using any
// quota tracking.
DisableQuota bool `json:"disableQuota,omitempty"`
+ // Timeout allows users to override the default driver timeout of 5 seconds
+ Timeout int
}
// VolumeState holds the volume's mutable state.
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index 3d721410b..dd2f3fd01 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -1,9 +1,10 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/define"
pluginapi "github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -29,7 +30,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
data.Mountpoint = v.state.MountPoint
if v.plugin == nil {
- return nil, errors.Wrapf(define.ErrMissingPlugin, "volume %s uses volume plugin %s but it is not available, cannot inspect", v.Name(), v.config.Driver)
+ return nil, fmt.Errorf("volume %s uses volume plugin %s but it is not available, cannot inspect: %w", v.Name(), v.config.Driver, define.ErrMissingPlugin)
}
// Retrieve status for the volume.
@@ -38,7 +39,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
req.Name = v.Name()
resp, err := v.plugin.GetVolume(req)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving volume %s information from plugin %s", v.Name(), v.Driver())
+ return nil, fmt.Errorf("error retrieving volume %s information from plugin %s: %w", v.Name(), v.Driver(), err)
}
if resp != nil {
data.Status = resp.Status
@@ -63,6 +64,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
data.MountCount = v.state.MountCount
data.NeedsCopyUp = v.state.NeedsCopyUp
data.NeedsChown = v.state.NeedsChown
+ data.Timeout = v.config.Timeout
return data, nil
}
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index e0ebb729d..43c3f9b0b 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -1,11 +1,11 @@
package libpod
import (
+ "fmt"
"os"
"path/filepath"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)
// Creates a new volume
@@ -55,6 +55,12 @@ func (v *Volume) needsMount() bool {
if _, ok := v.config.Options["NOQUOTA"]; ok {
index++
}
+ if _, ok := v.config.Options["nocopy"]; ok {
+ index++
+ }
+ if _, ok := v.config.Options["copy"]; ok {
+ index++
+ }
// when uid or gid is set there is also the "o" option
// set so we have to ignore this one as well
if index > 0 {
@@ -84,7 +90,7 @@ func (v *Volume) save() error {
func (v *Volume) refresh() error {
lock, err := v.runtime.lockManager.AllocateAndRetrieveLock(v.config.LockID)
if err != nil {
- return errors.Wrapf(err, "acquiring lock %d for volume %s", v.config.LockID, v.Name())
+ return fmt.Errorf("acquiring lock %d for volume %s: %w", v.config.LockID, v.Name(), err)
}
v.lock = lock
diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go
index 7d7dea9d0..cfd60554d 100644
--- a/libpod/volume_internal_linux.go
+++ b/libpod/volume_internal_linux.go
@@ -4,12 +4,13 @@
package libpod
import (
+ "errors"
+ "fmt"
"os/exec"
"strings"
"github.com/containers/podman/v4/libpod/define"
pluginapi "github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -51,7 +52,7 @@ func (v *Volume) mount() error {
// the same one for everything.
if v.UsesVolumeDriver() {
if v.plugin == nil {
- return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
+ return fmt.Errorf("volume plugin %s (needed by volume %s) missing: %w", v.Driver(), v.Name(), define.ErrMissingPlugin)
}
req := new(pluginapi.MountRequest)
@@ -83,7 +84,7 @@ func (v *Volume) mount() error {
// TODO: might want to cache this path in the runtime?
mountPath, err := exec.LookPath("mount")
if err != nil {
- return errors.Wrapf(err, "locating 'mount' binary")
+ return fmt.Errorf("locating 'mount' binary: %w", err)
}
mountArgs := []string{}
if volOptions != "" {
@@ -103,7 +104,7 @@ func (v *Volume) mount() error {
logrus.Debugf("Running mount command: %s %s", mountPath, strings.Join(mountArgs, " "))
if output, err := mountCmd.CombinedOutput(); err != nil {
logrus.Debugf("Mount %v failed with %v", mountCmd, err)
- return errors.Errorf(string(output))
+ return errors.New(string(output))
}
logrus.Debugf("Mounted volume %s", v.Name())
@@ -148,7 +149,7 @@ func (v *Volume) unmount(force bool) error {
if v.state.MountCount == 0 {
if v.UsesVolumeDriver() {
if v.plugin == nil {
- return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
+ return fmt.Errorf("volume plugin %s (needed by volume %s) missing: %w", v.Driver(), v.Name(), define.ErrMissingPlugin)
}
req := new(pluginapi.UnmountRequest)
@@ -168,7 +169,7 @@ func (v *Volume) unmount(force bool) error {
// Ignore EINVAL - the mount no longer exists.
return nil
}
- return errors.Wrapf(err, "unmounting volume %s", v.Name())
+ return fmt.Errorf("unmounting volume %s: %w", v.Name(), err)
}
logrus.Debugf("Unmounted volume %s", v.Name())
}
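
The switch from errors.Errorf(string(output)) to errors.New(string(output)) matters because mount output can contain % characters, which fmt-style constructors would treat as format verbs. A quick runnable illustration (the sample string is made up):

package main

import (
    "errors"
    "fmt"
)

func main() {
    out := "mount: bad option %s for device" // external output may contain % verbs
    fmt.Println(fmt.Errorf(out))  // format verbs misfire: "...%!s(MISSING)..."
    fmt.Println(errors.New(out))  // preserved verbatim
}
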
diff --git a/pkg/api/handlers/compat/auth.go b/pkg/api/handlers/compat/auth.go
index 7804c8230..37d2b784d 100644
--- a/pkg/api/handlers/compat/auth.go
+++ b/pkg/api/handlers/compat/auth.go
@@ -3,6 +3,7 @@ package compat
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"strings"
@@ -14,7 +15,6 @@ import (
api "github.com/containers/podman/v4/pkg/api/types"
"github.com/containers/podman/v4/pkg/domain/entities"
docker "github.com/docker/docker/api/types"
- "github.com/pkg/errors"
)
func stripAddressOfScheme(address string) string {
@@ -28,7 +28,7 @@ func Auth(w http.ResponseWriter, r *http.Request) {
var authConfig docker.AuthConfig
err := json.NewDecoder(r.Body).Decode(&authConfig)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse request"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse request: %w", err))
return
}
diff --git a/pkg/api/handlers/compat/changes.go b/pkg/api/handlers/compat/changes.go
index af0143fcf..c3f612f04 100644
--- a/pkg/api/handlers/compat/changes.go
+++ b/pkg/api/handlers/compat/changes.go
@@ -1,6 +1,7 @@
package compat
import (
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -8,7 +9,6 @@ import (
"github.com/containers/podman/v4/pkg/api/handlers/utils"
api "github.com/containers/podman/v4/pkg/api/types"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func Changes(w http.ResponseWriter, r *http.Request) {
@@ -20,7 +20,7 @@ func Changes(w http.ResponseWriter, r *http.Request) {
DiffType string `schema:"diffType"`
}{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
var diffType define.DiffType
@@ -32,7 +32,7 @@ func Changes(w http.ResponseWriter, r *http.Request) {
case "image":
diffType = define.DiffImage
default:
- utils.Error(w, http.StatusBadRequest, errors.Errorf("invalid diffType value %q", query.DiffType))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("invalid diffType value %q", query.DiffType))
return
}
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go
index 616f0a138..ae063dc9f 100644
--- a/pkg/api/handlers/compat/containers.go
+++ b/pkg/api/handlers/compat/containers.go
@@ -2,6 +2,7 @@ package compat
import (
"encoding/json"
+ "errors"
"fmt"
"net/http"
"sort"
@@ -27,7 +28,6 @@ import (
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -46,7 +46,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -73,7 +73,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
reports, err := containerEngine.ContainerRm(r.Context(), []string{name}, options)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
@@ -83,7 +83,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
}
if len(reports) > 0 && reports[0].Err != nil {
err = reports[0].Err
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
@@ -110,12 +110,12 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to decode filter parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode filter parameters for %s: %w", r.URL.String(), err))
return
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -164,7 +164,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
for _, ctnr := range containers {
api, err := LibpodToContainer(ctnr, query.Size)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
// container was removed between the initial fetch of the list and conversion
logrus.Debugf("Container %s removed between initial fetch and conversion, ignoring in output", ctnr.ID())
continue
@@ -187,7 +187,7 @@ func GetContainer(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -215,7 +215,7 @@ func KillContainer(w http.ResponseWriter, r *http.Request) {
Signal: "KILL",
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -228,12 +228,12 @@ func KillContainer(w http.ResponseWriter, r *http.Request) {
}
report, err := containerEngine.ContainerKill(r.Context(), []string{name}, options)
if err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid ||
- errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) ||
+ errors.Is(err, define.ErrCtrStopped) {
utils.Error(w, http.StatusConflict, err)
return
}
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
@@ -289,8 +289,10 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error
return nil, err
}
stateStr := state.String()
- if stateStr == "configured" {
- stateStr = "created"
+
+ // Some Docker states do not match ours. This makes sure the state string stays true to the Docker API.
+ if state == define.ContainerStateCreated {
+ stateStr = define.ContainerStateConfigured.String()
}
switch state {
@@ -395,6 +397,15 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error
}, nil
}
+func convertSecondaryIPPrefixLen(input *define.InspectNetworkSettings, output *types.NetworkSettings) {
+ for index, ip := range input.SecondaryIPAddresses {
+ output.SecondaryIPAddresses[index].PrefixLen = ip.PrefixLength
+ }
+ for index, ip := range input.SecondaryIPv6Addresses {
+ output.SecondaryIPv6Addresses[index].PrefixLen = ip.PrefixLength
+ }
+}
+
func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON, error) {
_, imageName := l.Image()
inspect, err := l.Inspect(sz)
@@ -420,9 +431,9 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON,
state.Running = true
}
- // docker calls the configured state "created"
- if state.Status == define.ContainerStateConfigured.String() {
- state.Status = define.ContainerStateCreated.String()
+ // Docker's "created" state is our "configured" state.
+ if state.Status == define.ContainerStateCreated.String() {
+ state.Status = define.ContainerStateConfigured.String()
}
if l.HasHealthCheck() && state.Status != "created" {
@@ -510,7 +521,7 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON,
for ep := range inspect.HostConfig.PortBindings {
splitp := strings.SplitN(ep, "/", 2)
if len(splitp) != 2 {
- return nil, errors.Errorf("PORT/PROTOCOL Format required for %q", ep)
+ return nil, fmt.Errorf("PORT/PROTOCOL Format required for %q", ep)
}
exposedPort, err := nat.NewPort(splitp[1], splitp[0])
if err != nil {
@@ -585,6 +596,9 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON,
if err := json.Unmarshal(n, &networkSettings); err != nil {
return nil, err
}
+
+ convertSecondaryIPPrefixLen(inspect.NetworkSettings, &networkSettings)
+
// do not report null instead use an empty map
if networkSettings.Networks == nil {
networkSettings.Networks = map[string]*network.EndpointSettings{}
@@ -614,7 +628,7 @@ func RenameContainer(w http.ResponseWriter, r *http.Request) {
Name string `schema:"name"`
}{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -625,7 +639,7 @@ func RenameContainer(w http.ResponseWriter, r *http.Request) {
}
if _, err := runtime.RenameContainer(r.Context(), ctr, query.Name); err != nil {
- if errors.Cause(err) == define.ErrPodExists || errors.Cause(err) == define.ErrCtrExists {
+ if errors.Is(err, define.ErrPodExists) || errors.Is(err, define.ErrCtrExists) {
utils.Error(w, http.StatusConflict, err)
return
}
diff --git a/pkg/api/handlers/compat/containers_archive.go b/pkg/api/handlers/compat/containers_archive.go
index 45b13818b..cadfb7bd5 100644
--- a/pkg/api/handlers/compat/containers_archive.go
+++ b/pkg/api/handlers/compat/containers_archive.go
@@ -2,8 +2,12 @@ package compat
import (
"encoding/json"
+ "fmt"
"net/http"
"os"
+ "strings"
+
+ "errors"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
@@ -13,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -27,7 +30,7 @@ func Archive(w http.ResponseWriter, r *http.Request) {
case http.MethodHead, http.MethodGet:
handleHeadAndGet(w, r, decoder, runtime)
default:
- utils.Error(w, http.StatusNotImplemented, errors.Errorf("unsupported method: %v", r.Method))
+ utils.Error(w, http.StatusNotImplemented, fmt.Errorf("unsupported method: %v", r.Method))
}
}
@@ -38,7 +41,7 @@ func handleHeadAndGet(w http.ResponseWriter, r *http.Request, decoder *schema.De
err := decoder.Decode(&query, r.URL.Query())
if err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrap(err, "couldn't decode the query"))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("couldn't decode the query: %w", err))
return
}
@@ -64,7 +67,7 @@ func handleHeadAndGet(w http.ResponseWriter, r *http.Request, decoder *schema.De
w.Header().Add(copy.XDockerContainerPathStatHeader, statHeader)
}
- if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == copy.ErrENOENT {
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, copy.ErrENOENT) {
// 404 is returned for an absent container and path. The
// clients must deal with it accordingly.
utils.Error(w, http.StatusNotFound, err)
@@ -94,25 +97,24 @@ func handleHeadAndGet(w http.ResponseWriter, r *http.Request, decoder *schema.De
func handlePut(w http.ResponseWriter, r *http.Request, decoder *schema.Decoder, runtime *libpod.Runtime) {
query := struct {
- Path string `schema:"path"`
- Chown bool `schema:"copyUIDGID"`
- Rename string `schema:"rename"`
- // TODO handle params below
- NoOverwriteDirNonDir bool `schema:"noOverwriteDirNonDir"`
+ Path string `schema:"path"`
+ Chown bool `schema:"copyUIDGID"`
+ Rename string `schema:"rename"`
+ NoOverwriteDirNonDir bool `schema:"noOverwriteDirNonDir"`
}{
Chown: utils.IsLibpodRequest(r), // backward compatibility
}
err := decoder.Decode(&query, r.URL.Query())
if err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrap(err, "couldn't decode the query"))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("couldn't decode the query: %w", err))
return
}
var rename map[string]string
if query.Rename != "" {
if err := json.Unmarshal([]byte(query.Rename), &rename); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrap(err, "couldn't decode the query"))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("couldn't decode the query field 'rename': %w", err))
return
}
}
@@ -120,15 +122,25 @@ func handlePut(w http.ResponseWriter, r *http.Request, decoder *schema.Decoder,
containerName := utils.GetName(r)
containerEngine := abi.ContainerEngine{Libpod: runtime}
- copyOptions := entities.CopyOptions{Chown: query.Chown, Rename: rename}
- copyFunc, err := containerEngine.ContainerCopyFromArchive(r.Context(), containerName, query.Path, r.Body, copyOptions)
- if errors.Cause(err) == define.ErrNoSuchCtr || os.IsNotExist(err) {
- // 404 is returned for an absent container and path. The
- // clients must deal with it accordingly.
- utils.Error(w, http.StatusNotFound, errors.Wrap(err, "the container doesn't exists"))
- return
- } else if err != nil {
- utils.Error(w, http.StatusInternalServerError, err)
+ copyFunc, err := containerEngine.ContainerCopyFromArchive(r.Context(), containerName, query.Path, r.Body,
+ entities.CopyOptions{
+ Chown: query.Chown,
+ NoOverwriteDirNonDir: query.NoOverwriteDirNonDir,
+ Rename: rename,
+ })
+ if err != nil {
+ switch {
+ case errors.Is(err, define.ErrNoSuchCtr) || os.IsNotExist(err):
+ // 404 is returned for an absent container and path. The
+ // clients must deal with it accordingly.
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("the container doesn't exist: %w", err))
+ case strings.Contains(err.Error(), "copier: put: error creating file"):
+ // Not the best test, but we need to break this out for compatibility.
+ // See vendor/github.com/containers/buildah/copier/copier.go:1585
+ utils.Error(w, http.StatusBadRequest, err)
+ default:
+ utils.Error(w, http.StatusInternalServerError, err)
+ }
return
}
diff --git a/pkg/api/handlers/compat/containers_attach.go b/pkg/api/handlers/compat/containers_attach.go
index c8905808f..e804e628a 100644
--- a/pkg/api/handlers/compat/containers_attach.go
+++ b/pkg/api/handlers/compat/containers_attach.go
@@ -1,6 +1,8 @@
package compat
import (
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -9,7 +11,6 @@ import (
"github.com/containers/podman/v4/pkg/api/server/idle"
api "github.com/containers/podman/v4/pkg/api/types"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -60,13 +61,13 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) {
streams = nil
}
if useStreams && !streams.Stdout && !streams.Stderr && !streams.Stdin {
- utils.Error(w, http.StatusBadRequest, errors.Errorf("at least one of stdin, stdout, stderr must be true"))
+ utils.Error(w, http.StatusBadRequest, errors.New("at least one of stdin, stdout, stderr must be true"))
return
}
// At least one of these must be set
if !query.Stream && !query.Logs {
- utils.Error(w, http.StatusBadRequest, errors.Errorf("at least one of Logs or Stream must be set"))
+ utils.Error(w, http.StatusBadRequest, errors.New("at least one of Logs or Stream must be set"))
return
}
@@ -85,16 +86,16 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) {
// For Docker compatibility, we need to re-initialize containers in these states.
if state == define.ContainerStateConfigured || state == define.ContainerStateExited || state == define.ContainerStateStopped {
if err := ctr.Init(r.Context(), ctr.PodID() != ""); err != nil {
- utils.Error(w, http.StatusConflict, errors.Wrapf(err, "error preparing container %s for attach", ctr.ID()))
+ utils.Error(w, http.StatusConflict, fmt.Errorf("error preparing container %s for attach: %w", ctr.ID(), err))
return
}
} else if !(state == define.ContainerStateCreated || state == define.ContainerStateRunning) {
- utils.InternalServerError(w, errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers - currently in state %s", state.String()))
+ utils.InternalServerError(w, fmt.Errorf("can only attach to created or running containers - currently in state %s: %w", state.String(), define.ErrCtrStateInvalid))
return
}
logErr := func(e error) {
- logrus.Error(errors.Wrapf(e, "error attaching to container %s", ctr.ID()))
+ logrus.Errorf("Error attaching to container %s: %v", ctr.ID(), e)
}
// Perform HTTP attach.
diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go
index b9b7f6708..9fff8b4c8 100644
--- a/pkg/api/handlers/compat/containers_create.go
+++ b/pkg/api/handlers/compat/containers_create.go
@@ -2,6 +2,7 @@ package compat
import (
"encoding/json"
+ "errors"
"fmt"
"net"
"net/http"
@@ -26,7 +27,6 @@ import (
"github.com/containers/storage"
"github.com/docker/docker/api/types/mount"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func CreateContainer(w http.ResponseWriter, r *http.Request) {
@@ -38,14 +38,14 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
// override any golang type defaults
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
// compatible configuration
body := handlers.CreateContainerConfig{}
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decode(): %w", err))
return
}
@@ -53,37 +53,37 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
body.Name = query.Name
if len(body.HostConfig.Links) > 0 {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(utils.ErrLinkNotSupport, "bad parameter"))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("bad parameter: %w", utils.ErrLinkNotSupport))
return
}
rtc, err := runtime.GetConfig()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to get runtime config"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to get runtime config: %w", err))
return
}
imageName, err := utils.NormalizeToDockerHub(r, body.Config.Image)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
body.Config.Image = imageName
newImage, resolvedName, err := runtime.LibimageRuntime().LookupImage(body.Config.Image, nil)
if err != nil {
- if errors.Cause(err) == storage.ErrImageUnknown {
- utils.Error(w, http.StatusNotFound, errors.Wrap(err, "No such image"))
+ if errors.Is(err, storage.ErrImageUnknown) {
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("no such image: %w", err))
return
}
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error looking up image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error looking up image: %w", err))
return
}
// Take body structure and convert to cliopts
cliOpts, args, err := cliOpts(body, rtc)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "make cli opts()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("make cli opts(): %w", err))
return
}
@@ -100,7 +100,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
sg := specgen.NewSpecGenerator(imgNameOrID, cliOpts.RootFS)
if err := specgenutil.FillOutSpecGen(sg, cliOpts, args); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "fill out specgen"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("fill out specgen: %w", err))
return
}
// moby always create the working directory
@@ -109,7 +109,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
ic := abi.ContainerEngine{Libpod: runtime}
report, err := ic.ContainerCreate(r.Context(), sg)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "container create"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("container create: %w", err))
return
}
createResponse := entities.ContainerCreateResponse{
@@ -261,8 +261,13 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
}
}
- // netMode
- nsmode, networks, netOpts, err := specgen.ParseNetworkFlag([]string{string(cc.HostConfig.NetworkMode)})
+ // Special case for NetworkMode: the podman default is slirp4netns for
+ // rootless, but for better docker compat we want bridge.
+ netmode := string(cc.HostConfig.NetworkMode)
+ if netmode == "" || netmode == "default" {
+ netmode = "bridge"
+ }
+ nsmode, networks, netOpts, err := specgen.ParseNetworkFlag([]string{netmode})
if err != nil {
return nil, nil, err
}
@@ -278,6 +283,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
Network: nsmode,
PublishPorts: specPorts,
NetworkOptions: netOpts,
+ NoHosts: rtc.Containers.NoHosts,
}
// network names
@@ -294,7 +300,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
if len(endpoint.IPAddress) > 0 {
staticIP := net.ParseIP(endpoint.IPAddress)
if staticIP == nil {
- return nil, nil, errors.Errorf("failed to parse the ip address %q", endpoint.IPAddress)
+ return nil, nil, fmt.Errorf("failed to parse the ip address %q", endpoint.IPAddress)
}
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
}
@@ -304,7 +310,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
if len(endpoint.IPAMConfig.IPv4Address) > 0 {
staticIP := net.ParseIP(endpoint.IPAMConfig.IPv4Address)
if staticIP == nil {
- return nil, nil, errors.Errorf("failed to parse the ipv4 address %q", endpoint.IPAMConfig.IPv4Address)
+ return nil, nil, fmt.Errorf("failed to parse the ipv4 address %q", endpoint.IPAMConfig.IPv4Address)
}
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
}
@@ -312,7 +318,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
if len(endpoint.IPAMConfig.IPv6Address) > 0 {
staticIP := net.ParseIP(endpoint.IPAMConfig.IPv6Address)
if staticIP == nil {
- return nil, nil, errors.Errorf("failed to parse the ipv6 address %q", endpoint.IPAMConfig.IPv6Address)
+ return nil, nil, fmt.Errorf("failed to parse the ipv6 address %q", endpoint.IPAMConfig.IPv6Address)
}
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
}
@@ -321,7 +327,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
if len(endpoint.MacAddress) > 0 {
staticMac, err := net.ParseMAC(endpoint.MacAddress)
if err != nil {
- return nil, nil, errors.Errorf("failed to parse the mac address %q", endpoint.MacAddress)
+ return nil, nil, fmt.Errorf("failed to parse the mac address %q", endpoint.MacAddress)
}
netOpts.StaticMAC = types.HardwareAddr(staticMac)
}
@@ -427,7 +433,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
}
if cc.HostConfig.Resources.NanoCPUs > 0 {
if cliOpts.CPUPeriod != 0 || cliOpts.CPUQuota != 0 {
- return nil, nil, errors.Errorf("NanoCpus conflicts with CpuPeriod and CpuQuota")
+ return nil, nil, fmt.Errorf("NanoCpus conflicts with CpuPeriod and CpuQuota")
}
cliOpts.CPUPeriod = 100000
cliOpts.CPUQuota = cc.HostConfig.Resources.NanoCPUs / 10000
@@ -438,7 +444,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
cliOpts.Volume = append(cliOpts.Volume, vol)
// Extract the destination so we don't add duplicate mounts in
// the volumes phase.
- splitVol := strings.SplitN(vol, ":", 3)
+ splitVol := specgen.SplitVolumeString(vol)
switch len(splitVol) {
case 1:
volDestinations[vol] = true
@@ -473,7 +479,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
}
if err := os.MkdirAll(vol, 0o755); err != nil {
if !os.IsExist(err) {
- return nil, nil, errors.Wrapf(err, "error making volume mountpoint for volume %s", vol)
+ return nil, nil, fmt.Errorf("error making volume mountpoint for volume %s: %w", vol, err)
}
}
}
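
A worked example of the NanoCpus conversion in the hunk above: NanoCpus is Docker's CPUs-in-billionths field, the 100000µs period is fixed, and the /10000 quota derivation comes straight from the diff.

package main // worked example of the NanoCpus -> period/quota conversion

import "fmt"

func main() {
    nanoCPUs := int64(1_500_000_000) // Docker NanoCpus: 1.5 CPUs
    period := int64(100000)          // fixed CFS period in microseconds
    quota := nanoCPUs / 10000        // 150000µs of quota per 100000µs period
    fmt.Printf("period=%dus quota=%dus => %.2f CPUs\n", period, quota, float64(quota)/float64(period))
}
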
diff --git a/pkg/api/handlers/compat/containers_export.go b/pkg/api/handlers/compat/containers_export.go
index 743ce2d53..66e1dcca5 100644
--- a/pkg/api/handlers/compat/containers_export.go
+++ b/pkg/api/handlers/compat/containers_export.go
@@ -1,6 +1,7 @@
package compat
import (
+ "fmt"
"io/ioutil"
"net/http"
"os"
@@ -8,7 +9,6 @@ import (
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
api "github.com/containers/podman/v4/pkg/api/types"
- "github.com/pkg/errors"
)
func ExportContainer(w http.ResponseWriter, r *http.Request) {
@@ -21,21 +21,21 @@ func ExportContainer(w http.ResponseWriter, r *http.Request) {
}
tmpfile, err := ioutil.TempFile("", "api.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
defer os.Remove(tmpfile.Name())
if err := tmpfile.Close(); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
return
}
if err := con.Export(tmpfile.Name()); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to save image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to save image: %w", err))
return
}
rdr, err := os.Open(tmpfile.Name())
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
return
}
defer rdr.Close()
diff --git a/pkg/api/handlers/compat/containers_logs.go b/pkg/api/handlers/compat/containers_logs.go
index fc894d815..77c4fdd2a 100644
--- a/pkg/api/handlers/compat/containers_logs.go
+++ b/pkg/api/handlers/compat/containers_logs.go
@@ -16,7 +16,6 @@ import (
api "github.com/containers/podman/v4/pkg/api/types"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
@@ -36,13 +35,13 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) {
Tail: "all",
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if !(query.Stdout || query.Stderr) {
msg := fmt.Sprintf("%s: you must choose at least one stream", http.StatusText(http.StatusBadRequest))
- utils.Error(w, http.StatusBadRequest, errors.Errorf("%s for %s", msg, r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("%s for %s", msg, r.URL.String()))
return
}
@@ -96,7 +95,7 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) {
logChannel := make(chan *logs.LogLine, tail+1)
if err := runtime.Log(r.Context(), []*libpod.Container{ctnr}, options, logChannel); err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "failed to obtain logs for Container '%s'", name))
+ utils.InternalServerError(w, fmt.Errorf("failed to obtain logs for Container '%s': %w", name, err))
return
}
go func() {
@@ -114,7 +113,7 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) {
if !utils.IsLibpodRequest(r) {
inspectData, err := ctnr.Inspect(false)
if err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "failed to obtain logs for Container '%s'", name))
+ utils.InternalServerError(w, fmt.Errorf("failed to obtain logs for Container '%s': %w", name, err))
return
}
writeHeader = !inspectData.Config.Tty
diff --git a/pkg/api/handlers/compat/containers_prune.go b/pkg/api/handlers/compat/containers_prune.go
index 9b5390d64..95d6a639f 100644
--- a/pkg/api/handlers/compat/containers_prune.go
+++ b/pkg/api/handlers/compat/containers_prune.go
@@ -2,6 +2,8 @@ package compat
import (
"bytes"
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -11,14 +13,13 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities/reports"
"github.com/containers/podman/v4/pkg/domain/filters"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
func PruneContainers(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
filtersMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
diff --git a/pkg/api/handlers/compat/containers_restart.go b/pkg/api/handlers/compat/containers_restart.go
index ded6480bc..d805b95c2 100644
--- a/pkg/api/handlers/compat/containers_restart.go
+++ b/pkg/api/handlers/compat/containers_restart.go
@@ -1,6 +1,8 @@
package compat
import (
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -10,7 +12,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func RestartContainer(w http.ResponseWriter, r *http.Request) {
@@ -29,7 +30,7 @@ func RestartContainer(w http.ResponseWriter, r *http.Request) {
// override any golang type defaults
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -44,7 +45,7 @@ func RestartContainer(w http.ResponseWriter, r *http.Request) {
}
report, err := containerEngine.ContainerRestart(r.Context(), []string{name}, options)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
diff --git a/pkg/api/handlers/compat/containers_stats.go b/pkg/api/handlers/compat/containers_stats.go
index 77b16b03e..c115b4181 100644
--- a/pkg/api/handlers/compat/containers_stats.go
+++ b/pkg/api/handlers/compat/containers_stats.go
@@ -2,6 +2,7 @@ package compat
import (
"encoding/json"
+ "fmt"
"net/http"
"time"
@@ -12,7 +13,7 @@ import (
api "github.com/containers/podman/v4/pkg/api/types"
docker "github.com/docker/docker/api/types"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
+ runccgroups "github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/sirupsen/logrus"
)
@@ -29,7 +30,7 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) {
Stream: true,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if query.Stream && query.OneShot { // mismatch. one-shot can only be passed with stream=false
@@ -44,21 +45,9 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) {
return
}
- // If the container isn't running, then let's not bother and return
- // immediately.
- state, err := ctnr.State()
- if err != nil {
- utils.InternalServerError(w, err)
- return
- }
- if state != define.ContainerStateRunning {
- utils.Error(w, http.StatusConflict, define.ErrCtrStateInvalid)
- return
- }
-
stats, err := ctnr.GetContainerStats(nil)
if err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "failed to obtain Container %s stats", name))
+ utils.InternalServerError(w, fmt.Errorf("failed to obtain Container %s stats: %w", name, err))
return
}
@@ -70,7 +59,7 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) {
flusher.Flush()
}
- // Setup JSON encoder for streaming.
+ // Set up JSON encoder for streaming.
coder.SetEscapeHTML(true)
var preRead time.Time
var preCPUStats CPUStats
@@ -144,17 +133,23 @@ streamLabel: // A label to flatten the scope
InstanceID: "",
}
+ cfg := ctnr.Config()
+ memoryLimit := cgroupStat.MemoryStats.Usage.Limit
+ if cfg.Spec.Linux != nil && cfg.Spec.Linux.Resources != nil && cfg.Spec.Linux.Resources.Memory != nil && cfg.Spec.Linux.Resources.Memory.Limit != nil && *cfg.Spec.Linux.Resources.Memory.Limit > 0 {
+ memoryLimit = uint64(*cfg.Spec.Linux.Resources.Memory.Limit)
+ }
+
systemUsage, _ := cgroups.GetSystemCPUUsage()
s := StatsJSON{
Stats: Stats{
Read: time.Now(),
PreRead: preRead,
PidsStats: docker.PidsStats{
- Current: cgroupStat.Pids.Current,
+ Current: cgroupStat.PidsStats.Current,
Limit: 0,
},
BlkioStats: docker.BlkioStats{
- IoServiceBytesRecursive: toBlkioStatEntry(cgroupStat.Blkio.IoServiceBytesRecursive),
+ IoServiceBytesRecursive: toBlkioStatEntry(cgroupStat.BlkioStats.IoServiceBytesRecursive),
IoServicedRecursive: nil,
IoQueuedRecursive: nil,
IoServiceTimeRecursive: nil,
@@ -165,14 +160,14 @@ streamLabel: // A label to flatten the scope
},
CPUStats: CPUStats{
CPUUsage: docker.CPUUsage{
- TotalUsage: cgroupStat.CPU.Usage.Total,
- PercpuUsage: cgroupStat.CPU.Usage.PerCPU,
- UsageInKernelmode: cgroupStat.CPU.Usage.Kernel,
- UsageInUsermode: cgroupStat.CPU.Usage.Total - cgroupStat.CPU.Usage.Kernel,
+ TotalUsage: cgroupStat.CpuStats.CpuUsage.TotalUsage,
+ PercpuUsage: cgroupStat.CpuStats.CpuUsage.PercpuUsage,
+ UsageInKernelmode: cgroupStat.CpuStats.CpuUsage.UsageInKernelmode,
+ UsageInUsermode: cgroupStat.CpuStats.CpuUsage.TotalUsage - cgroupStat.CpuStats.CpuUsage.UsageInKernelmode,
},
CPU: stats.CPU,
SystemUsage: systemUsage,
- OnlineCPUs: uint32(len(cgroupStat.CPU.Usage.PerCPU)),
+ OnlineCPUs: uint32(len(cgroupStat.CpuStats.CpuUsage.PercpuUsage)),
ThrottlingData: docker.ThrottlingData{
Periods: 0,
ThrottledPeriods: 0,
@@ -181,11 +176,11 @@ streamLabel: // A label to flatten the scope
},
PreCPUStats: preCPUStats,
MemoryStats: docker.MemoryStats{
- Usage: cgroupStat.Memory.Usage.Usage,
- MaxUsage: cgroupStat.Memory.Usage.Limit,
+ Usage: cgroupStat.MemoryStats.Usage.Usage,
+ MaxUsage: cgroupStat.MemoryStats.Usage.Limit,
Stats: nil,
Failcnt: 0,
- Limit: cgroupStat.Memory.Usage.Limit,
+ Limit: memoryLimit,
Commit: 0,
CommitPeak: 0,
PrivateWorkingSet: 0,
@@ -222,7 +217,7 @@ streamLabel: // A label to flatten the scope
}
}
-func toBlkioStatEntry(entries []cgroups.BlkIOEntry) []docker.BlkioStatEntry {
+func toBlkioStatEntry(entries []runccgroups.BlkioStatEntry) []docker.BlkioStatEntry {
results := make([]docker.BlkioStatEntry, len(entries))
for i, e := range entries {
bits, err := json.Marshal(e)
diff --git a/pkg/api/handlers/compat/containers_stop.go b/pkg/api/handlers/compat/containers_stop.go
index 1c1fb310c..33bb3a679 100644
--- a/pkg/api/handlers/compat/containers_stop.go
+++ b/pkg/api/handlers/compat/containers_stop.go
@@ -1,6 +1,8 @@
package compat
import (
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -10,7 +12,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func StopContainer(w http.ResponseWriter, r *http.Request) {
@@ -29,7 +30,7 @@ func StopContainer(w http.ResponseWriter, r *http.Request) {
// override any golang type defaults
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -63,7 +64,7 @@ func StopContainer(w http.ResponseWriter, r *http.Request) {
}
report, err := containerEngine.ContainerStop(r.Context(), []string{name}, options)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
diff --git a/pkg/api/handlers/compat/containers_top.go b/pkg/api/handlers/compat/containers_top.go
index 6ca178cf7..9f598cd73 100644
--- a/pkg/api/handlers/compat/containers_top.go
+++ b/pkg/api/handlers/compat/containers_top.go
@@ -12,7 +12,6 @@ import (
"github.com/containers/podman/v4/pkg/api/handlers/utils"
api "github.com/containers/podman/v4/pkg/api/types"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -33,7 +32,7 @@ func TopContainer(w http.ResponseWriter, r *http.Request) {
PsArgs: psArgs,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
diff --git a/pkg/api/handlers/compat/events.go b/pkg/api/handlers/compat/events.go
index 6bcb7bd32..18fb35966 100644
--- a/pkg/api/handlers/compat/events.go
+++ b/pkg/api/handlers/compat/events.go
@@ -1,6 +1,7 @@
package compat
import (
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -11,7 +12,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -34,7 +34,7 @@ func GetEvents(w http.ResponseWriter, r *http.Request) {
Stream: true,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -44,7 +44,7 @@ func GetEvents(w http.ResponseWriter, r *http.Request) {
libpodFilters, err := util.FiltersFromRequest(r)
if err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
eventChannel := make(chan *events.Event)
diff --git a/pkg/api/handlers/compat/exec.go b/pkg/api/handlers/compat/exec.go
index a8b45c685..1b4dead8b 100644
--- a/pkg/api/handlers/compat/exec.go
+++ b/pkg/api/handlers/compat/exec.go
@@ -2,9 +2,12 @@ package compat
import (
"encoding/json"
+ "errors"
+ "fmt"
"net/http"
"strings"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/api/handlers"
@@ -14,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/gorilla/mux"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -24,7 +26,7 @@ func ExecCreateHandler(w http.ResponseWriter, r *http.Request) {
input := new(handlers.ExecCreateConfig)
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "error decoding request body as JSON"))
+ utils.InternalServerError(w, fmt.Errorf("error decoding request body as JSON: %w", err))
return
}
@@ -48,7 +50,7 @@ func ExecCreateHandler(w http.ResponseWriter, r *http.Request) {
for _, envStr := range input.Env {
split := strings.SplitN(envStr, "=", 2)
if len(split) != 2 {
- utils.Error(w, http.StatusBadRequest, errors.Errorf("environment variable %q badly formed, must be key=value", envStr))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("environment variable %q badly formed, must be key=value", envStr))
return
}
libpodConfig.Environment[split[0]] = split[1]
@@ -78,14 +80,14 @@ func ExecCreateHandler(w http.ResponseWriter, r *http.Request) {
sessID, err := ctr.ExecCreate(libpodConfig)
if err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid {
+ if errors.Is(err, define.ErrCtrStateInvalid) {
// Check if the container is paused. If so, return a 409
state, err := ctr.State()
if err == nil {
// Ignore the error != nil case. We're already
// throwing an InternalServerError below.
if state == define.ContainerStatePaused {
- utils.Error(w, http.StatusConflict, errors.Errorf("cannot create exec session as container %s is paused", ctr.ID()))
+ utils.Error(w, http.StatusConflict, fmt.Errorf("cannot create exec session as container %s is paused", ctr.ID()))
return
}
}
@@ -112,7 +114,7 @@ func ExecInspectHandler(w http.ResponseWriter, r *http.Request) {
session, err := sessionCtr.ExecSession(sessionID)
if err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "error retrieving exec session %s from container %s", sessionID, sessionCtr.ID()))
+ utils.InternalServerError(w, fmt.Errorf("error retrieving exec session %s from container %s: %w", sessionID, sessionCtr.ID(), err))
return
}
@@ -135,7 +137,7 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
bodyParams := new(handlers.ExecStartConfig)
if err := json.NewDecoder(r.Body).Decode(&bodyParams); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to decode parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to decode parameters for %s: %w", r.URL.String(), err))
return
}
// TODO: Verify TTY setting against what inspect session was made with
@@ -154,7 +156,7 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
return
}
if state != define.ContainerStateRunning {
- utils.Error(w, http.StatusConflict, errors.Errorf("cannot exec in a container that is not running; container %s is %s", sessionCtr.ID(), state.String()))
+ utils.Error(w, http.StatusConflict, fmt.Errorf("cannot exec in a container that is not running; container %s is %s", sessionCtr.ID(), state.String()))
return
}
@@ -172,12 +174,12 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
}
logErr := func(e error) {
- logrus.Error(errors.Wrapf(e, "error attaching to container %s exec session %s", sessionCtr.ID(), sessionID))
+ logrus.Error(fmt.Errorf("error attaching to container %s exec session %s: %w", sessionCtr.ID(), sessionID, e))
}
- var size *define.TerminalSize
+ var size *resize.TerminalSize
if bodyParams.Tty && (bodyParams.Height > 0 || bodyParams.Width > 0) {
- size = &define.TerminalSize{
+ size = &resize.TerminalSize{
Height: bodyParams.Height,
Width: bodyParams.Width,
}
diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go
index 8c4dea327..2f8d151d8 100644
--- a/pkg/api/handlers/compat/images.go
+++ b/pkg/api/handlers/compat/images.go
@@ -2,6 +2,7 @@ package compat
import (
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -24,7 +25,6 @@ import (
"github.com/containers/storage"
"github.com/gorilla/schema"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -50,7 +50,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
tmpfile, err := ioutil.TempFile("", "api.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
defer os.Remove(tmpfile.Name())
@@ -58,7 +58,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, name)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
@@ -70,22 +70,22 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
}
if err := imageEngine.Save(r.Context(), possiblyNormalizedName, nil, saveOptions); err != nil {
- if errors.Cause(err) == storage.ErrImageUnknown {
- utils.ImageNotFound(w, name, errors.Wrapf(err, "failed to find image %s", name))
+ if errors.Is(err, storage.ErrImageUnknown) {
+ utils.ImageNotFound(w, name, fmt.Errorf("failed to find image %s: %w", name, err))
return
}
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
if err := tmpfile.Close(); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
return
}
rdr, err := os.Open(tmpfile.Name())
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
return
}
defer rdr.Close()
@@ -111,12 +111,12 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
rtc, err := runtime.GetConfig()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
sc := runtime.SystemContext()
@@ -132,7 +132,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
input := handlers.CreateContainerConfig{}
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
@@ -154,7 +154,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
destImage = fmt.Sprintf("%s:%s", query.Repo, query.Tag)
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, destImage)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
destImage = possiblyNormalizedName
@@ -162,10 +162,10 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
commitImage, err := ctr.Commit(r.Context(), destImage, options)
if err != nil && !strings.Contains(err.Error(), "is not running") {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "CommitFailure"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("CommitFailure: %w", err))
return
}
- utils.WriteResponse(w, http.StatusCreated, entities.IDResponse{ID: commitImage.ID()}) // nolint
+ utils.WriteResponse(w, http.StatusCreated, entities.IDResponse{ID: commitImage.ID()})
}
func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
@@ -186,7 +186,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
// fromSrc – Source to import. The value may be a URL from which the image can be retrieved or - to read the image from the request body. This parameter may only be used when importing an image.
@@ -194,13 +194,13 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
if source == "-" {
f, err := ioutil.TempFile("", "api_load.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to create tempfile: %w", err))
return
}
source = f.Name()
if err := SaveFromBody(f, r); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to write temporary file"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to write temporary file: %w", err))
}
}
@@ -208,7 +208,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
if query.Repo != "" {
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, reference)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
reference = possiblyNormalizedName
@@ -229,7 +229,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
imageEngine := abi.ImageEngine{Libpod: runtime}
report, err := imageEngine.Import(r.Context(), opts)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to import tarball"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to import tarball: %w", err))
return
}
// Success
@@ -237,7 +237,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
Status string `json:"status"`
Progress string `json:"progress"`
ProgressDetail map[string]string `json:"progressDetail"`
- Id string `json:"id"` // nolint
+ Id string `json:"id"` //nolint:revive,stylecheck
}{
Status: report.Id,
ProgressDetail: map[string]string{},
@@ -265,13 +265,13 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
// This is where you can override the golang default value for one of fields
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, mergeNameAndTagOrDigest(query.FromImage, query.Tag))
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
@@ -333,7 +333,7 @@ loop: // break out of for/select infinite loop
Total int64 `json:"total,omitempty"`
} `json:"progressDetail,omitempty"`
Error string `json:"error,omitempty"`
- Id string `json:"id,omitempty"` // nolint
+ Id string `json:"id,omitempty"` //nolint:revive,stylecheck
}
select {
case e := <-progress:
@@ -388,7 +388,7 @@ func GetImage(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, name)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
@@ -397,12 +397,12 @@ func GetImage(w http.ResponseWriter, r *http.Request) {
// Here we need to fiddle with the error message because docker-py is looking for "No
// such image" to determine on how to raise the correct exception.
errMsg := strings.ReplaceAll(err.Error(), "image not known", "No such image")
- utils.Error(w, http.StatusNotFound, errors.Errorf("failed to find image %s: %s", name, errMsg))
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s: %s", name, errMsg))
return
}
inspect, err := handlers.ImageDataToImageInspect(r.Context(), newImage)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to convert ImageData to ImageInspect '%s'", inspect.ID))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to convert ImageData to ImageInspect '%s': %w", inspect.ID, err))
return
}
utils.WriteResponse(w, http.StatusOK, inspect)
@@ -421,7 +421,7 @@ func GetImages(w http.ResponseWriter, r *http.Request) {
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if _, found := r.URL.Query()["digests"]; found && query.Digests {
@@ -460,8 +460,6 @@ func GetImages(w http.ResponseWriter, r *http.Request) {
}
func LoadImages(w http.ResponseWriter, r *http.Request) {
- // TODO this is basically wrong
- // TODO ... improve these ^ messages to something useful
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
@@ -474,7 +472,7 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -482,7 +480,7 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
// to load.
f, err := ioutil.TempFile("", "api_load.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to create tempfile: %w", err))
return
}
defer func() {
@@ -492,7 +490,7 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
}
}()
if err := SaveFromBody(f, r); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to write temporary file"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to write temporary file: %w", err))
return
}
@@ -501,12 +499,12 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
loadOptions := entities.ImageLoadOptions{Input: f.Name()}
loadReport, err := imageEngine.Load(r.Context(), loadOptions)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to load image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to load image: %w", err))
return
}
if len(loadReport.Names) < 1 {
- utils.Error(w, http.StatusInternalServerError, errors.Errorf("one or more images are required"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("one or more images are required"))
return
}
@@ -529,7 +527,7 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
// This is where you can override the golang default value for one of fields
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if len(query.Names) == 0 {
@@ -541,7 +539,7 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
for i, img := range query.Names {
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, img)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
images[i] = possiblyNormalizedName
@@ -549,12 +547,12 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
tmpfile, err := ioutil.TempFile("", "api.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
defer os.Remove(tmpfile.Name())
if err := tmpfile.Close(); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
return
}
@@ -568,7 +566,7 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
rdr, err := os.Open(tmpfile.Name())
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
return
}
defer rdr.Close()
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index bcd102901..a9185c3d3 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -3,6 +3,7 @@ package compat
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -27,7 +28,6 @@ import (
"github.com/docker/docker/pkg/jsonmessage"
"github.com/gorilla/schema"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -65,72 +65,75 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
err := os.RemoveAll(filepath.Dir(contextDirectory))
if err != nil {
- logrus.Warn(errors.Wrapf(err, "failed to remove build scratch directory %q", filepath.Dir(contextDirectory)))
+ logrus.Warn(fmt.Errorf("failed to remove build scratch directory %q: %w", filepath.Dir(contextDirectory), err))
}
}()
query := struct {
- AddHosts string `schema:"extrahosts"`
- AdditionalCapabilities string `schema:"addcaps"`
- AllPlatforms bool `schema:"allplatforms"`
- Annotations string `schema:"annotations"`
- AppArmor string `schema:"apparmor"`
- BuildArgs string `schema:"buildargs"`
- CacheFrom string `schema:"cachefrom"`
- CgroupParent string `schema:"cgroupparent"` // nolint
- Compression uint64 `schema:"compression"`
- ConfigureNetwork string `schema:"networkmode"`
- CpuPeriod uint64 `schema:"cpuperiod"` // nolint
- CpuQuota int64 `schema:"cpuquota"` // nolint
- CpuSetCpus string `schema:"cpusetcpus"` // nolint
- CpuSetMems string `schema:"cpusetmems"` // nolint
- CpuShares uint64 `schema:"cpushares"` // nolint
- DNSOptions string `schema:"dnsoptions"`
- DNSSearch string `schema:"dnssearch"`
- DNSServers string `schema:"dnsservers"`
- Devices string `schema:"devices"`
- Dockerfile string `schema:"dockerfile"`
- DropCapabilities string `schema:"dropcaps"`
- Envs []string `schema:"setenv"`
- Excludes string `schema:"excludes"`
- ForceRm bool `schema:"forcerm"`
- From string `schema:"from"`
- HTTPProxy bool `schema:"httpproxy"`
- IdentityLabel bool `schema:"identitylabel"`
- Ignore bool `schema:"ignore"`
- Isolation string `schema:"isolation"`
- Jobs int `schema:"jobs"` // nolint
- LabelOpts string `schema:"labelopts"`
- Labels string `schema:"labels"`
- Layers bool `schema:"layers"`
- LogRusage bool `schema:"rusage"`
- Manifest string `schema:"manifest"`
- MemSwap int64 `schema:"memswap"`
- Memory int64 `schema:"memory"`
- NamespaceOptions string `schema:"nsoptions"`
- NoCache bool `schema:"nocache"`
- OSFeatures []string `schema:"osfeature"`
- OSVersion string `schema:"osversion"`
- OutputFormat string `schema:"outputformat"`
- Platform []string `schema:"platform"`
- Pull bool `schema:"pull"`
- PullPolicy string `schema:"pullpolicy"`
- Quiet bool `schema:"q"`
- Registry string `schema:"registry"`
- Rm bool `schema:"rm"`
- RusageLogFile string `schema:"rusagelogfile"`
- Remote string `schema:"remote"`
- Seccomp string `schema:"seccomp"`
- Secrets string `schema:"secrets"`
- SecurityOpt string `schema:"securityopt"`
- ShmSize int `schema:"shmsize"`
- Squash bool `schema:"squash"`
- TLSVerify bool `schema:"tlsVerify"`
- Tags []string `schema:"t"`
- Target string `schema:"target"`
- Timestamp int64 `schema:"timestamp"`
- Ulimits string `schema:"ulimits"`
- UnsetEnvs []string `schema:"unsetenv"`
+ AddHosts string `schema:"extrahosts"`
+ AdditionalCapabilities string `schema:"addcaps"`
+ AdditionalBuildContexts string `schema:"additionalbuildcontexts"`
+ AllPlatforms bool `schema:"allplatforms"`
+ Annotations string `schema:"annotations"`
+ AppArmor string `schema:"apparmor"`
+ BuildArgs string `schema:"buildargs"`
+ CacheFrom string `schema:"cachefrom"`
+ CgroupParent string `schema:"cgroupparent"`
+ Compression uint64 `schema:"compression"`
+ ConfigureNetwork string `schema:"networkmode"`
+ CPPFlags string `schema:"cppflags"`
+ CpuPeriod uint64 `schema:"cpuperiod"` //nolint:revive,stylecheck
+ CpuQuota int64 `schema:"cpuquota"` //nolint:revive,stylecheck
+ CpuSetCpus string `schema:"cpusetcpus"` //nolint:revive,stylecheck
+ CpuSetMems string `schema:"cpusetmems"` //nolint:revive,stylecheck
+ CpuShares uint64 `schema:"cpushares"` //nolint:revive,stylecheck
+ DNSOptions string `schema:"dnsoptions"`
+ DNSSearch string `schema:"dnssearch"`
+ DNSServers string `schema:"dnsservers"`
+ Devices string `schema:"devices"`
+ Dockerfile string `schema:"dockerfile"`
+ DropCapabilities string `schema:"dropcaps"`
+ Envs []string `schema:"setenv"`
+ Excludes string `schema:"excludes"`
+ ForceRm bool `schema:"forcerm"`
+ From string `schema:"from"`
+ HTTPProxy bool `schema:"httpproxy"`
+ IdentityLabel bool `schema:"identitylabel"`
+ Ignore bool `schema:"ignore"`
+ Isolation string `schema:"isolation"`
+ Jobs int `schema:"jobs"`
+ LabelOpts string `schema:"labelopts"`
+ Labels string `schema:"labels"`
+ Layers bool `schema:"layers"`
+ LogRusage bool `schema:"rusage"`
+ Manifest string `schema:"manifest"`
+ MemSwap int64 `schema:"memswap"`
+ Memory int64 `schema:"memory"`
+ NamespaceOptions string `schema:"nsoptions"`
+ NoCache bool `schema:"nocache"`
+ OmitHistory bool `schema:"omithistory"`
+ OSFeatures []string `schema:"osfeature"`
+ OSVersion string `schema:"osversion"`
+ OutputFormat string `schema:"outputformat"`
+ Platform []string `schema:"platform"`
+ Pull bool `schema:"pull"`
+ PullPolicy string `schema:"pullpolicy"`
+ Quiet bool `schema:"q"`
+ Registry string `schema:"registry"`
+ Rm bool `schema:"rm"`
+ RusageLogFile string `schema:"rusagelogfile"`
+ Remote string `schema:"remote"`
+ Seccomp string `schema:"seccomp"`
+ Secrets string `schema:"secrets"`
+ SecurityOpt string `schema:"securityopt"`
+ ShmSize int `schema:"shmsize"`
+ Squash bool `schema:"squash"`
+ TLSVerify bool `schema:"tlsVerify"`
+ Tags []string `schema:"t"`
+ Target string `schema:"target"`
+ Timestamp int64 `schema:"timestamp"`
+ Ulimits string `schema:"ulimits"`
+ UnsetEnvs []string `schema:"unsetenv"`
}{
Dockerfile: "Dockerfile",
IdentityLabel: true,
@@ -170,7 +173,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
// convert addcaps formats
containerFiles := []string{}
- // Tells if query paramemter `dockerfile` is set or not.
+ // Tells if query parameter `dockerfile` is set or not.
dockerFileSet := false
if utils.IsLibpodRequest(r) && query.Remote != "" {
// The context directory could be a URL. Try to handle that.
@@ -335,7 +338,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
if len(tags) > 0 {
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, tags[0])
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
output = possiblyNormalizedName
@@ -347,7 +350,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
var err error
isolation, err = parseLibPodIsolation(query.Isolation)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to parse isolation"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse isolation: %w", err))
return
}
@@ -364,16 +367,24 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
}
}
- var additionalTags []string // nolint
+ var additionalTags []string
for i := 1; i < len(tags); i++ {
possiblyNormalizedTag, err := utils.NormalizeToDockerHub(r, tags[i])
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
additionalTags = append(additionalTags, possiblyNormalizedTag)
}
+ var additionalBuildContexts = map[string]*buildahDefine.AdditionalBuildContext{}
+ if _, found := r.URL.Query()["additionalbuildcontexts"]; found {
+ if err := json.Unmarshal([]byte(query.AdditionalBuildContexts), &additionalBuildContexts); err != nil {
+ utils.BadRequest(w, "additionalbuildcontexts", query.AdditionalBuildContexts, err)
+ return
+ }
+ }
+
var buildArgs = map[string]string{}
if _, found := r.URL.Query()["buildargs"]; found {
if err := json.Unmarshal([]byte(query.BuildArgs), &buildArgs); err != nil {
@@ -399,6 +410,15 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
}
+ // convert cppflags formats
+ var cppflags = []string{}
+ if _, found := r.URL.Query()["cppflags"]; found {
+ if err := json.Unmarshal([]byte(query.CPPFlags), &cppflags); err != nil {
+ utils.BadRequest(w, "cppflags", query.CPPFlags, err)
+ return
+ }
+ }
+
// convert nsoptions formats
nsoptions := buildah.NamespaceOptions{}
if _, found := r.URL.Query()["nsoptions"]; found {
@@ -467,7 +487,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
con := strings.SplitN(opt, "=", 2)
if len(con) != 2 {
- utils.BadRequest(w, "securityopt", query.SecurityOpt, errors.Errorf("Invalid --security-opt name=value pair: %q", opt))
+ utils.BadRequest(w, "securityopt", query.SecurityOpt, fmt.Errorf("invalid --security-opt name=value pair: %q", opt))
return
}
@@ -479,7 +499,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
case "seccomp":
seccomp = con[1]
default:
- utils.BadRequest(w, "securityopt", query.SecurityOpt, errors.Errorf("Invalid --security-opt 2: %q", opt))
+ utils.BadRequest(w, "securityopt", query.SecurityOpt, fmt.Errorf("invalid --security-opt 2: %q", opt))
return
}
}
@@ -520,7 +540,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
if fromImage != "" {
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, fromImage)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
fromImage = possiblyNormalizedName
@@ -552,11 +572,13 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
buildOptions := buildahDefine.BuildOptions{
- AddCapabilities: addCaps,
- AdditionalTags: additionalTags,
- Annotations: annotations,
- Args: buildArgs,
- AllPlatforms: query.AllPlatforms,
+ AddCapabilities: addCaps,
+ AdditionalBuildContexts: additionalBuildContexts,
+ AdditionalTags: additionalTags,
+ Annotations: annotations,
+ CPPFlags: cppflags,
+ Args: buildArgs,
+ AllPlatforms: query.AllPlatforms,
CommonBuildOpts: &buildah.CommonBuildOptions{
AddHost: addhosts,
ApparmorProfile: apparmor,
@@ -574,6 +596,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
LabelOpts: labelOpts,
Memory: query.Memory,
MemorySwap: query.MemSwap,
+ OmitHistory: query.OmitHistory,
SeccompProfilePath: seccomp,
ShmSize: strconv.Itoa(query.ShmSize),
Ulimit: ulimits,
@@ -674,15 +697,17 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
enc := json.NewEncoder(body)
enc.SetEscapeHTML(true)
+ var stepErrors []string
for {
- m := struct {
+ type BuildResponse struct {
Stream string `json:"stream,omitempty"`
Error *jsonmessage.JSONError `json:"errorDetail,omitempty"`
// NOTE: `error` is being deprecated check https://github.com/moby/moby/blob/master/pkg/jsonmessage/jsonmessage.go#L148
ErrorMessage string `json:"error,omitempty"` // deprecate this slowly
Aux json.RawMessage `json:"aux,omitempty"`
- }{}
+ }
+ m := BuildResponse{}
select {
case e := <-stdout.Chan():
@@ -698,12 +723,27 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
flush()
case e := <-auxout.Chan():
- m.Stream = string(e)
- if err := enc.Encode(m); err != nil {
- stderr.Write([]byte(err.Error()))
+ if !query.Quiet {
+ m.Stream = string(e)
+ if err := enc.Encode(m); err != nil {
+ stderr.Write([]byte(err.Error()))
+ }
+ flush()
+ } else {
+ stepErrors = append(stepErrors, string(e))
}
- flush()
case e := <-stderr.Chan():
+ // Docker-API compat parity: the build failed, so
+ // output all step errors irrespective of the quiet
+ // flag.
+ for _, stepError := range stepErrors {
+ t := BuildResponse{}
+ t.Stream = stepError
+ if err := enc.Encode(t); err != nil {
+ stderr.Write([]byte(err.Error()))
+ }
+ flush()
+ }
m.ErrorMessage = string(e)
m.Error = &jsonmessage.JSONError{
Message: m.ErrorMessage,
@@ -761,7 +801,7 @@ func parseNetworkConfigurationPolicy(network string) buildah.NetworkConfiguratio
}
}
-func parseLibPodIsolation(isolation string) (buildah.Isolation, error) { // nolint
+func parseLibPodIsolation(isolation string) (buildah.Isolation, error) {
if val, err := strconv.Atoi(isolation); err == nil {
return buildah.Isolation(val), nil
}
diff --git a/pkg/api/handlers/compat/images_history.go b/pkg/api/handlers/compat/images_history.go
index 70a11ddc5..ebb5acdd9 100644
--- a/pkg/api/handlers/compat/images_history.go
+++ b/pkg/api/handlers/compat/images_history.go
@@ -1,13 +1,13 @@
package compat
import (
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/api/handlers"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
api "github.com/containers/podman/v4/pkg/api/types"
- "github.com/pkg/errors"
)
func HistoryImage(w http.ResponseWriter, r *http.Request) {
@@ -16,13 +16,13 @@ func HistoryImage(w http.ResponseWriter, r *http.Request) {
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, name)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
newImage, _, err := runtime.LibimageRuntime().LookupImage(possiblyNormalizedName, nil)
if err != nil {
- utils.ImageNotFound(w, possiblyNormalizedName, errors.Wrapf(err, "failed to find image %s", possiblyNormalizedName))
+ utils.ImageNotFound(w, possiblyNormalizedName, fmt.Errorf("failed to find image %s: %w", possiblyNormalizedName, err))
return
}
history, err := newImage.History(r.Context())
diff --git a/pkg/api/handlers/compat/images_prune.go b/pkg/api/handlers/compat/images_prune.go
index 02cadbbbe..cc60ceb1c 100644
--- a/pkg/api/handlers/compat/images_prune.go
+++ b/pkg/api/handlers/compat/images_prune.go
@@ -2,6 +2,7 @@ package compat
import (
"bytes"
+ "errors"
"fmt"
"net/http"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
"github.com/docker/docker/api/types"
- "github.com/pkg/errors"
)
func PruneImages(w http.ResponseWriter, r *http.Request) {
@@ -22,7 +22,7 @@ func PruneImages(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
diff --git a/pkg/api/handlers/compat/images_push.go b/pkg/api/handlers/compat/images_push.go
index 6765c30b6..bb82ef10d 100644
--- a/pkg/api/handlers/compat/images_push.go
+++ b/pkg/api/handlers/compat/images_push.go
@@ -2,6 +2,7 @@ package compat
import (
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -17,7 +18,6 @@ import (
"github.com/containers/storage"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -28,7 +28,7 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
digestFile, err := ioutil.TempFile("", "digest.txt")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
defer digestFile.Close()
@@ -50,7 +50,7 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -63,19 +63,19 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
}
if _, err := utils.ParseStorageReference(imageName); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "image source %q is not a containers-storage-transport reference", imageName))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("image source %q is not a containers-storage-transport reference: %w", imageName, err))
return
}
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, imageName)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
imageName = possiblyNormalizedName
localImage, _, err := runtime.LibimageRuntime().LookupImage(possiblyNormalizedName, nil)
if err != nil {
- utils.ImageNotFound(w, imageName, errors.Wrapf(err, "failed to find image %s", imageName))
+ utils.ImageNotFound(w, imageName, fmt.Errorf("failed to find image %s: %w", imageName, err))
return
}
rawManifest, _, err := localImage.Manifest(r.Context())
diff --git a/pkg/api/handlers/compat/images_remove.go b/pkg/api/handlers/compat/images_remove.go
index 35bcb36aa..b59bfd0b1 100644
--- a/pkg/api/handlers/compat/images_remove.go
+++ b/pkg/api/handlers/compat/images_remove.go
@@ -1,6 +1,8 @@
package compat
import (
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -10,7 +12,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/storage"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func RemoveImage(w http.ResponseWriter, r *http.Request) {
@@ -25,7 +26,7 @@ func RemoveImage(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if _, found := r.URL.Query()["noprune"]; found {
@@ -36,7 +37,7 @@ func RemoveImage(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, name)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
@@ -48,12 +49,12 @@ func RemoveImage(w http.ResponseWriter, r *http.Request) {
report, rmerrors := imageEngine.Remove(r.Context(), []string{possiblyNormalizedName}, options)
if len(rmerrors) > 0 && rmerrors[0] != nil {
err := rmerrors[0]
- if errors.Cause(err) == storage.ErrImageUnknown {
- utils.ImageNotFound(w, name, errors.Wrapf(err, "failed to find image %s", name))
+ if errors.Is(err, storage.ErrImageUnknown) {
+ utils.ImageNotFound(w, name, fmt.Errorf("failed to find image %s: %w", name, err))
return
}
- if errors.Cause(err) == storage.ErrImageUsedByContainer {
- utils.Error(w, http.StatusConflict, errors.Wrapf(err, "image %s is in use", name))
+ if errors.Is(err, storage.ErrImageUsedByContainer) {
+ utils.Error(w, http.StatusConflict, fmt.Errorf("image %s is in use: %w", name, err))
return
}
utils.Error(w, http.StatusInternalServerError, err)
diff --git a/pkg/api/handlers/compat/images_save.go b/pkg/api/handlers/compat/images_save.go
index b39c719a0..6314756f6 100644
--- a/pkg/api/handlers/compat/images_save.go
+++ b/pkg/api/handlers/compat/images_save.go
@@ -6,7 +6,7 @@ import (
"os"
)
-func SaveFromBody(f *os.File, r *http.Request) error { // nolint
+func SaveFromBody(f *os.File, r *http.Request) error {
if _, err := io.Copy(f, r.Body); err != nil {
return err
}
diff --git a/pkg/api/handlers/compat/images_search.go b/pkg/api/handlers/compat/images_search.go
index 9f41c1b4f..a6fd3a3a1 100644
--- a/pkg/api/handlers/compat/images_search.go
+++ b/pkg/api/handlers/compat/images_search.go
@@ -13,7 +13,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/storage"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func SearchImages(w http.ResponseWriter, r *http.Request) {
@@ -30,7 +29,7 @@ func SearchImages(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
diff --git a/pkg/api/handlers/compat/images_tag.go b/pkg/api/handlers/compat/images_tag.go
index da0a04e84..a1da7a4b9 100644
--- a/pkg/api/handlers/compat/images_tag.go
+++ b/pkg/api/handlers/compat/images_tag.go
@@ -1,6 +1,7 @@
package compat
import (
+ "errors"
"fmt"
"net/http"
@@ -8,7 +9,6 @@ import (
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
api "github.com/containers/podman/v4/pkg/api/types"
- "github.com/pkg/errors"
)
func TagImage(w http.ResponseWriter, r *http.Request) {
@@ -17,7 +17,7 @@ func TagImage(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, name)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
@@ -25,7 +25,7 @@ func TagImage(w http.ResponseWriter, r *http.Request) {
lookupOptions := &libimage.LookupImageOptions{ManifestList: true}
newImage, _, err := runtime.LibimageRuntime().LookupImage(possiblyNormalizedName, lookupOptions)
if err != nil {
- utils.ImageNotFound(w, name, errors.Wrapf(err, "failed to find image %s", name))
+ utils.ImageNotFound(w, name, fmt.Errorf("failed to find image %s: %w", name, err))
return
}
@@ -42,7 +42,7 @@ func TagImage(w http.ResponseWriter, r *http.Request) {
possiblyNormalizedTag, err := utils.NormalizeToDockerHub(r, tagName)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}
diff --git a/pkg/api/handlers/compat/info.go b/pkg/api/handlers/compat/info.go
index 85547570a..d82513284 100644
--- a/pkg/api/handlers/compat/info.go
+++ b/pkg/api/handlers/compat/info.go
@@ -22,7 +22,6 @@ import (
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/google/uuid"
- "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
@@ -33,18 +32,18 @@ func GetInfo(w http.ResponseWriter, r *http.Request) {
infoData, err := runtime.Info()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to obtain system memory info"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to obtain system memory info: %w", err))
return
}
configInfo, err := runtime.GetConfig()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to obtain runtime config"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to obtain runtime config: %w", err))
return
}
versionInfo, err := define.GetVersion()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to obtain podman versions"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to obtain podman versions: %w", err))
return
}
stateInfo := getContainersState(runtime)
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index 6fdd5c6a7..65177218a 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -2,6 +2,7 @@ package compat
import (
"encoding/json"
+ "errors"
"fmt"
"net"
"net/http"
@@ -19,7 +20,6 @@ import (
dockerNetwork "github.com/docker/docker/api/types/network"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -36,7 +36,7 @@ func InspectNetwork(w http.ResponseWriter, r *http.Request) {
}
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -133,7 +133,7 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -161,12 +161,13 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
func CreateNetwork(w http.ResponseWriter, r *http.Request) {
var (
- networkCreate types.NetworkCreateRequest
- network nettypes.Network
+ networkCreate types.NetworkCreateRequest
+ network nettypes.Network
+ responseWarning string
)
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
if err := json.NewDecoder(r.Body).Decode(&networkCreate); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
@@ -179,8 +180,40 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
network.Internal = networkCreate.Internal
network.IPv6Enabled = networkCreate.EnableIPv6
- // FIXME use docker options and convert them to valid libpod options
- // network.Options = networkCreate.Options
+ network.Options = make(map[string]string)
+
+ // TODO: we should consider making these constants in c/common/libnetwork/types
+ for opt, optVal := range networkCreate.Options {
+ switch opt {
+ case "mtu":
+ fallthrough
+ case "com.docker.network.driver.mtu":
+ if network.Driver == nettypes.BridgeNetworkDriver {
+ network.Options["mtu"] = optVal
+ }
+ case "icc":
+ fallthrough
+ case "com.docker.network.bridge.enable_icc":
+ // TODO: needs to be implemented
+ if network.Driver == nettypes.BridgeNetworkDriver {
+ responseWarning = "com.docker.network.bridge.enable_icc is not currently implemented"
+ }
+ case "com.docker.network.bridge.name":
+ if network.Driver == nettypes.BridgeNetworkDriver {
+ network.NetworkInterface = optVal
+ }
+ case "mode":
+ if network.Driver == nettypes.MacVLANNetworkDriver || network.Driver == nettypes.IPVLANNetworkDriver {
+ network.Options[opt] = optVal
+ }
+ case "parent":
+ if network.Driver == nettypes.MacVLANNetworkDriver || network.Driver == nettypes.IPVLANNetworkDriver {
+ network.NetworkInterface = optVal
+ }
+ default:
+ responseWarning = "\"" + opt + ": " + optVal + "\" is not a recognized option"
+ }
+ }
// dns is only enabled for the bridge driver
if network.Driver == nettypes.BridgeNetworkDriver {
@@ -194,7 +227,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
var err error
subnet, err := nettypes.ParseCIDR(conf.Subnet)
if err != nil {
- utils.InternalServerError(w, errors.Wrap(err, "failed to parse subnet"))
+ utils.InternalServerError(w, fmt.Errorf("failed to parse subnet: %w", err))
return
}
s.Subnet = subnet
@@ -202,7 +235,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
if len(conf.Gateway) > 0 {
gw := net.ParseIP(conf.Gateway)
if gw == nil {
- utils.InternalServerError(w, errors.Errorf("failed to parse gateway ip %s", conf.Gateway))
+ utils.InternalServerError(w, fmt.Errorf("failed to parse gateway ip %s", conf.Gateway))
return
}
s.Gateway = gw
@@ -210,17 +243,17 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
if len(conf.IPRange) > 0 {
_, net, err := net.ParseCIDR(conf.IPRange)
if err != nil {
- utils.InternalServerError(w, errors.Wrap(err, "failed to parse ip range"))
+ utils.InternalServerError(w, fmt.Errorf("failed to parse ip range: %w", err))
return
}
startIP, err := netutil.FirstIPInSubnet(net)
if err != nil {
- utils.InternalServerError(w, errors.Wrap(err, "failed to get first ip in range"))
+ utils.InternalServerError(w, fmt.Errorf("failed to get first ip in range: %w", err))
return
}
lastIP, err := netutil.LastIPInSubnet(net)
if err != nil {
- utils.InternalServerError(w, errors.Wrap(err, "failed to get last ip in range"))
+ utils.InternalServerError(w, fmt.Errorf("failed to get last ip in range: %w", err))
return
}
s.LeaseRange = &nettypes.LeaseRange{
@@ -242,9 +275,10 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
body := struct {
ID string `json:"Id"`
- Warning string
+ Warning string `json:"Warning"`
}{
- ID: newNetwork.ID,
+ ID: newNetwork.ID,
+ Warning: responseWarning,
}
utils.WriteResponse(w, http.StatusCreated, body)
}
@@ -262,7 +296,7 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -278,12 +312,12 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
return
}
if len(reports) == 0 {
- utils.Error(w, http.StatusInternalServerError, errors.Errorf("internal error"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("internal error"))
return
}
report := reports[0]
if report.Err != nil {
- if errors.Cause(report.Err) == define.ErrNoSuchNetwork {
+ if errors.Is(report.Err, define.ErrNoSuchNetwork) {
utils.Error(w, http.StatusNotFound, define.ErrNoSuchNetwork)
return
}
@@ -300,7 +334,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
var netConnect types.NetworkConnect
if err := json.NewDecoder(r.Body).Decode(&netConnect); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
@@ -317,7 +351,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
staticIP := net.ParseIP(netConnect.EndpointConfig.IPAddress)
if staticIP == nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Errorf("failed to parse the ip address %q", netConnect.EndpointConfig.IPAddress))
+ fmt.Errorf("failed to parse the ip address %q", netConnect.EndpointConfig.IPAddress))
return
}
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
@@ -329,7 +363,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
staticIP := net.ParseIP(netConnect.EndpointConfig.IPAMConfig.IPv4Address)
if staticIP == nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Errorf("failed to parse the ipv4 address %q", netConnect.EndpointConfig.IPAMConfig.IPv4Address))
+ fmt.Errorf("failed to parse the ipv4 address %q", netConnect.EndpointConfig.IPAMConfig.IPv4Address))
return
}
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
@@ -339,7 +373,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
staticIP := net.ParseIP(netConnect.EndpointConfig.IPAMConfig.IPv6Address)
if staticIP == nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Errorf("failed to parse the ipv6 address %q", netConnect.EndpointConfig.IPAMConfig.IPv6Address))
+ fmt.Errorf("failed to parse the ipv6 address %q", netConnect.EndpointConfig.IPAMConfig.IPv6Address))
return
}
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
@@ -350,7 +384,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
staticMac, err := net.ParseMAC(netConnect.EndpointConfig.MacAddress)
if err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Errorf("failed to parse the mac address %q", netConnect.EndpointConfig.IPAMConfig.IPv6Address))
+ fmt.Errorf("failed to parse the mac address %q", netConnect.EndpointConfig.IPAMConfig.IPv6Address))
return
}
netOpts.StaticMAC = nettypes.HardwareAddr(staticMac)
@@ -358,11 +392,11 @@ func Connect(w http.ResponseWriter, r *http.Request) {
}
err := runtime.ConnectContainerToNetwork(netConnect.Container, name, netOpts)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, netConnect.Container, err)
return
}
- if errors.Cause(err) == define.ErrNoSuchNetwork {
+ if errors.Is(err, define.ErrNoSuchNetwork) {
utils.Error(w, http.StatusNotFound, err)
return
}
@@ -378,18 +412,18 @@ func Disconnect(w http.ResponseWriter, r *http.Request) {
var netDisconnect types.NetworkDisconnect
if err := json.NewDecoder(r.Body).Decode(&netDisconnect); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
name := utils.GetName(r)
err := runtime.DisconnectContainerFromNetwork(netDisconnect.Container, name, netDisconnect.Force)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.Error(w, http.StatusNotFound, err)
return
}
- if errors.Cause(err) == define.ErrNoSuchNetwork {
+ if errors.Is(err, define.ErrNoSuchNetwork) {
utils.Error(w, http.StatusNotFound, err)
return
}
@@ -404,7 +438,7 @@ func Prune(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
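
For reference, the pattern applied throughout these compat handlers: github.com/pkg/errors wrapping and cause-checking is replaced with standard-library error wrapping and matching. A minimal before/after sketch, illustrative only and not part of the commit (define.ErrNoSuchNetwork stands in for whichever sentinel error a given handler checks):

    // before: github.com/pkg/errors
    //   err = errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())
    //   if errors.Cause(err) == define.ErrNoSuchNetwork { ... }

    // after: fmt.Errorf with %w keeps the cause in the wrap chain, and
    // errors.Is unwraps the whole chain, so nested wrapping still matches.
    err = fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err)
    if errors.Is(err, define.ErrNoSuchNetwork) {
        utils.Error(w, http.StatusNotFound, err)
        return
    }
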
diff --git a/pkg/api/handlers/compat/resize.go b/pkg/api/handlers/compat/resize.go
index ce7340f62..a2caf6e35 100644
--- a/pkg/api/handlers/compat/resize.go
+++ b/pkg/api/handlers/compat/resize.go
@@ -1,17 +1,18 @@
package compat
import (
+ "errors"
"fmt"
"net/http"
"strings"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
api "github.com/containers/podman/v4/pkg/api/types"
"github.com/gorilla/mux"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func ResizeTTY(w http.ResponseWriter, r *http.Request) {
@@ -28,11 +29,11 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
- sz := define.TerminalSize{
+ sz := resize.TerminalSize{
Width: query.Width,
Height: query.Height,
}
@@ -47,8 +48,8 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
return
}
if err := ctnr.AttachResize(sz); err != nil {
- if errors.Cause(err) != define.ErrCtrStateInvalid {
- utils.InternalServerError(w, errors.Wrapf(err, "cannot resize container"))
+ if !errors.Is(err, define.ErrCtrStateInvalid) {
+ utils.InternalServerError(w, fmt.Errorf("cannot resize container: %w", err))
} else {
utils.Error(w, http.StatusConflict, err)
}
@@ -65,15 +66,15 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
return
}
if state, err := ctnr.State(); err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "cannot obtain session container state"))
+ utils.InternalServerError(w, fmt.Errorf("cannot obtain session container state: %w", err))
return
} else if state != define.ContainerStateRunning && !query.IgnoreNotRunning {
utils.Error(w, http.StatusConflict, fmt.Errorf("container %q in wrong state %q", name, state.String()))
return
}
if err := ctnr.ExecResize(name, sz); err != nil {
- if errors.Cause(err) != define.ErrExecSessionStateInvalid || !query.IgnoreNotRunning {
- utils.InternalServerError(w, errors.Wrapf(err, "cannot resize session"))
+ if !errors.Is(err, define.ErrExecSessionStateInvalid) || !query.IgnoreNotRunning {
+ utils.InternalServerError(w, fmt.Errorf("cannot resize session: %w", err))
return
}
}
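
The resize handlers above now take the terminal-size type from containers/common instead of libpod's define package. A minimal sketch of the new construction, assuming only what the hunk shows:

    import "github.com/containers/common/pkg/resize"

    // Width and Height come from the decoded query struct shown above.
    sz := resize.TerminalSize{
        Width:  query.Width,
        Height: query.Height,
    }
    // sz is then handed to ctnr.AttachResize(sz) or ctnr.ExecResize(name, sz), as before.
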
diff --git a/pkg/api/handlers/compat/secrets.go b/pkg/api/handlers/compat/secrets.go
index 5031bf76b..13b3c4e24 100644
--- a/pkg/api/handlers/compat/secrets.go
+++ b/pkg/api/handlers/compat/secrets.go
@@ -4,7 +4,10 @@ import (
"bytes"
"encoding/base64"
"encoding/json"
+ "errors"
+ "fmt"
"net/http"
+ "strings"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
@@ -12,14 +15,13 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
func ListSecrets(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
filtersMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
ic := abi.ContainerEngine{Libpod: runtime}
@@ -106,11 +108,11 @@ func CreateSecret(w http.ResponseWriter, r *http.Request) {
}{}
if err := json.NewDecoder(r.Body).Decode(&createParams); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
if len(createParams.Labels) > 0 {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(errors.New("bad parameter"), "labels not supported"))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("labels not supported: %w", errors.New("bad parameter")))
return
}
@@ -121,7 +123,7 @@ func CreateSecret(w http.ResponseWriter, r *http.Request) {
ic := abi.ContainerEngine{Libpod: runtime}
report, err := ic.SecretCreate(r.Context(), createParams.Name, reader, opts)
if err != nil {
- if errors.Cause(err).Error() == "secret name in use" {
+ if strings.Contains(err.Error(), "secret name in use") {
utils.Error(w, http.StatusConflict, err)
return
}
diff --git a/pkg/api/handlers/compat/version.go b/pkg/api/handlers/compat/version.go
index cfc3468c2..0d34fbd98 100644
--- a/pkg/api/handlers/compat/version.go
+++ b/pkg/api/handlers/compat/version.go
@@ -13,7 +13,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/entities/types"
"github.com/containers/podman/v4/version"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -28,7 +27,7 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) {
info, err := runtime.Info()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to obtain system memory info"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to obtain system memory info: %w", err))
return
}
diff --git a/pkg/api/handlers/compat/volumes.go b/pkg/api/handlers/compat/volumes.go
index ff0a7af02..fe6ae36aa 100644
--- a/pkg/api/handlers/compat/volumes.go
+++ b/pkg/api/handlers/compat/volumes.go
@@ -3,6 +3,8 @@ package compat
import (
"bytes"
"encoding/json"
+ "errors"
+ "fmt"
"net/http"
"net/url"
"time"
@@ -18,7 +20,6 @@ import (
docker_api_types "github.com/docker/docker/api/types"
docker_api_types_volume "github.com/docker/docker/api/types/volume"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func ListVolumes(w http.ResponseWriter, r *http.Request) {
@@ -27,7 +28,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
filtersMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -36,7 +37,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
for filter := range *filtersMap {
if filter == "opts" {
utils.Error(w, http.StatusInternalServerError,
- errors.Errorf("unsupported libpod filters passed to docker endpoint"))
+ fmt.Errorf("unsupported libpod filters passed to docker endpoint"))
return
}
}
@@ -86,13 +87,13 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
query := struct{}{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
// decode params from body
input := docker_api_types_volume.VolumeCreateBody{}
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
@@ -103,7 +104,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
if len(input.Name) != 0 {
// See if the volume exists already
existingVolume, err = runtime.GetVolume(input.Name)
- if err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
+ if err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
utils.InternalServerError(w, err)
return
}
@@ -219,7 +220,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -239,7 +240,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
if err == nil {
// As above, we do not pass `force` from the query parameters here
if err := runtime.RemoveVolume(r.Context(), vol, false, query.Timeout); err != nil {
- if errors.Cause(err) == define.ErrVolumeBeingUsed {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
utils.Error(w, http.StatusConflict, err)
} else {
utils.InternalServerError(w, err)
@@ -264,14 +265,14 @@ func PruneVolumes(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
f := (url.Values)(*filterMap)
filterFuncs, err := filters.GeneratePruneVolumeFilters(f)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse filters for %s", f.Encode()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse filters for %s: %w", f.Encode(), err))
return
}
diff --git a/pkg/api/handlers/libpod/containers.go b/pkg/api/handlers/libpod/containers.go
index 6b5bee403..5d85d4009 100644
--- a/pkg/api/handlers/libpod/containers.go
+++ b/pkg/api/handlers/libpod/containers.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -35,7 +35,7 @@ func ContainerExists(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -45,7 +45,7 @@ func ContainerExists(w http.ResponseWriter, r *http.Request) {
report, err := containerEngine.ContainerExists(r.Context(), name, options)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
@@ -75,12 +75,12 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to decode filter parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode filter parameters for %s: %w", r.URL.String(), err))
return
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -115,10 +115,6 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
- if len(pss) == 0 {
- utils.WriteResponse(w, http.StatusOK, "[]")
- return
- }
utils.WriteResponse(w, http.StatusOK, pss)
}
@@ -131,7 +127,7 @@ func GetContainer(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
@@ -225,7 +221,7 @@ func Checkpoint(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -311,7 +307,7 @@ func Restore(w http.ResponseWriter, r *http.Request) {
// override any golang type defaults
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -348,11 +344,11 @@ func Restore(w http.ResponseWriter, r *http.Request) {
ir := abi.ImageEngine{Libpod: runtime}
report, err := ir.Exists(r.Context(), name)
if err != nil {
- utils.Error(w, http.StatusNotFound, errors.Wrapf(err, "failed to find container or checkpoint image %s", name))
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find container or checkpoint image %s: %w", name, err))
return
}
if !report.Value {
- utils.Error(w, http.StatusNotFound, errors.Errorf("failed to find container or checkpoint image %s", name))
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find container or checkpoint image %s", name))
return
}
}
@@ -384,7 +380,7 @@ func InitContainer(w http.ResponseWriter, r *http.Request) {
return
}
err = ctr.Init(r.Context(), ctr.PodID() != "")
- if errors.Cause(err) == define.ErrCtrStateInvalid {
+ if errors.Is(err, define.ErrCtrStateInvalid) {
utils.Error(w, http.StatusNotModified, err)
return
}
@@ -404,7 +400,7 @@ func ShouldRestart(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
report, err := containerEngine.ShouldRestart(r.Context(), name)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go
index 4fff9e345..e4964d602 100644
--- a/pkg/api/handlers/libpod/containers_create.go
+++ b/pkg/api/handlers/libpod/containers_create.go
@@ -3,6 +3,7 @@ package libpod
import (
"context"
"encoding/json"
+ "fmt"
"net/http"
"strconv"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/specgen/generate"
"github.com/containers/podman/v4/pkg/specgenutil"
- "github.com/pkg/errors"
)
// CreateContainer takes a specgenerator and makes a container. It returns
@@ -34,7 +34,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
}
if err := json.NewDecoder(r.Body).Decode(&sg); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decode(): %w", err))
return
}
if sg.Passwd == nil {
diff --git a/pkg/api/handlers/libpod/containers_stats.go b/pkg/api/handlers/libpod/containers_stats.go
index d34254fd7..687d61c12 100644
--- a/pkg/api/handlers/libpod/containers_stats.go
+++ b/pkg/api/handlers/libpod/containers_stats.go
@@ -2,6 +2,8 @@ package libpod
import (
"encoding/json"
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/common/pkg/cgroups"
@@ -12,7 +14,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -24,8 +25,7 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) {
if rootless.IsRootless() {
// if so, then verify cgroup v2 available (more expensive check)
if isV2, _ := cgroups.IsCgroup2UnifiedMode(); !isV2 {
- msg := "Container stats resource only available for cgroup v2"
- utils.Error(w, http.StatusConflict, errors.New(msg))
+ utils.Error(w, http.StatusConflict, errors.New("container stats resource only available for cgroup v2"))
return
}
}
@@ -39,7 +39,7 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) {
Interval: 5,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -66,7 +66,7 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) {
flusher.Flush()
}
- // Setup JSON encoder for streaming.
+ // Set up JSON encoder for streaming.
coder := json.NewEncoder(w)
coder.SetEscapeHTML(true)
diff --git a/pkg/api/handlers/libpod/generate.go b/pkg/api/handlers/libpod/generate.go
index b1ac6a65a..48c4c59e1 100644
--- a/pkg/api/handlers/libpod/generate.go
+++ b/pkg/api/handlers/libpod/generate.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func GenerateSystemd(w http.ResponseWriter, r *http.Request) {
@@ -37,7 +37,7 @@ func GenerateSystemd(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -76,7 +76,7 @@ func GenerateSystemd(w http.ResponseWriter, r *http.Request) {
report, err := containerEngine.GenerateSystemd(r.Context(), utils.GetName(r), options)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error generating systemd units"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error generating systemd units: %w", err))
return
}
@@ -94,7 +94,7 @@ func GenerateKube(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -102,7 +102,7 @@ func GenerateKube(w http.ResponseWriter, r *http.Request) {
options := entities.GenerateKubeOptions{Service: query.Service}
report, err := containerEngine.GenerateKube(r.Context(), query.Names, options)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error generating YAML"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error generating YAML: %w", err))
return
}
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index efcbe9d77..ed1c65f8e 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -21,13 +22,14 @@ import (
api "github.com/containers/podman/v4/pkg/api/types"
"github.com/containers/podman/v4/pkg/auth"
"github.com/containers/podman/v4/pkg/domain/entities"
+ "github.com/containers/podman/v4/pkg/domain/entities/reports"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
+ domainUtils "github.com/containers/podman/v4/pkg/domain/utils"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/util"
utils2 "github.com/containers/podman/v4/utils"
"github.com/containers/storage"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
// Commit
@@ -48,11 +50,11 @@ func ImageExists(w http.ResponseWriter, r *http.Request) {
ir := abi.ImageEngine{Libpod: runtime}
report, err := ir.Exists(r.Context(), name)
if err != nil {
- utils.Error(w, http.StatusNotFound, errors.Wrapf(err, "failed to find image %s", name))
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s: %w", name, err))
return
}
if !report.Value {
- utils.Error(w, http.StatusNotFound, errors.Errorf("failed to find image %s", name))
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s", name))
return
}
utils.WriteResponse(w, http.StatusNoContent, "")
@@ -68,18 +70,18 @@ func ImageTree(w http.ResponseWriter, r *http.Request) {
WhatRequires: false,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
ir := abi.ImageEngine{Libpod: runtime}
options := entities.ImageTreeOptions{WhatRequires: query.WhatRequires}
report, err := ir.Tree(r.Context(), name, options)
if err != nil {
- if errors.Cause(err) == storage.ErrImageUnknown {
- utils.Error(w, http.StatusNotFound, errors.Wrapf(err, "failed to find image %s", name))
+ if errors.Is(err, storage.ErrImageUnknown) {
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s: %w", name, err))
return
}
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to generate image tree for %s", name))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to generate image tree for %s: %w", name, err))
return
}
utils.WriteResponse(w, http.StatusOK, report)
@@ -89,13 +91,13 @@ func GetImage(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
newImage, err := utils.GetImage(r, name)
if err != nil {
- utils.Error(w, http.StatusNotFound, errors.Wrapf(err, "failed to find image %s", name))
+ utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s: %w", name, err))
return
}
options := &libimage.InspectOptions{WithParent: true, WithSize: true}
inspect, err := newImage.Inspect(r.Context(), options)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed in inspect image %s", inspect.ID))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed in inspect image %s: %w", inspect.ID, err))
return
}
utils.WriteResponse(w, http.StatusOK, inspect)
@@ -115,15 +117,13 @@ func PruneImages(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.
- Wrapf(err, "failed to decode filter parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to decode filter parameters for %s: %w", r.URL.String(), err))
return
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.
- Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -172,7 +172,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)
@@ -186,23 +186,23 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
case define.OCIArchive, define.V2s2Archive:
tmpfile, err := ioutil.TempFile("", "api.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
output = tmpfile.Name()
if err := tmpfile.Close(); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
return
}
case define.OCIManifestDir, define.V2s2ManifestDir:
tmpdir, err := ioutil.TempDir("", "save")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempdir"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempdir: %w", err))
return
}
output = tmpdir
default:
- utils.Error(w, http.StatusInternalServerError, errors.Errorf("unknown format %q", query.Format))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unknown format %q", query.Format))
return
}
@@ -231,7 +231,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
}
rdr, err := os.Open(output)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
return
}
defer rdr.Close()
@@ -252,20 +252,20 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
// References are mandatory!
if len(query.References) == 0 {
- utils.Error(w, http.StatusBadRequest, errors.New("No references"))
+ utils.Error(w, http.StatusBadRequest, errors.New("no references"))
return
}
// Format is mandatory! Currently, we only support multi-image docker
// archives.
if len(query.References) > 1 && query.Format != define.V2s2Archive {
- utils.Error(w, http.StatusInternalServerError, errors.Errorf("multi-image archives must use format of %s", define.V2s2Archive))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("multi-image archives must use format of %s", define.V2s2Archive))
return
}
@@ -283,23 +283,23 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
case define.V2s2Archive, define.OCIArchive:
tmpfile, err := ioutil.TempFile("", "api.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
output = tmpfile.Name()
if err := tmpfile.Close(); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
return
}
case define.OCIManifestDir, define.V2s2ManifestDir:
tmpdir, err := ioutil.TempDir("", "save")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tmpdir"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tmpdir: %w", err))
return
}
output = tmpdir
default:
- utils.Error(w, http.StatusInternalServerError, errors.Errorf("unsupported format %q", query.Format))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unsupported format %q", query.Format))
return
}
defer os.RemoveAll(output)
@@ -321,7 +321,7 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
rdr, err := os.Open(output)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
return
}
defer rdr.Close()
@@ -333,7 +333,7 @@ func ImagesLoad(w http.ResponseWriter, r *http.Request) {
tmpfile, err := ioutil.TempFile("", "libpod-images-load.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
defer os.Remove(tmpfile.Name())
@@ -342,7 +342,7 @@ func ImagesLoad(w http.ResponseWriter, r *http.Request) {
tmpfile.Close()
if err != nil && err != io.EOF {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to write archive to temporary file"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to write archive to temporary file: %w", err))
return
}
@@ -351,7 +351,7 @@ func ImagesLoad(w http.ResponseWriter, r *http.Request) {
loadOptions := entities.ImageLoadOptions{Input: tmpfile.Name()}
loadReport, err := imageEngine.Load(r.Context(), loadOptions)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to load image"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to load image: %w", err))
return
}
utils.WriteResponse(w, http.StatusOK, loadReport)
@@ -373,7 +373,7 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -382,14 +382,14 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) {
if len(query.URL) == 0 {
tmpfile, err := ioutil.TempFile("", "libpod-images-import.tar")
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
return
}
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()
if _, err := io.Copy(tmpfile, r.Body); err != nil && err != io.EOF {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to write archive to temporary file"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to write archive to temporary file: %w", err))
return
}
@@ -409,7 +409,7 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) {
}
report, err := imageEngine.Import(r.Context(), importOptions)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to import tarball"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to import tarball: %w", err))
return
}
@@ -422,15 +422,16 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
query := struct {
- Destination string `schema:"destination"`
- TLSVerify bool `schema:"tlsVerify"`
- Format string `schema:"format"`
- All bool `schema:"all"`
+ All bool `schema:"all"`
+ Destination string `schema:"destination"`
+ Format string `schema:"format"`
+ RemoveSignatures bool `schema:"removeSignatures"`
+ TLSVerify bool `schema:"tlsVerify"`
}{
// This is where you can override the golang default value for one of fields
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -462,12 +463,13 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
password = authconf.Password
}
options := entities.ImagePushOptions{
- Authfile: authfile,
- Username: username,
- Password: password,
- Format: query.Format,
- All: query.All,
- Quiet: true,
+ All: query.All,
+ Authfile: authfile,
+ Format: query.Format,
+ Password: password,
+ Quiet: true,
+ RemoveSignatures: query.RemoveSignatures,
+ Username: username,
}
if _, found := r.URL.Query()["tlsVerify"]; found {
options.SkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
@@ -475,7 +477,7 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
imageEngine := abi.ImageEngine{Libpod: runtime}
if err := imageEngine.Push(context.Background(), source, destination, options); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "error pushing image %q", destination))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("error pushing image %q: %w", destination, err))
return
}
@@ -505,12 +507,12 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
rtc, err := runtime.GetConfig()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to get runtime config"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to get runtime config: %w", err))
return
}
sc := runtime.SystemContext()
@@ -528,7 +530,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
case "docker":
mimeType = manifest.DockerV2Schema2MediaType
default:
- utils.InternalServerError(w, errors.Errorf("unrecognized image format %q", query.Format))
+ utils.InternalServerError(w, fmt.Errorf("unrecognized image format %q", query.Format))
return
}
options.CommitOptions = buildah.CommitOptions{
@@ -557,10 +559,10 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
}
commitImage, err := ctr.Commit(r.Context(), destImage, options)
if err != nil && !strings.Contains(err.Error(), "is not running") {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "CommitFailure"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("CommitFailure: %w", err))
return
}
- utils.WriteResponse(w, http.StatusOK, entities.IDResponse{ID: commitImage.ID()}) // nolint
+ utils.WriteResponse(w, http.StatusOK, entities.IDResponse{ID: commitImage.ID()})
}
func UntagImage(w http.ResponseWriter, r *http.Request) {
@@ -596,8 +598,8 @@ func UntagImage(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
if err := imageEngine.Untag(r.Context(), name, tags, opts); err != nil {
- if errors.Cause(err) == storage.ErrImageUnknown {
- utils.ImageNotFound(w, name, errors.Wrapf(err, "failed to find image %s", name))
+ if errors.Is(err, storage.ErrImageUnknown) {
+ utils.ImageNotFound(w, name, fmt.Errorf("failed to find image %s: %w", name, err))
} else {
utils.Error(w, http.StatusInternalServerError, err)
}
@@ -611,18 +613,19 @@ func ImagesBatchRemove(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
query := struct {
- All bool `schema:"all"`
- Force bool `schema:"force"`
- Ignore bool `schema:"ignore"`
- Images []string `schema:"images"`
+ All bool `schema:"all"`
+ Force bool `schema:"force"`
+ Ignore bool `schema:"ignore"`
+ LookupManifest bool `schema:"lookupManifest"`
+ Images []string `schema:"images"`
}{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
- opts := entities.ImageRemoveOptions{All: query.All, Force: query.Force, Ignore: query.Ignore}
+ opts := entities.ImageRemoveOptions{All: query.All, Force: query.Force, Ignore: query.Ignore, LookupManifest: query.LookupManifest}
imageEngine := abi.ImageEngine{Libpod: runtime}
rmReport, rmErrors := imageEngine.Remove(r.Context(), query.Images, opts)
strErrs := errorhandling.ErrorsToStrings(rmErrors)
@@ -635,17 +638,18 @@ func ImagesRemove(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
query := struct {
- Force bool `schema:"force"`
+ Force bool `schema:"force"`
+ LookupManifest bool `schema:"lookupManifest"`
}{
Force: false,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
- opts := entities.ImageRemoveOptions{Force: query.Force}
+ opts := entities.ImageRemoveOptions{Force: query.Force, LookupManifest: query.LookupManifest}
imageEngine := abi.ImageEngine{Libpod: runtime}
rmReport, rmErrors := imageEngine.Remove(r.Context(), []string{utils.GetName(r)}, opts)
@@ -668,3 +672,32 @@ func ImagesRemove(w http.ResponseWriter, r *http.Request) {
utils.Error(w, http.StatusInternalServerError, errorhandling.JoinErrors(rmErrors))
}
}
+
+func ImageScp(w http.ResponseWriter, r *http.Request) {
+ decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
+ query := struct {
+ Destination string `schema:"destination"`
+ Quiet bool `schema:"quiet"`
+ }{
+ // This is where you can override the golang default value for one of the fields
+ }
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
+ return
+ }
+
+ sourceArg := utils.GetName(r)
+
+ rep, source, dest, _, err := domainUtils.ExecuteTransfer(sourceArg, query.Destination, []string{}, query.Quiet)
+ if err != nil {
+ utils.Error(w, http.StatusInternalServerError, err)
+ return
+ }
+
+ if source != nil || dest != nil {
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("cannot use the user transfer function on the remote client: %w", define.ErrInvalidArg))
+ return
+ }
+
+ utils.WriteResponse(w, http.StatusOK, &reports.ScpReport{Id: rep.Names[0]})
+}
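
The new ImageScp handler above delegates the transfer to domainUtils.ExecuteTransfer; a commented restatement of how its return values are consumed, assuming (as the error message suggests) that a non-nil source or dest means a user-to-user transfer was requested:

    // rep holds the load report; its first name becomes the ScpReport Id.
    // src and dst are non-nil only for a user-to-user (e.g. rootful/rootless)
    // transfer, which cannot be carried out through the remote API.
    rep, src, dst, _, err := domainUtils.ExecuteTransfer(sourceArg, query.Destination, []string{}, query.Quiet)
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, err)
        return
    }
    if src != nil || dst != nil {
        utils.Error(w, http.StatusBadRequest,
            fmt.Errorf("cannot use the user transfer function on the remote client: %w", define.ErrInvalidArg))
        return
    }
    utils.WriteResponse(w, http.StatusOK, &reports.ScpReport{Id: rep.Names[0]})
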
diff --git a/pkg/api/handlers/libpod/images_pull.go b/pkg/api/handlers/libpod/images_pull.go
index 2cd45fb63..7e24ae5ac 100644
--- a/pkg/api/handlers/libpod/images_pull.go
+++ b/pkg/api/handlers/libpod/images_pull.go
@@ -3,6 +3,8 @@ package libpod
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/common/libimage"
@@ -15,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/channel"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -41,7 +42,7 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
diff --git a/pkg/api/handlers/libpod/manifests.go b/pkg/api/handlers/libpod/manifests.go
index 65b9d6cb5..3235a2972 100644
--- a/pkg/api/handlers/libpod/manifests.go
+++ b/pkg/api/handlers/libpod/manifests.go
@@ -3,6 +3,7 @@ package libpod
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -24,7 +25,6 @@ import (
"github.com/gorilla/mux"
"github.com/gorilla/schema"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
func ManifestCreate(w http.ResponseWriter, r *http.Request) {
@@ -45,7 +45,7 @@ func ManifestCreate(w http.ResponseWriter, r *http.Request) {
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -54,7 +54,7 @@ func ManifestCreate(w http.ResponseWriter, r *http.Request) {
n, err := url.QueryUnescape(name)
if err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse name parameter %q", name))
+ fmt.Errorf("failed to parse name parameter %q: %w", name, err))
return
}
query.Name = n
@@ -62,7 +62,7 @@ func ManifestCreate(w http.ResponseWriter, r *http.Request) {
if _, err := reference.ParseNormalizedNamed(query.Name); err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "invalid image name %s", query.Name))
+ fmt.Errorf("invalid image name %s: %w", query.Name, err))
return
}
@@ -94,7 +94,7 @@ func ManifestCreate(w http.ResponseWriter, r *http.Request) {
body := new(entities.ManifestModifyOptions)
if err := json.Unmarshal(buffer, body); err != nil {
- utils.InternalServerError(w, errors.Wrap(err, "Decode()"))
+ utils.InternalServerError(w, fmt.Errorf("Decode(): %w", err))
return
}
@@ -163,11 +163,10 @@ func ManifestAddV3(w http.ResponseWriter, r *http.Request) {
// Wrapper to support 3.x with 4.x libpod
query := struct {
entities.ManifestAddOptions
- Images []string
TLSVerify bool `schema:"tlsVerify"`
}{}
if err := json.NewDecoder(r.Body).Decode(&query); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
@@ -221,7 +220,7 @@ func ManifestRemoveDigestV3(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
manifestList, err := runtime.LibimageRuntime().LookupManifestList(name)
@@ -248,15 +247,16 @@ func ManifestPushV3(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
query := struct {
- All bool `schema:"all"`
- Destination string `schema:"destination"`
- TLSVerify bool `schema:"tlsVerify"`
+ All bool `schema:"all"`
+ Destination string `schema:"destination"`
+ RemoveSignatures bool `schema:"removeSignatures"`
+ TLSVerify bool `schema:"tlsVerify"`
}{
// Add defaults here once needed.
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if err := utils.IsRegistryReference(query.Destination); err != nil {
@@ -277,10 +277,11 @@ func ManifestPushV3(w http.ResponseWriter, r *http.Request) {
password = authconf.Password
}
options := entities.ImagePushOptions{
- Authfile: authfile,
- Username: username,
- Password: password,
- All: query.All,
+ All: query.All,
+ Authfile: authfile,
+ Password: password,
+ RemoveSignatures: query.RemoveSignatures,
+ Username: username,
}
if sys := runtime.SystemContext(); sys != nil {
options.CertDir = sys.DockerCertPath
@@ -291,7 +292,7 @@ func ManifestPushV3(w http.ResponseWriter, r *http.Request) {
imageEngine := abi.ImageEngine{Libpod: runtime}
digest, err := imageEngine.ManifestPush(context.Background(), source, query.Destination, options)
if err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "error pushing image %q", query.Destination))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("error pushing image %q: %w", query.Destination, err))
return
}
utils.WriteResponse(w, http.StatusOK, entities.IDResponse{ID: digest})
@@ -312,7 +313,7 @@ func ManifestPush(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -324,7 +325,7 @@ func ManifestPush(w http.ResponseWriter, r *http.Request) {
authconf, authfile, err := auth.GetCredentials(r)
if err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse registry header for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse registry header for %s: %w", r.URL.String(), err))
return
}
defer auth.RemoveAuthfile(authfile)
@@ -350,7 +351,7 @@ func ManifestPush(w http.ResponseWriter, r *http.Request) {
source := utils.GetName(r)
digest, err := imageEngine.ManifestPush(context.Background(), source, destination, options)
if err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "error pushing image %q", destination))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("error pushing image %q: %w", destination, err))
return
}
utils.WriteResponse(w, http.StatusOK, entities.IDResponse{ID: digest})
@@ -363,7 +364,7 @@ func ManifestModify(w http.ResponseWriter, r *http.Request) {
body := new(entities.ManifestModifyOptions)
if err := json.NewDecoder(r.Body).Decode(body); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
diff --git a/pkg/api/handlers/libpod/networks.go b/pkg/api/handlers/libpod/networks.go
index 16f499d4c..7169a6d44 100644
--- a/pkg/api/handlers/libpod/networks.go
+++ b/pkg/api/handlers/libpod/networks.go
@@ -2,8 +2,11 @@ package libpod
import (
"encoding/json"
+ "fmt"
"net/http"
+ "errors"
+
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
@@ -13,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func CreateNetwork(w http.ResponseWriter, r *http.Request) {
@@ -25,7 +27,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
network := types.Network{}
if err := json.NewDecoder(r.Body).Decode(&network); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to decode request JSON payload"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode request JSON payload: %w", err))
return
}
@@ -52,7 +54,7 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -83,7 +85,7 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)
@@ -99,7 +101,7 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
}
if reports[0].Err != nil {
// If the network cannot be found, we return a 404.
- if errors.Cause(reports[0].Err) == define.ErrNoSuchNetwork {
+ if errors.Is(reports[0].Err, define.ErrNoSuchNetwork) {
utils.Error(w, http.StatusNotFound, reports[0].Err)
return
}
@@ -142,18 +144,18 @@ func Connect(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
var netConnect entities.NetworkConnectOptions
if err := json.NewDecoder(r.Body).Decode(&netConnect); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to decode request JSON payload"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode request JSON payload: %w", err))
return
}
name := utils.GetName(r)
err := runtime.ConnectContainerToNetwork(netConnect.Container, name, netConnect.PerNetworkOptions)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, netConnect.Container, err)
return
}
- if errors.Cause(err) == define.ErrNoSuchNetwork {
+ if errors.Is(err, define.ErrNoSuchNetwork) {
utils.Error(w, http.StatusNotFound, err)
return
}
diff --git a/pkg/api/handlers/libpod/play.go b/pkg/api/handlers/libpod/play.go
index b71afc28c..f8ce52a72 100644
--- a/pkg/api/handlers/libpod/play.go
+++ b/pkg/api/handlers/libpod/play.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "fmt"
"net"
"net/http"
@@ -12,7 +13,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func PlayKube(w http.ResponseWriter, r *http.Request) {
@@ -34,7 +34,7 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -42,7 +42,7 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
for _, ipString := range query.StaticIPs {
ip := net.ParseIP(ipString)
if ip == nil {
- utils.Error(w, http.StatusBadRequest, errors.Errorf("Invalid IP address %s", ipString))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("invalid IP address %s", ipString))
return
}
staticIPs = append(staticIPs, ip)
@@ -77,7 +77,7 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
- query.LogDriver = config.Containers.LogDriver
+ logDriver = config.Containers.LogDriver
}
containerEngine := abi.ContainerEngine{Libpod: runtime}
@@ -89,7 +89,7 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
Networks: query.Network,
NoHosts: query.NoHosts,
Quiet: true,
- LogDriver: query.LogDriver,
+ LogDriver: logDriver,
LogOptions: query.LogOptions,
StaticIPs: staticIPs,
StaticMACs: staticMACs,
@@ -103,7 +103,7 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
report, err := containerEngine.PlayKube(r.Context(), r.Body, options)
_ = r.Body.Close()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error playing YAML file"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error playing YAML file: %w", err))
return
}
utils.WriteResponse(w, http.StatusOK, report)
@@ -116,7 +116,7 @@ func PlayKubeDown(w http.ResponseWriter, r *http.Request) {
report, err := containerEngine.PlayKubeDown(r.Context(), r.Body, *options)
_ = r.Body.Close()
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error tearing down YAML file"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error tearing down YAML file: %w", err))
return
}
utils.WriteResponse(w, http.StatusOK, report)
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index 5b92358fa..8b1d456ec 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -2,6 +2,7 @@ package libpod
import (
"encoding/json"
+ "errors"
"fmt"
"net/http"
"strings"
@@ -19,7 +20,6 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -33,11 +33,11 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
)
psg := specgen.PodSpecGenerator{InfraContainerSpec: &specgen.SpecGenerator{}}
if err := json.NewDecoder(r.Body).Decode(&psg); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
return
}
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
return
}
if !psg.NoInfra {
@@ -51,17 +51,17 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
}
err = specgenutil.FillOutSpecGen(psg.InfraContainerSpec, &infraOptions, []string{}) // necessary for default values in many cases (userns, idmappings)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error filling out specgen"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error filling out specgen: %w", err))
return
}
out, err := json.Marshal(psg) // marshal our spec so the matching options can be unmarshaled into infra
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
return
}
err = json.Unmarshal(out, psg.InfraContainerSpec) // unmarshal matching options
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
return
}
// a few extra that do not have the same json tags
@@ -75,10 +75,10 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
pod, err := generate.MakePod(&podSpecComplete, runtime)
if err != nil {
httpCode := http.StatusInternalServerError
- if errors.Cause(err) == define.ErrPodExists {
+ if errors.Is(err, define.ErrPodExists) {
httpCode = http.StatusConflict
}
- utils.Error(w, httpCode, errors.Wrap(err, "failed to make pod"))
+ utils.Error(w, httpCode, fmt.Errorf("failed to make pod: %w", err))
return
}
utils.WriteResponse(w, http.StatusCreated, entities.IDResponse{ID: pod.ID()})
@@ -89,7 +89,7 @@ func Pods(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -139,7 +139,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)
@@ -164,7 +164,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
} else {
responses, stopError = pod.Stop(r.Context(), false)
}
- if stopError != nil && errors.Cause(stopError) != define.ErrPodPartialFail {
+ if stopError != nil && !errors.Is(stopError, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, stopError)
return
}
@@ -178,7 +178,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
report := entities.PodStopReport{Id: pod.ID()}
for id, err := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(err, "error stopping container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error stopping container %s: %w", id, err))
}
code := http.StatusOK
@@ -207,14 +207,14 @@ func PodStart(w http.ResponseWriter, r *http.Request) {
}
responses, err := pod.Start(r.Context())
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusConflict, err)
return
}
report := entities.PodStartReport{Id: pod.ID()}
for id, err := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(err, "error starting container "+id))
+ report.Errs = append(report.Errs, fmt.Errorf("error starting container %s: %w", id, err))
}
code := http.StatusOK
@@ -237,7 +237,7 @@ func PodDelete(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)
@@ -263,14 +263,14 @@ func PodRestart(w http.ResponseWriter, r *http.Request) {
return
}
responses, err := pod.Restart(r.Context())
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
report := entities.PodRestartReport{Id: pod.ID()}
for id, err := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(err, "error restarting container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error restarting container %s: %w", id, err))
}
code := http.StatusOK
@@ -314,14 +314,14 @@ func PodPause(w http.ResponseWriter, r *http.Request) {
return
}
responses, err := pod.Pause(r.Context())
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
report := entities.PodPauseReport{Id: pod.ID()}
for id, v := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error pausing container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error pausing container %s: %w", id, v))
}
code := http.StatusOK
@@ -340,14 +340,14 @@ func PodUnpause(w http.ResponseWriter, r *http.Request) {
return
}
responses, err := pod.Unpause(r.Context())
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
report := entities.PodUnpauseReport{Id: pod.ID()}
for id, v := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error unpausing container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error unpausing container %s: %w", id, v))
}
code := http.StatusOK
@@ -374,7 +374,7 @@ func PodTop(w http.ResponseWriter, r *http.Request) {
PsArgs: psArgs,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -456,7 +456,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
// override any golang type defaults
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if _, found := r.URL.Query()["signal"]; found {
@@ -465,7 +465,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
sig, err := util.ParseSignal(signal)
if err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "unable to parse signal value"))
+ utils.InternalServerError(w, fmt.Errorf("unable to parse signal value: %w", err))
return
}
name := utils.GetName(r)
@@ -488,12 +488,12 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
}
}
if !hasRunning {
- utils.Error(w, http.StatusConflict, errors.Errorf("cannot kill a pod with no running containers: %s", pod.ID()))
+ utils.Error(w, http.StatusConflict, fmt.Errorf("cannot kill a pod with no running containers: %s", pod.ID()))
return
}
responses, err := pod.Kill(r.Context(), uint(sig))
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
@@ -530,11 +530,15 @@ func PodStats(w http.ResponseWriter, r *http.Request) {
query := struct {
NamesOrIDs []string `schema:"namesOrIDs"`
All bool `schema:"all"`
+ Stream bool `schema:"stream"`
+ Delay int `schema:"delay"`
}{
// default would go here
+ Delay: 5,
+ Stream: false,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -544,21 +548,49 @@ func PodStats(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
}
+ var flush = func() {}
+ if flusher, ok := w.(http.Flusher); ok {
+ flush = flusher.Flush
+ }
// Collect the stats and send them over the wire.
containerEngine := abi.ContainerEngine{Libpod: runtime}
reports, err := containerEngine.PodStats(r.Context(), query.NamesOrIDs, options)
// Error checks as documented in swagger.
- switch errors.Cause(err) {
- case define.ErrNoSuchPod:
- utils.Error(w, http.StatusNotFound, err)
- return
- case nil:
- // Nothing to do.
- default:
+ if err != nil {
+ if errors.Is(err, define.ErrNoSuchPod) {
+ utils.Error(w, http.StatusNotFound, err)
+ return
+ }
utils.InternalServerError(w, err)
return
}
- utils.WriteResponse(w, http.StatusOK, reports)
+ w.Header().Set("Content-Type", "application/json")
+ coder := json.NewEncoder(w)
+ coder.SetEscapeHTML(true)
+
+ if err := coder.Encode(reports); err != nil {
+ logrus.Infof("Error from %s %q : %v", r.Method, r.URL, err)
+ }
+ flush()
+ if query.Stream {
+ for {
+ select {
+ case <-r.Context().Done():
+ return
+ default:
+ time.Sleep(time.Duration(query.Delay) * time.Second)
+ reports, err = containerEngine.PodStats(r.Context(), query.NamesOrIDs, options)
+ if err != nil {
+ return
+ }
+ if err := coder.Encode(reports); err != nil {
+ logrus.Infof("Error from %s %q : %v", r.Method, r.URL, err)
+ return
+ }
+ flush()
+ }
+ }
+ }
}
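The PodStats change above adds an optional streaming mode: one report is encoded and flushed, then the handler re-queries every `delay` seconds until the request context is cancelled. A condensed sketch of that loop, assuming a fetch callback standing in for containerEngine.PodStats and a helper name (streamJSON) that does not exist in the tree:

package stream

import (
	"context"
	"encoding/json"
	"net/http"
	"time"
)

// streamJSON encodes one result, flushes, and then re-queries every delay
// until ctx is cancelled or an error occurs. fetch stands in for the
// containerEngine.PodStats call made by the handler above.
func streamJSON(ctx context.Context, w http.ResponseWriter, delay time.Duration, fetch func() (interface{}, error)) {
	flush := func() {}
	if f, ok := w.(http.Flusher); ok {
		flush = f.Flush
	}
	enc := json.NewEncoder(w)
	for {
		reports, err := fetch()
		if err != nil {
			return
		}
		if err := enc.Encode(reports); err != nil {
			return
		}
		flush()
		select {
		case <-ctx.Done():
			return
		case <-time.After(delay):
		}
	}
}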
diff --git a/pkg/api/handlers/libpod/secrets.go b/pkg/api/handlers/libpod/secrets.go
index 3ea2c2ea8..6eba65f2b 100644
--- a/pkg/api/handlers/libpod/secrets.go
+++ b/pkg/api/handlers/libpod/secrets.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -9,7 +10,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func CreateSecret(w http.ResponseWriter, r *http.Request) {
@@ -27,7 +27,7 @@ func CreateSecret(w http.ResponseWriter, r *http.Request) {
}
opts := entities.SecretCreateOptions{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
diff --git a/pkg/api/handlers/libpod/swagger_spec.go b/pkg/api/handlers/libpod/swagger_spec.go
index 8eeb041d2..bfe4ab9fd 100644
--- a/pkg/api/handlers/libpod/swagger_spec.go
+++ b/pkg/api/handlers/libpod/swagger_spec.go
@@ -1,11 +1,12 @@
package libpod
import (
+ "errors"
+ "fmt"
"net/http"
"os"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
- "github.com/pkg/errors"
)
// DefaultPodmanSwaggerSpec provides the default path to the podman swagger spec file
@@ -18,7 +19,7 @@ func ServeSwagger(w http.ResponseWriter, r *http.Request) {
}
if _, err := os.Stat(path); err != nil {
if errors.Is(err, os.ErrNotExist) {
- utils.InternalServerError(w, errors.Errorf("swagger spec %q does not exist", path))
+ utils.InternalServerError(w, fmt.Errorf("swagger spec %q does not exist", path))
return
}
utils.InternalServerError(w, err)
diff --git a/pkg/api/handlers/libpod/system.go b/pkg/api/handlers/libpod/system.go
index 9ceca99e8..7418dc4df 100644
--- a/pkg/api/handlers/libpod/system.go
+++ b/pkg/api/handlers/libpod/system.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
// SystemPrune removes unused data
@@ -25,13 +25,13 @@ func SystemPrune(w http.ResponseWriter, r *http.Request) {
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
filterMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusBadRequest,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go
index e792dea35..5eac76f5b 100644
--- a/pkg/api/handlers/libpod/volumes.go
+++ b/pkg/api/handlers/libpod/volumes.go
@@ -2,9 +2,12 @@ package libpod
import (
"encoding/json"
+ "fmt"
"net/http"
"net/url"
+ "errors"
+
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
@@ -16,7 +19,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi/parse"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func CreateVolume(w http.ResponseWriter, r *http.Request) {
@@ -30,14 +32,14 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
input := entities.VolumeCreateOptions{}
// decode params from body
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
@@ -108,7 +110,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -181,7 +183,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)
@@ -191,7 +193,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
return
}
if err := runtime.RemoveVolume(r.Context(), vol, query.Force, query.Timeout); err != nil {
- if errors.Cause(err) == define.ErrVolumeBeingUsed {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
utils.Error(w, http.StatusConflict, err)
return
}
diff --git a/pkg/api/handlers/swagger/responses.go b/pkg/api/handlers/swagger/responses.go
index 55fc1a77f..93a508b39 100644
--- a/pkg/api/handlers/swagger/responses.go
+++ b/pkg/api/handlers/swagger/responses.go
@@ -41,6 +41,13 @@ type imagesLoadResponseLibpod struct {
Body entities.ImageLoadReport
}
+// Image Scp
+// swagger:response
+type imagesScpResponseLibpod struct {
+ // in:body
+ Body reports.ScpReport
+}
+
// Image Import
// swagger:response
type imagesImportResponseLibpod struct {
diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go
index 9eb712c30..b533e131c 100644
--- a/pkg/api/handlers/types.go
+++ b/pkg/api/handlers/types.go
@@ -2,6 +2,7 @@ package handlers
import (
"context"
+ "fmt"
"time"
"github.com/containers/common/libimage"
@@ -10,7 +11,6 @@ import (
dockerContainer "github.com/docker/docker/api/types/container"
dockerNetwork "github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
- "github.com/pkg/errors"
)
type AuthConfig struct {
@@ -237,17 +237,17 @@ func portsToPortSet(input map[string]struct{}) (nat.PortSet, error) {
case "tcp", "":
p, err := nat.NewPort("tcp", port)
if err != nil {
- return nil, errors.Wrapf(err, "unable to create tcp port from %s", k)
+ return nil, fmt.Errorf("unable to create tcp port from %s: %w", k, err)
}
ports[p] = struct{}{}
case "udp":
p, err := nat.NewPort("udp", port)
if err != nil {
- return nil, errors.Wrapf(err, "unable to create tcp port from %s", k)
+ return nil, fmt.Errorf("unable to create udp port from %s: %w", k, err)
}
ports[p] = struct{}{}
default:
- return nil, errors.Errorf("invalid port proto %q in %q", proto, k)
+ return nil, fmt.Errorf("invalid port proto %q in %q", proto, k)
}
}
return ports, nil
diff --git a/pkg/api/handlers/utils/containers.go b/pkg/api/handlers/utils/containers.go
index 8588b49ba..80f8522fd 100644
--- a/pkg/api/handlers/utils/containers.go
+++ b/pkg/api/handlers/utils/containers.go
@@ -2,6 +2,7 @@ package utils
import (
"context"
+ "errors"
"fmt"
"net/http"
"strconv"
@@ -19,7 +20,6 @@ import (
"github.com/containers/podman/v4/libpod"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
type waitQueryDocker struct {
@@ -39,7 +39,7 @@ func WaitContainerDocker(w http.ResponseWriter, r *http.Request) {
decoder := ctx.Value(api.DecoderKey).(*schema.Decoder)
if err = decoder.Decode(&query, r.URL.Query()); err != nil {
- Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -106,7 +106,7 @@ func WaitContainerLibpod(w http.ResponseWriter, r *http.Request) {
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
query := waitQueryLibpod{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -130,7 +130,7 @@ func WaitContainerLibpod(w http.ResponseWriter, r *http.Request) {
exitCode, err := waitFn(conditions...)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
ContainerNotFound(w, name, err)
return
}
@@ -191,14 +191,13 @@ func waitDockerCondition(ctx context.Context, containerName string, interval tim
var notRunningStates = []define.ContainerStatus{
define.ContainerStateCreated,
define.ContainerStateRemoving,
- define.ContainerStateStopped,
define.ContainerStateExited,
define.ContainerStateConfigured,
}
func waitRemoved(ctrWait containerWaitFn) (int32, error) {
code, err := ctrWait(define.ContainerStateUnknown)
- if err != nil && errors.Cause(err) == define.ErrNoSuchCtr {
+ if err != nil && errors.Is(err, define.ErrNoSuchCtr) {
return code, nil
}
return code, err
diff --git a/pkg/api/handlers/utils/errors.go b/pkg/api/handlers/utils/errors.go
index bf60b2c84..ab1b6f227 100644
--- a/pkg/api/handlers/utils/errors.go
+++ b/pkg/api/handlers/utils/errors.go
@@ -1,17 +1,18 @@
package utils
import (
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/storage"
- "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
var (
- ErrLinkNotSupport = errors.New("Link is not supported")
+ ErrLinkNotSupport = errors.New("link is not supported")
)
// TODO: document the exported functions in this file and make them more
@@ -25,7 +26,7 @@ func Error(w http.ResponseWriter, code int, err error) {
// Log detailed message of what happened to machine running podman service
log.Infof("Request Failed(%s): %s", http.StatusText(code), err.Error())
em := errorhandling.ErrorModel{
- Because: (errors.Cause(err)).Error(),
+ Because: errorhandling.Cause(err).Error(),
Message: err.Error(),
ResponseCode: code,
}
@@ -33,51 +34,50 @@ func Error(w http.ResponseWriter, code int, err error) {
}
func VolumeNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != define.ErrNoSuchVolume {
+ if !errors.Is(err, define.ErrNoSuchVolume) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func ContainerNotFound(w http.ResponseWriter, name string, err error) {
- switch errors.Cause(err) {
- case define.ErrNoSuchCtr, define.ErrCtrExists:
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrExists) {
Error(w, http.StatusNotFound, err)
- default:
+ } else {
InternalServerError(w, err)
}
}
func ImageNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != storage.ErrImageUnknown {
+ if !errors.Is(err, storage.ErrImageUnknown) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func NetworkNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != define.ErrNoSuchNetwork {
+ if !errors.Is(err, define.ErrNoSuchNetwork) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func PodNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != define.ErrNoSuchPod {
+ if !errors.Is(err, define.ErrNoSuchPod) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func SessionNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != define.ErrNoSuchExecSession {
+ if !errors.Is(err, define.ErrNoSuchExecSession) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func SecretNotFound(w http.ResponseWriter, nameOrID string, err error) {
- if errors.Cause(err).Error() != "no such secret" {
+ if errorhandling.Cause(err).Error() != "no such secret" {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
@@ -92,7 +92,7 @@ func InternalServerError(w http.ResponseWriter, err error) {
}
func BadRequest(w http.ResponseWriter, key string, value string, err error) {
- e := errors.Wrapf(err, "failed to parse query parameter '%s': %q", key, value)
+ e := fmt.Errorf("failed to parse query parameter '%s': %q: %w", key, value, err)
Error(w, http.StatusBadRequest, e)
}
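Error() and SecretNotFound now derive the root cause through errorhandling.Cause, podman's stdlib-based replacement for errors.Cause from github.com/pkg/errors. A minimal sketch of such a helper, assuming it simply walks the %w chain to the innermost error:

package errorhandling

import "errors"

// Cause unwraps err via errors.Unwrap until the innermost error is reached
// and returns it; a sketch of the stdlib-only replacement for pkg/errors.Cause.
func Cause(err error) error {
	for {
		unwrapped := errors.Unwrap(err)
		if unwrapped == nil {
			return err
		}
		err = unwrapped
	}
}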
diff --git a/pkg/api/handlers/utils/handler.go b/pkg/api/handlers/utils/handler.go
index 338d5a84b..9562ebbbc 100644
--- a/pkg/api/handlers/utils/handler.go
+++ b/pkg/api/handlers/utils/handler.go
@@ -1,6 +1,7 @@
package utils
import (
+ "errors"
"fmt"
"io"
"net/http"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/version"
"github.com/gorilla/mux"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -42,11 +42,11 @@ func SupportedVersion(r *http.Request, condition string) (semver.Version, error)
}
safeVal, err := url.PathUnescape(val)
if err != nil {
- return version, errors.Wrapf(err, "unable to unescape given API version: %q", val)
+ return version, fmt.Errorf("unable to unescape given API version: %q: %w", val, err)
}
version, err = semver.ParseTolerant(safeVal)
if err != nil {
- return version, errors.Wrapf(err, "unable to parse given API version: %q from %q", safeVal, val)
+ return version, fmt.Errorf("unable to parse given API version: %q from %q: %w", safeVal, val, err)
}
inRange, err := semver.ParseRange(condition)
@@ -178,7 +178,7 @@ func GetVar(r *http.Request, k string) string {
val := mux.Vars(r)[k]
safeVal, err := url.PathUnescape(val)
if err != nil {
- logrus.Error(errors.Wrapf(err, "failed to unescape mux key %s, value %s", k, val))
+ logrus.Error(fmt.Errorf("failed to unescape mux key %s, value %s: %w", k, val, err))
return val
}
return safeVal
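SupportedVersion above unescapes the version from the URL path, parses it leniently, and checks it against a semver range. A small standalone sketch of the two blang/semver calls involved, outside the handler:

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// ParseTolerant accepts short forms such as "4.0" taken from the URL path.
	v, err := semver.ParseTolerant("4.0")
	if err != nil {
		panic(err)
	}

	inRange, err := semver.ParseRange(">=4.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(inRange(v)) // true
}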
diff --git a/pkg/api/handlers/utils/images.go b/pkg/api/handlers/utils/images.go
index 7154f5616..357fa2990 100644
--- a/pkg/api/handlers/utils/images.go
+++ b/pkg/api/handlers/utils/images.go
@@ -1,6 +1,7 @@
package utils
import (
+ "errors"
"fmt"
"net/http"
"strings"
@@ -15,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage"
"github.com/docker/distribution/reference"
- "github.com/pkg/errors"
)
// NormalizeToDockerHub normalizes the specified nameOrID to Docker Hub if the
@@ -26,21 +26,26 @@ func NormalizeToDockerHub(r *http.Request, nameOrID string) (string, error) {
return nameOrID, nil
}
- // Try to lookup the input to figure out if it was an ID or not.
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
- img, _, err := runtime.LibimageRuntime().LookupImage(nameOrID, nil)
+
+ // The candidate may resolve to a local non-Docker Hub image, such as
+ // 'busybox' -> 'registry.com/busybox'.
+ img, candidate, err := runtime.LibimageRuntime().LookupImage(nameOrID, nil)
if err != nil {
- if errors.Cause(err) != storage.ErrImageUnknown {
+ if !errors.Is(err, storage.ErrImageUnknown) {
return "", fmt.Errorf("normalizing name for compat API: %v", err)
}
+ // If the image could not be resolved locally, set the
+ // candidate back to the input.
+ candidate = nameOrID
} else if strings.HasPrefix(img.ID(), strings.TrimPrefix(nameOrID, "sha256:")) {
return img.ID(), nil
}
// No ID, so we can normalize.
- named, err := reference.ParseNormalizedNamed(nameOrID)
+ named, err := reference.ParseNormalizedNamed(candidate)
if err != nil {
- return "", fmt.Errorf("normalizing name for compat API: %v", err)
+ return "", fmt.Errorf("normalizing name %q (orig: %q) for compat API: %v", candidate, nameOrID, err)
}
return named.String(), nil
@@ -63,12 +68,12 @@ func IsRegistryReference(name string) error {
imageRef, err := alltransports.ParseImageName(name)
if err != nil {
// No supported transport -> assume a docker-style reference.
- return nil // nolint: nilerr
+ return nil //nolint: nilerr
}
if imageRef.Transport().Name() == docker.Transport.Name() {
return nil
}
- return errors.Errorf("unsupported transport %s in %q: only docker transport is supported", imageRef.Transport().Name(), name)
+ return fmt.Errorf("unsupported transport %s in %q: only docker transport is supported", imageRef.Transport().Name(), name)
}
// ParseStorageReference parses the specified image name to a
@@ -78,12 +83,12 @@ func ParseStorageReference(name string) (types.ImageReference, error) {
storagePrefix := storageTransport.Transport.Name()
imageRef, err := alltransports.ParseImageName(name)
if err == nil && imageRef.Transport().Name() != docker.Transport.Name() {
- return nil, errors.Errorf("reference %q must be a storage reference", name)
+ return nil, fmt.Errorf("reference %q must be a storage reference", name)
} else if err != nil {
origErr := err
imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s:%s", storagePrefix, name))
if err != nil {
- return nil, errors.Wrapf(origErr, "reference %q must be a storage reference", name)
+ return nil, fmt.Errorf("reference %q must be a storage reference: %w", name, origErr)
}
}
return imageRef, nil
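NormalizeToDockerHub now looks the name up in local storage first, so a short name that actually resolves to another registry keeps its real repository instead of being expanded to docker.io. For the fallback path, reference.ParseNormalizedNamed still applies Docker Hub defaults; a small standalone example:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("busybox")
	if err != nil {
		panic(err)
	}
	// A bare short name is normalized to Docker Hub.
	fmt.Println(named.String()) // docker.io/library/busybox
}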
diff --git a/pkg/api/server/listener_api.go b/pkg/api/server/listener_api.go
index 2d02df7dc..d1f3ea7a3 100644
--- a/pkg/api/server/listener_api.go
+++ b/pkg/api/server/listener_api.go
@@ -1,31 +1,30 @@
package server
import (
+ "fmt"
"net"
"os"
"path/filepath"
-
- "github.com/pkg/errors"
)
// ListenUnix follows stdlib net.Listen() API, providing a unix listener for given path
// ListenUnix will delete and create files/directories as needed
func ListenUnix(network string, path string) (net.Listener, error) {
- // setup custom listener for API server
+ // set up custom listener for API server
err := os.MkdirAll(filepath.Dir(path), 0770)
if err != nil {
- return nil, errors.Wrapf(err, "api.ListenUnix() failed to create %s", filepath.Dir(path))
+ return nil, fmt.Errorf("api.ListenUnix() failed to create %s: %w", filepath.Dir(path), err)
}
os.Remove(path)
listener, err := net.Listen(network, path)
if err != nil {
- return nil, errors.Wrapf(err, "api.ListenUnix() failed to create net.Listen(%s, %s)", network, path)
+ return nil, fmt.Errorf("api.ListenUnix() failed to create net.Listen(%s, %s): %w", network, path, err)
}
_, err = os.Stat(path)
if err != nil {
- return nil, errors.Wrapf(err, "net.Listen(%s, %s) failed to report the failure to create socket", network, path)
+ return nil, fmt.Errorf("net.Listen(%s, %s) failed to report the failure to create socket: %w", network, path, err)
}
return listener, nil
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index 1617a5dd7..a2f46cb35 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -948,6 +948,10 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// name: ignore
// description: Ignore if a specified image does not exist and do not throw an error.
// type: boolean
+ // - in: query
+ // name: lookupManifest
+ // description: Resolves to manifest list instead of image.
+ // type: boolean
// produces:
// - application/json
// responses:
@@ -1615,5 +1619,39 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// 500:
// $ref: "#/responses/internalError"
r.Handle(VersionedPath("/libpod/build"), s.APIHandler(compat.BuildImage)).Methods(http.MethodPost)
+
+ // swagger:operation POST /libpod/images/scp/{name} libpod ImageScpLibpod
+ // ---
+ // tags:
+ // - images
+ // summary: Copy an image from one host to another
+ // description: Copy an image from one host to another
+ // parameters:
+ // - in: path
+ // name: name
+ // required: true
+ // description: source connection/image
+ // type: string
+ // - in: query
+ // name: destination
+ // required: false
+ // description: dest connection/image
+ // type: string
+ // - in: query
+ // name: quiet
+ // required: false
+ // description: quiet output
+ // type: boolean
+ // default: false
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // $ref: "#/responses/imagesScpResponseLibpod"
+ // 400:
+ // $ref: "#/responses/badParamError"
+ // 500:
+ // $ref: '#/responses/internalError'
+ r.Handle(VersionedPath("/libpod/images/scp/{name:.*}"), s.APIHandler(libpod.ImageScp)).Methods(http.MethodPost)
return nil
}
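The new route can be exercised directly against the service socket. A hedged sketch of a client call; the socket path, the API version prefix, and the destination value syntax are assumptions that vary per installation and are not taken from this patch:

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Assumed rootful socket path; rootless setups typically use
	// $XDG_RUNTIME_DIR/podman/podman.sock instead.
	const socket = "/run/podman/podman.sock"

	client := http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", socket)
			},
		},
	}

	// Version prefix and destination syntax are illustrative assumptions.
	url := "http://d/v4.0.0/libpod/images/scp/alpine?destination=remote::alpine&quiet=false"
	resp, err := client.Post(url, "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}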
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index 7a7e35e8e..5482a8ec2 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -148,7 +148,7 @@ func newServer(runtime *libpod.Runtime, listener net.Listener, opts entities.Ser
if logrus.IsLevelEnabled(logrus.TraceLevel) {
// If in trace mode log request and response bodies
router.Use(loggingHandler())
- router.Walk(func(route *mux.Route, r *mux.Router, ancestors []*mux.Route) error { // nolint
+ _ = router.Walk(func(route *mux.Route, r *mux.Router, ancestors []*mux.Route) error {
path, err := route.GetPathTemplate()
if err != nil {
path = "<N/A>"
diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go
index 419225007..dd934fabd 100644
--- a/pkg/auth/auth.go
+++ b/pkg/auth/auth.go
@@ -3,6 +3,7 @@ package auth
import (
"encoding/base64"
"encoding/json"
+ "fmt"
"io/ioutil"
"net/http"
"os"
@@ -11,7 +12,6 @@ import (
imageAuth "github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/types"
dockerAPITypes "github.com/docker/docker/api/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -47,7 +47,7 @@ func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) {
return nil, "", nil
}
if err != nil {
- return nil, "", errors.Wrapf(err, "failed to parse %q header for %s", headerName, r.URL.String())
+ return nil, "", fmt.Errorf("failed to parse %q header for %s: %w", headerName, r.URL.String(), err)
}
var authFile string
@@ -56,7 +56,7 @@ func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) {
} else {
authFile, err = authConfigsToAuthFile(fileContents)
if err != nil {
- return nil, "", errors.Wrapf(err, "failed to parse %q header for %s", headerName, r.URL.String())
+ return nil, "", fmt.Errorf("failed to parse %q header for %s: %w", headerName, r.URL.String(), err)
}
}
return override, authFile, nil
@@ -72,13 +72,13 @@ func getConfigCredentials(r *http.Request, headers []string) (*types.DockerAuthC
for _, h := range headers {
param, err := base64.URLEncoding.DecodeString(h)
if err != nil {
- return nil, nil, errors.Wrapf(err, "failed to decode %q", xRegistryConfigHeader)
+ return nil, nil, fmt.Errorf("failed to decode %q: %w", xRegistryConfigHeader, err)
}
ac := make(map[string]dockerAPITypes.AuthConfig)
err = json.Unmarshal(param, &ac)
if err != nil {
- return nil, nil, errors.Wrapf(err, "failed to unmarshal %q", xRegistryConfigHeader)
+ return nil, nil, fmt.Errorf("failed to unmarshal %q: %w", xRegistryConfigHeader, err)
}
for k, v := range ac {
@@ -238,15 +238,13 @@ func authConfigsToAuthFile(authConfigs map[string]types.DockerAuthConfig) (strin
return "", err
}
if _, err := tmpFile.Write([]byte{'{', '}'}); err != nil {
- return "", errors.Wrap(err, "error initializing temporary auth file")
+ return "", fmt.Errorf("error initializing temporary auth file: %w", err)
}
if err := tmpFile.Close(); err != nil {
- return "", errors.Wrap(err, "error closing temporary auth file")
+ return "", fmt.Errorf("error closing temporary auth file: %w", err)
}
authFilePath := tmpFile.Name()
- // TODO: It would be nice if c/image could dump the map at once.
- //
// Now use the c/image packages to store the credentials. It's battle
// tested, and we make sure to use the same code as the image backend.
sys := types.SystemContext{AuthFilePath: authFilePath}
@@ -257,7 +255,7 @@ func authConfigsToAuthFile(authConfigs map[string]types.DockerAuthConfig) (strin
// that all credentials are valid. They'll be used on demand
// later.
if err := imageAuth.SetAuthentication(&sys, key, config.Username, config.Password); err != nil {
- return "", errors.Wrapf(err, "error storing credentials in temporary auth file (key: %q / %q, user: %q)", authFileKey, key, config.Username)
+ return "", fmt.Errorf("error storing credentials in temporary auth file (key: %q / %q, user: %q): %w", authFileKey, key, config.Username, err)
}
}
diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go
index 0c795faed..8d9991622 100644
--- a/pkg/autoupdate/autoupdate.go
+++ b/pkg/autoupdate/autoupdate.go
@@ -2,6 +2,7 @@ package autoupdate
import (
"context"
+ "fmt"
"os"
"sort"
@@ -17,7 +18,6 @@ import (
"github.com/containers/podman/v4/pkg/systemd"
systemdDefine "github.com/containers/podman/v4/pkg/systemd/define"
"github.com/coreos/go-systemd/v22/dbus"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -73,7 +73,7 @@ func LookupPolicy(s string) (Policy, error) {
}
sort.Strings(keys)
- return "", errors.Errorf("invalid auto-update policy %q: valid policies are %+q", s, keys)
+ return "", fmt.Errorf("invalid auto-update policy %q: valid policies are %+q", s, keys)
}
// ValidateImageReference checks if the specified imageName is a fully-qualified
@@ -85,17 +85,17 @@ func ValidateImageReference(imageName string) error {
// Make sure the input image is a docker.
imageRef, err := alltransports.ParseImageName(imageName)
if err == nil && imageRef.Transport().Name() != docker.Transport.Name() {
- return errors.Errorf("auto updates require the docker image transport but image is of transport %q", imageRef.Transport().Name())
+ return fmt.Errorf("auto updates require the docker image transport but image is of transport %q", imageRef.Transport().Name())
} else if err != nil {
repo, err := reference.Parse(imageName)
if err != nil {
- return errors.Wrap(err, "enforcing fully-qualified docker transport reference for auto updates")
+ return fmt.Errorf("enforcing fully-qualified docker transport reference for auto updates: %w", err)
}
if _, ok := repo.(reference.NamedTagged); !ok {
- return errors.Errorf("auto updates require fully-qualified image references (no tag): %q", imageName)
+ return fmt.Errorf("auto updates require fully-qualified image references (no tag): %q", imageName)
}
if _, ok := repo.(reference.Digested); ok {
- return errors.Errorf("auto updates require fully-qualified image references without digest: %q", imageName)
+ return fmt.Errorf("auto updates require fully-qualified image references without digest: %q", imageName)
}
}
return nil
@@ -151,7 +151,7 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
for imageID, policyMapper := range containerMap {
image, exists := imageMap[imageID]
if !exists {
- errs = append(errs, errors.Errorf("container image ID %q not found in local storage", imageID))
+ errs = append(errs, fmt.Errorf("container image ID %q not found in local storage", imageID))
return nil, errs
}
@@ -184,13 +184,13 @@ func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.
cid := ctr.ID()
rawImageName := ctr.RawImageName()
if rawImageName == "" {
- return nil, errors.Errorf("registry auto-updating container %q: raw-image name is empty", cid)
+ return nil, fmt.Errorf("registry auto-updating container %q: raw-image name is empty", cid)
}
labels := ctr.Labels()
unit, exists := labels[systemdDefine.EnvVariable]
if !exists {
- return nil, errors.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
+ return nil, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
}
report := &entities.AutoUpdateReport{
@@ -214,7 +214,7 @@ func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.
authfile := getAuthfilePath(ctr, options)
needsUpdate, err := newerRemoteImageAvailable(ctx, image, rawImageName, authfile)
if err != nil {
- return report, errors.Wrapf(err, "registry auto-updating container %q: image check for %q failed", cid, rawImageName)
+ return report, fmt.Errorf("registry auto-updating container %q: image check for %q failed: %w", cid, rawImageName, err)
}
if !needsUpdate {
@@ -228,7 +228,7 @@ func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.
}
if _, err := updateImage(ctx, runtime, rawImageName, authfile); err != nil {
- return report, errors.Wrapf(err, "registry auto-updating container %q: image update for %q failed", cid, rawImageName)
+ return report, fmt.Errorf("registry auto-updating container %q: image update for %q failed: %w", cid, rawImageName, err)
}
updatedRawImages[rawImageName] = true
@@ -245,10 +245,10 @@ func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.
// To fallback, simply retag the old image and restart the service.
if err := image.Tag(rawImageName); err != nil {
- return report, errors.Wrap(err, "falling back to previous image")
+ return report, fmt.Errorf("falling back to previous image: %w", err)
}
if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
- return report, errors.Wrap(err, "restarting unit with old image during fallback")
+ return report, fmt.Errorf("restarting unit with old image during fallback: %w", err)
}
report.Updated = "rolled back"
@@ -260,13 +260,13 @@ func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.C
cid := ctr.ID()
rawImageName := ctr.RawImageName()
if rawImageName == "" {
- return nil, errors.Errorf("locally auto-updating container %q: raw-image name is empty", cid)
+ return nil, fmt.Errorf("locally auto-updating container %q: raw-image name is empty", cid)
}
labels := ctr.Labels()
unit, exists := labels[systemdDefine.EnvVariable]
if !exists {
- return nil, errors.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
+ return nil, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
}
report := &entities.AutoUpdateReport{
@@ -280,7 +280,7 @@ func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.C
needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName)
if err != nil {
- return report, errors.Wrapf(err, "locally auto-updating container %q: image check for %q failed", cid, rawImageName)
+ return report, fmt.Errorf("locally auto-updating container %q: image check for %q failed: %w", cid, rawImageName, err)
}
if !needsUpdate {
@@ -306,10 +306,10 @@ func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.C
// To fallback, simply retag the old image and restart the service.
if err := image.Tag(rawImageName); err != nil {
- return report, errors.Wrap(err, "falling back to previous image")
+ return report, fmt.Errorf("falling back to previous image: %w", err)
}
if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
- return report, errors.Wrap(err, "restarting unit with old image during fallback")
+ return report, fmt.Errorf("restarting unit with old image during fallback: %w", err)
}
report.Updated = "rolled back"
@@ -320,7 +320,7 @@ func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.C
func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string, conn *dbus.Conn) error {
restartChan := make(chan string)
if _, err := conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil {
- return errors.Wrapf(err, "auto-updating container %q: restarting systemd unit %q failed", ctr.ID(), unit)
+ return fmt.Errorf("auto-updating container %q: restarting systemd unit %q failed: %w", ctr.ID(), unit, err)
}
// Wait for the restart to finish and actually check if it was
@@ -333,7 +333,7 @@ func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string,
return nil
default:
- return errors.Errorf("auto-updating container %q: restarting systemd unit %q failed: expected %q but received %q", ctr.ID(), unit, "done", result)
+ return fmt.Errorf("auto-updating container %q: restarting systemd unit %q failed: expected %q but received %q", ctr.ID(), unit, "done", result)
}
}
diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go
index 3739ec404..7dda955a2 100644
--- a/pkg/bindings/connection.go
+++ b/pkg/bindings/connection.go
@@ -2,6 +2,7 @@ package bindings
import (
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -15,7 +16,6 @@ import (
"github.com/blang/semver"
"github.com/containers/podman/v4/pkg/terminal"
"github.com/containers/podman/v4/version"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
@@ -43,7 +43,7 @@ func GetClient(ctx context.Context) (*Connection, error) {
if c, ok := ctx.Value(clientKey).(*Connection); ok {
return c, nil
}
- return nil, errors.Errorf("%s not set in context", clientKey)
+ return nil, fmt.Errorf("%s not set in context", clientKey)
}
// ServiceVersion from context build by NewConnection()
@@ -92,10 +92,10 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string)
_url, err := url.Parse(uri)
if err != nil {
- return nil, errors.Wrapf(err, "Value of CONTAINER_HOST is not a valid url: %s", uri)
+ return nil, fmt.Errorf("value of CONTAINER_HOST is not a valid url: %s: %w", uri, err)
}
- // Now we setup the http Client to use the connection above
+ // Now we set up the http Client to use the connection above
var connection Connection
switch _url.Scheme {
case "ssh":
@@ -117,16 +117,16 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string)
}
connection = tcpClient(_url)
default:
- return nil, errors.Errorf("unable to create connection. %q is not a supported schema", _url.Scheme)
+ return nil, fmt.Errorf("unable to create connection. %q is not a supported scheme", _url.Scheme)
}
if err != nil {
- return nil, errors.Wrapf(err, "unable to connect to Podman. failed to create %sClient", _url.Scheme)
+ return nil, fmt.Errorf("unable to connect to Podman. failed to create %sClient: %w", _url.Scheme, err)
}
ctx = context.WithValue(ctx, clientKey, &connection)
serviceVersion, err := pingNewConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "unable to connect to Podman socket")
+ return nil, fmt.Errorf("unable to connect to Podman socket: %w", err)
}
ctx = context.WithValue(ctx, versionKey, serviceVersion)
return ctx, nil
@@ -164,7 +164,7 @@ func pingNewConnection(ctx context.Context) (*semver.Version, error) {
if response.StatusCode == http.StatusOK {
versionHdr := response.Header.Get("Libpod-API-Version")
if versionHdr == "" {
- logrus.Info("Service did not provide Libpod-API-Version Header")
+ logrus.Warn("Service did not provide Libpod-API-Version Header")
return new(semver.Version), nil
}
versionSrv, err := semver.ParseTolerant(versionHdr)
@@ -177,11 +177,11 @@ func pingNewConnection(ctx context.Context) (*semver.Version, error) {
// Server's job when Client version is equal or older
return &versionSrv, nil
case 1:
- return nil, errors.Errorf("server API version is too old. Client %q server %q",
+ return nil, fmt.Errorf("server API version is too old. Client %q server %q",
version.APIVersion[version.Libpod][version.MinimalAPI].String(), versionSrv.String())
}
}
- return nil, errors.Errorf("ping response was %d", response.StatusCode)
+ return nil, fmt.Errorf("ping response was %d", response.StatusCode)
}
func sshClient(_url *url.URL, secure bool, passPhrase string, identity string) (Connection, error) {
@@ -193,7 +193,7 @@ func sshClient(_url *url.URL, secure bool, passPhrase string, identity string) (
if len(identity) > 0 {
s, err := terminal.PublicKey(identity, []byte(passPhrase))
if err != nil {
- return Connection{}, errors.Wrapf(err, "failed to parse identity %q", identity)
+ return Connection{}, fmt.Errorf("failed to parse identity %q: %w", identity, err)
}
signers = append(signers, s)
@@ -289,7 +289,7 @@ func sshClient(_url *url.URL, secure bool, passPhrase string, identity string) (
},
)
if err != nil {
- return Connection{}, errors.Wrapf(err, "connection to bastion host (%s) failed", _url.String())
+ return Connection{}, fmt.Errorf("connection to bastion host (%s) failed: %w", _url.String(), err)
}
connection := Connection{URI: _url}
@@ -315,7 +315,8 @@ func unixClient(_url *url.URL) Connection {
return connection
}
-// DoRequest assembles the http request and returns the response
+// DoRequest assembles the http request and returns the response.
+// The caller must close the response body.
func (c *Connection) DoRequest(ctx context.Context, httpBody io.Reader, httpMethod, endpoint string, queryParams url.Values, headers http.Header, pathValues ...string) (*APIResponse, error) {
var (
err error
@@ -361,7 +362,7 @@ func (c *Connection) DoRequest(ctx context.Context, httpBody io.Reader, httpMeth
// Give the Do three chances in the case of a comm/service hiccup
for i := 1; i <= 3; i++ {
- response, err = c.Client.Do(req) // nolint
+ response, err = c.Client.Do(req) //nolint:bodyclose // The caller has to close the body.
if err == nil {
break
}
@@ -378,7 +379,7 @@ func (c *Connection) GetDialer(ctx context.Context) (net.Conn, error) {
return transport.DialContext(ctx, c.URI.Scheme, c.URI.String())
}
- return nil, errors.New("Unable to get dial context")
+ return nil, errors.New("unable to get dial context")
}
// IsInformational returns true if the response code is 1xx
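With the DoRequest comment now spelling out the contract, callers own the response body. A hypothetical binding illustrating the expected pattern, assuming a context prepared by bindings.NewConnection and an illustrative endpoint string:

package sketch

import (
	"context"
	"io/ioutil"
	"net/http"

	"github.com/containers/podman/v4/pkg/bindings"
)

// inspectRaw shows the caller-closes-the-body contract stated above.
// The endpoint string is illustrative, not taken from this patch.
func inspectRaw(ctx context.Context, nameOrID string) ([]byte, error) {
	conn, err := bindings.GetClient(ctx) // ctx assumed to come from bindings.NewConnection
	if err != nil {
		return nil, err
	}
	resp, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/json", nil, nil, nameOrID)
	if err != nil {
		return nil, err
	}
	// DoRequest hands back a live response; the caller must close the body.
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}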
diff --git a/pkg/bindings/containers/archive.go b/pkg/bindings/containers/archive.go
index 4f4b5a36a..660d9da6b 100644
--- a/pkg/bindings/containers/archive.go
+++ b/pkg/bindings/containers/archive.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "errors"
"io"
"net/http"
"net/url"
@@ -9,7 +10,6 @@ import (
"github.com/containers/podman/v4/pkg/bindings"
"github.com/containers/podman/v4/pkg/copy"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
)
// Stat checks if the specified path is on the container. Note that the stat
@@ -55,8 +55,6 @@ func CopyFromArchive(ctx context.Context, nameOrID string, path string, reader i
}
// CopyFromArchiveWithOptions copy files into container
-//
-// FIXME: remove this function and make CopyFromArchive accept the option as the last parameter in podman 4.0
func CopyFromArchiveWithOptions(ctx context.Context, nameOrID string, path string, reader io.Reader, options *CopyOptions) (entities.ContainerCopyFunc, error) {
conn, err := bindings.GetClient(ctx)
if err != nil {
diff --git a/pkg/bindings/containers/attach.go b/pkg/bindings/containers/attach.go
index d84b47052..e23ee5ee9 100644
--- a/pkg/bindings/containers/attach.go
+++ b/pkg/bindings/containers/attach.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/binary"
+ "errors"
"fmt"
"io"
"net"
@@ -14,11 +15,10 @@ import (
"strconv"
"time"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/bindings"
- "github.com/containers/podman/v4/utils"
"github.com/moby/term"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
terminal "golang.org/x/term"
)
@@ -54,8 +54,6 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri
stderr = (io.Writer)(nil)
}
- logrus.Infof("Going to attach to container %q", nameOrID)
-
conn, err := bindings.GetClient(ctx)
if err != nil {
return err
@@ -77,7 +75,7 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri
detachKeysInBytes, err = term.ToBytes(options.GetDetachKeys())
if err != nil {
- return errors.Wrapf(err, "invalid detach keys")
+ return fmt.Errorf("invalid detach keys: %w", err)
}
}
if isSet.stdin {
@@ -161,7 +159,7 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri
go func() {
logrus.Debugf("Copying STDIN to socket")
- _, err := utils.CopyDetachable(socket, stdin, detachKeysInBytes)
+ _, err := util.CopyDetachable(socket, stdin, detachKeysInBytes)
if err != nil && err != define.ErrDetach {
logrus.Errorf("Failed to write input to service: %v", err)
}
@@ -263,7 +261,7 @@ func DemuxHeader(r io.Reader, buffer []byte) (fd, sz int, err error) {
fd = int(buffer[0])
if fd < 0 || fd > 3 {
- err = errors.Wrapf(ErrLostSync, fmt.Sprintf(`channel "%d" found, 0-3 supported`, fd))
+ err = fmt.Errorf(`channel "%d" found, 0-3 supported: %w`, fd, ErrLostSync)
return
}
@@ -357,7 +355,7 @@ func attachHandleResize(ctx, winCtx context.Context, winChange chan os.Signal, i
resizeErr = ResizeContainerTTY(ctx, id, new(ResizeTTYOptions).WithHeight(h).WithWidth(w))
}
if resizeErr != nil {
- logrus.Infof("Failed to resize TTY: %v", resizeErr)
+ logrus.Debugf("Failed to resize TTY: %v", resizeErr)
}
}
@@ -499,7 +497,7 @@ func ExecStartAndAttach(ctx context.Context, sessionID string, options *ExecStar
if options.GetAttachInput() {
go func() {
logrus.Debugf("Copying STDIN to socket")
- _, err := utils.CopyDetachable(socket, options.InputStream, []byte{})
+ _, err := util.CopyDetachable(socket, options.InputStream, []byte{})
if err != nil {
logrus.Errorf("Failed to write input to service: %v", err)
}
@@ -520,7 +518,7 @@ func ExecStartAndAttach(ctx context.Context, sessionID string, options *ExecStar
return fmt.Errorf("exec session %s has a terminal and must have STDOUT enabled", sessionID)
}
// If not multiplex'ed, read from server and write to stdout
- _, err := utils.CopyDetachable(options.GetOutputStream(), socket, []byte{})
+ _, err := util.CopyDetachable(options.GetOutputStream(), socket, []byte{})
if err != nil {
return err
}
diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go
index be421cc8b..80ec7bc6f 100644
--- a/pkg/bindings/containers/containers.go
+++ b/pkg/bindings/containers/containers.go
@@ -2,6 +2,8 @@ package containers
import (
"context"
+ "errors"
+ "fmt"
"io"
"net/http"
"net/url"
@@ -12,8 +14,6 @@ import (
"github.com/containers/podman/v4/pkg/bindings"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/entities/reports"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
var (
@@ -25,7 +25,7 @@ var (
// the most recent number of containers. The pod and size booleans indicate that pod information and rootfs
// size information should also be included. Finally, the sync bool synchronizes the OCI runtime and
// container state.
-func List(ctx context.Context, options *ListOptions) ([]entities.ListContainer, error) { // nolint:typecheck
+func List(ctx context.Context, options *ListOptions) ([]entities.ListContainer, error) {
if options == nil {
options = new(ListOptions)
}
@@ -201,7 +201,6 @@ func Start(ctx context.Context, nameOrID string, options *StartOptions) error {
if options == nil {
options = new(StartOptions)
}
- logrus.Infof("Going to start container %q", nameOrID)
conn, err := bindings.GetClient(ctx)
if err != nil {
return err
@@ -339,7 +338,7 @@ func Unpause(ctx context.Context, nameOrID string, options *UnpauseOptions) erro
// Wait blocks until the given container reaches a condition. If not provided, the condition will
// default to stopped. If the condition is stopped, an exit code for the container will be provided. The
// nameOrID can be a container name or a partial/full ID.
-func Wait(ctx context.Context, nameOrID string, options *WaitOptions) (int32, error) { // nolint
+func Wait(ctx context.Context, nameOrID string, options *WaitOptions) (int32, error) {
if options == nil {
options = new(WaitOptions)
}
@@ -449,7 +448,7 @@ func ContainerInit(ctx context.Context, nameOrID string, options *InitOptions) e
defer response.Body.Close()
if response.StatusCode == http.StatusNotModified {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", nameOrID)
+ return fmt.Errorf("container %s has already been created in runtime: %w", nameOrID, define.ErrCtrStateInvalid)
}
return response.Process(nil)
}
diff --git a/pkg/bindings/containers/exec.go b/pkg/bindings/containers/exec.go
index 3ad5d67d2..3d19fb812 100644
--- a/pkg/bindings/containers/exec.go
+++ b/pkg/bindings/containers/exec.go
@@ -3,6 +3,8 @@ package containers
import (
"bytes"
"context"
+ "errors"
+ "fmt"
"net/http"
"strings"
@@ -11,7 +13,6 @@ import (
"github.com/containers/podman/v4/pkg/bindings"
"github.com/containers/podman/v4/pkg/domain/entities"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -27,12 +28,12 @@ func ExecCreate(ctx context.Context, nameOrID string, config *handlers.ExecCreat
}
if config == nil {
- return "", errors.Errorf("must provide a configuration for exec session")
+ return "", errors.New("must provide a configuration for exec session")
}
requestJSON, err := json.Marshal(config)
if err != nil {
- return "", errors.Wrapf(err, "error marshalling exec config to JSON")
+ return "", fmt.Errorf("error marshalling exec config to JSON: %w", err)
}
jsonReader := strings.NewReader(string(requestJSON))
diff --git a/pkg/bindings/containers/logs.go b/pkg/bindings/containers/logs.go
index 8ea8ed7fa..9ebfd90da 100644
--- a/pkg/bindings/containers/logs.go
+++ b/pkg/bindings/containers/logs.go
@@ -2,13 +2,13 @@ package containers
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
"strconv"
"github.com/containers/podman/v4/pkg/bindings"
- "github.com/pkg/errors"
)
// Logs obtains a container's logs given the options provided. The logs are then sent to the
diff --git a/pkg/bindings/containers/types.go b/pkg/bindings/containers/types.go
index 81d491bb7..f640ba756 100644
--- a/pkg/bindings/containers/types.go
+++ b/pkg/bindings/containers/types.go
@@ -287,4 +287,7 @@ type CopyOptions struct {
Chown *bool `schema:"copyUIDGID"`
// Map to translate path names.
Rename map[string]string
+ // NoOverwriteDirNonDir when true prevents an existing directory or file from being overwritten
+ // by the other type.
+ NoOverwriteDirNonDir *bool
}
diff --git a/pkg/bindings/containers/types_copy_options.go b/pkg/bindings/containers/types_copy_options.go
index 8fcfe71a6..e43d79752 100644
--- a/pkg/bindings/containers/types_copy_options.go
+++ b/pkg/bindings/containers/types_copy_options.go
@@ -46,3 +46,18 @@ func (o *CopyOptions) GetRename() map[string]string {
}
return o.Rename
}
+
+// WithNoOverwriteDirNonDir set field NoOverwriteDirNonDir to given value
+func (o *CopyOptions) WithNoOverwriteDirNonDir(value bool) *CopyOptions {
+ o.NoOverwriteDirNonDir = &value
+ return o
+}
+
+// GetNoOverwriteDirNonDir returns value of field NoOverwriteDirNonDir
+func (o *CopyOptions) GetNoOverwriteDirNonDir() bool {
+ if o.NoOverwriteDirNonDir == nil {
+ var z bool
+ return z
+ }
+ return *o.NoOverwriteDirNonDir
+}
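The generated WithNoOverwriteDirNonDir/GetNoOverwriteDirNonDir pair follows the bindings' usual option pattern: setters chain, and the pointer field lets the handler distinguish an unset value from an explicit false. A short usage sketch:

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	opts := new(containers.CopyOptions).WithNoOverwriteDirNonDir(true)
	fmt.Println(opts.GetNoOverwriteDirNonDir()) // true

	// Unset fields stay nil and the getter returns the zero value,
	// which is how the server side tells "not specified" from "false".
	fmt.Println(new(containers.CopyOptions).GetNoOverwriteDirNonDir()) // false
}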
diff --git a/pkg/bindings/errors.go b/pkg/bindings/errors.go
index eb95764ba..29f087c22 100644
--- a/pkg/bindings/errors.go
+++ b/pkg/bindings/errors.go
@@ -2,10 +2,11 @@ package bindings
import (
"encoding/json"
+ "errors"
+ "fmt"
"io/ioutil"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/pkg/errors"
)
var (
@@ -30,7 +31,7 @@ func (h APIResponse) Process(unmarshalInto interface{}) error {
func (h APIResponse) ProcessWithError(unmarshalInto interface{}, unmarshalErrorInto interface{}) error {
data, err := ioutil.ReadAll(h.Response.Body)
if err != nil {
- return errors.Wrap(err, "unable to process API response")
+ return fmt.Errorf("unable to process API response: %w", err)
}
if h.IsSuccess() || h.IsRedirection() {
if unmarshalInto != nil {
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index 51dcd2aa5..6883585e2 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -5,6 +5,7 @@ import (
"compress/gzip"
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/fs"
@@ -28,7 +29,6 @@ import (
"github.com/docker/go-units"
"github.com/hashicorp/go-multierror"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -65,6 +65,14 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
params.Set("annotations", l)
}
+ if cppflags := options.CPPFlags; len(cppflags) > 0 {
+ l, err := jsoniter.MarshalToString(cppflags)
+ if err != nil {
+ return nil, err
+ }
+ params.Set("cppflags", l)
+ }
+
if options.AllPlatforms {
params.Add("allplatforms", "1")
}
@@ -73,6 +81,13 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
for _, tag := range options.AdditionalTags {
params.Add("t", tag)
}
+ if additionalBuildContexts := options.AdditionalBuildContexts; len(additionalBuildContexts) > 0 {
+ additionalBuildContextMap, err := jsoniter.Marshal(additionalBuildContexts)
+ if err != nil {
+ return nil, err
+ }
+ params.Set("additionalbuildcontexts", string(additionalBuildContextMap))
+ }
if buildArgs := options.Args; len(buildArgs) > 0 {
bArgs, err := jsoniter.MarshalToString(buildArgs)
if err != nil {
@@ -155,6 +170,11 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
} else {
params.Set("rm", "0")
}
+ if options.CommonBuildOpts.OmitHistory {
+ params.Set("omithistory", "1")
+ } else {
+ params.Set("omithistory", "0")
+ }
if len(options.From) > 0 {
params.Set("from", options.From)
}
@@ -523,14 +543,14 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
if err := dec.Decode(&s); err != nil {
if errors.Is(err, io.ErrUnexpectedEOF) {
- return nil, errors.Wrap(err, "server probably quit")
+ return nil, fmt.Errorf("server probably quit: %w", err)
}
// EOF means the stream is over in which case we need
// to have read the id.
if errors.Is(err, io.EOF) && id != "" {
break
}
- return &entities.BuildReport{ID: id}, errors.Wrap(err, "decoding stream")
+ return &entities.BuildReport{ID: id}, fmt.Errorf("decoding stream: %w", err)
}
switch {
@@ -554,11 +574,11 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
func nTar(excludes []string, sources ...string) (io.ReadCloser, error) {
pm, err := fileutils.NewPatternMatcher(excludes)
if err != nil {
- return nil, errors.Wrapf(err, "error processing excludes list %v", excludes)
+ return nil, fmt.Errorf("error processing excludes list %v: %w", excludes, err)
}
if len(sources) == 0 {
- return nil, errors.New("No source(s) provided for build")
+ return nil, errors.New("no source(s) provided for build")
}
pr, pw := io.Pipe()
@@ -601,9 +621,9 @@ func nTar(excludes []string, sources ...string) (io.ReadCloser, error) {
}
name := filepath.ToSlash(strings.TrimPrefix(path, s+string(filepath.Separator)))
- excluded, err := pm.Matches(name) // nolint:staticcheck
+ excluded, err := pm.Matches(name) //nolint:staticcheck
if err != nil {
- return errors.Wrapf(err, "error checking if %q is excluded", name)
+ return fmt.Errorf("error checking if %q is excluded: %w", name, err)
}
if excluded {
// Note: filepath.SkipDir is not possible to use given .dockerignore semantics.
@@ -706,7 +726,7 @@ func parseDockerignore(root string) ([]string, error) {
var dockerIgnoreErr error
ignore, dockerIgnoreErr = ioutil.ReadFile(filepath.Join(root, ".dockerignore"))
if dockerIgnoreErr != nil && !os.IsNotExist(dockerIgnoreErr) {
- return nil, errors.Wrapf(err, "error reading .containerignore: '%s'", root)
+ return nil, fmt.Errorf("error reading .containerignore: '%s': %w", root, err)
}
}
rawexcludes := strings.Split(string(ignore), "\n")
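
The new cppflags and additionalbuildcontexts values travel the same way as the existing build arguments: JSON-encoded with jsoniter and stored under a single query key for the server to decode, while plain switches such as rm and omithistory are written directly as "0"/"1". A reduced sketch of that encode-and-set step:

package scratch

import (
	"net/url"

	jsoniter "github.com/json-iterator/go"
)

// encodeListParam JSON-encodes a string slice and stores it under one query
// key, mirroring how Build() serializes values such as cppflags.
func encodeListParam(params url.Values, key string, values []string) error {
	if len(values) == 0 {
		return nil
	}
	encoded, err := jsoniter.MarshalToString(values)
	if err != nil {
		return err
	}
	params.Set(key, encoded)
	return nil
}

The service side is then expected to unmarshal the value back into a list before handing it to the builder.
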
diff --git a/pkg/bindings/images/build_unix.go b/pkg/bindings/images/build_unix.go
index 32e2ba9af..07bb8cbcd 100644
--- a/pkg/bindings/images/build_unix.go
+++ b/pkg/bindings/images/build_unix.go
@@ -11,7 +11,7 @@ import (
func checkHardLink(fi os.FileInfo) (devino, bool) {
st := fi.Sys().(*syscall.Stat_t)
return devino{
- Dev: uint64(st.Dev), // nolint: unconvert
+ Dev: uint64(st.Dev), //nolint: unconvert
Ino: st.Ino,
}, st.Nlink > 1
}
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index 8e3b07929..cd5147629 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -2,6 +2,7 @@ package images
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/pkg/bindings"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/entities/reports"
- "github.com/pkg/errors"
)
// Exists a lightweight way to determine if an image exists in local storage. It returns a
@@ -280,7 +280,6 @@ func Push(ctx context.Context, source string, destination string, options *PushO
if err != nil {
return err
}
- // TODO: have a global system context we can pass around (1st argument)
header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword())
if err != nil {
return err
@@ -329,7 +328,6 @@ func Search(ctx context.Context, term string, options *SearchOptions) ([]entitie
params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify()))
}
- // TODO: have a global system context we can pass around (1st argument)
header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, "", "")
if err != nil {
return nil, err
@@ -348,3 +346,23 @@ func Search(ctx context.Context, term string, options *SearchOptions) ([]entitie
return results, nil
}
+
+func Scp(ctx context.Context, source, destination *string, options ScpOptions) (reports.ScpReport, error) {
+ rep := reports.ScpReport{}
+
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return rep, err
+ }
+ params, err := options.ToParams()
+ if err != nil {
+ return rep, err
+ }
+ response, err := conn.DoRequest(ctx, nil, http.MethodPost, fmt.Sprintf("/images/scp/%s", *source), params, nil)
+ if err != nil {
+ return rep, err
+ }
+ defer response.Body.Close()
+
+ return rep, response.Process(&rep)
+}
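
A hedged usage sketch for the new Scp binding above: it POSTs to /images/scp/{name} and returns the transferred image ID in an ScpReport. bindings.NewConnection is assumed to be the usual way to obtain the connection context, and the socket URI is only an example value.

package scratch

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/images"
)

func scpExample() error {
	// Example socket URI; point this at the Podman service being targeted.
	connCtx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		return err
	}

	source := "quay.io/libpod/alpine:latest"
	// Destination stays nil here; per the binding above only *source is read
	// on the client side, the rest is resolved by the service.
	report, err := images.Scp(connCtx, &source, nil, images.ScpOptions{})
	if err != nil {
		return err
	}
	fmt.Println("transferred image:", report.Id)
	return nil
}
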
diff --git a/pkg/bindings/images/pull.go b/pkg/bindings/images/pull.go
index 20e47179c..1a4aa3038 100644
--- a/pkg/bindings/images/pull.go
+++ b/pkg/bindings/images/pull.go
@@ -3,6 +3,7 @@ package images
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -15,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/bindings"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/pkg/errors"
)
// Pull is the binding for libpod's v2 endpoints for pulling images. Note that
@@ -42,7 +42,6 @@ func Pull(ctx context.Context, rawImage string, options *PullOptions) ([]string,
params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify()))
}
- // TODO: have a global system context we can pass around (1st argument)
header, err := auth.MakeXRegistryAuthHeader(&types.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword())
if err != nil {
return nil, err
@@ -92,7 +91,7 @@ func Pull(ctx context.Context, rawImage string, options *PullOptions) ([]string,
images = report.Images
case report.ID != "":
default:
- return images, errors.Errorf("failed to parse pull results stream, unexpected input: %v", report)
+ return images, fmt.Errorf("failed to parse pull results stream, unexpected input: %v", report)
}
}
return images, errorhandling.JoinErrors(pullErrors)
diff --git a/pkg/bindings/images/rm.go b/pkg/bindings/images/rm.go
index b80bacf45..eb3eef10c 100644
--- a/pkg/bindings/images/rm.go
+++ b/pkg/bindings/images/rm.go
@@ -16,9 +16,6 @@ func Remove(ctx context.Context, images []string, options *RemoveOptions) (*enti
if options == nil {
options = new(RemoveOptions)
}
- // FIXME - bindings tests are missing for this endpoint. Once the CI is
- // re-enabled for bindings, we need to add them. At the time of writing,
- // the tests don't compile.
var report types.LibpodImagesRemoveReport
conn, err := bindings.GetClient(ctx)
if err != nil {
diff --git a/pkg/bindings/images/types.go b/pkg/bindings/images/types.go
index 8e5e7ee92..3728ae5c0 100644
--- a/pkg/bindings/images/types.go
+++ b/pkg/bindings/images/types.go
@@ -13,6 +13,8 @@ type RemoveOptions struct {
Force *bool
// Ignore if a specified image does not exist and do not throw an error.
Ignore *bool
+ // Confirms if given name is a manifest list and removes it, otherwise returns error.
+ LookupManifest *bool
}
//go:generate go run ../generator/generator.go DiffOptions
@@ -127,6 +129,8 @@ type PushOptions struct {
Password *string
// SkipTLSVerify to skip HTTPS and certificate verification.
SkipTLSVerify *bool
+ // RemoveSignatures discards any pre-existing signatures in the image.
+ RemoveSignatures *bool
// Username for authenticating against the registry.
Username *string
}
@@ -186,3 +190,8 @@ type BuildOptions struct {
// ExistsOptions are optional options for checking if an image exists
type ExistsOptions struct {
}
+
+type ScpOptions struct {
+ Quiet *bool
+ Destination *string
+}
diff --git a/pkg/bindings/images/types_push_options.go b/pkg/bindings/images/types_push_options.go
index 4985c9451..25f6c5546 100644
--- a/pkg/bindings/images/types_push_options.go
+++ b/pkg/bindings/images/types_push_options.go
@@ -107,6 +107,21 @@ func (o *PushOptions) GetSkipTLSVerify() bool {
return *o.SkipTLSVerify
}
+// WithRemoveSignatures set field RemoveSignatures to given value
+func (o *PushOptions) WithRemoveSignatures(value bool) *PushOptions {
+ o.RemoveSignatures = &value
+ return o
+}
+
+// GetRemoveSignatures returns value of field RemoveSignatures
+func (o *PushOptions) GetRemoveSignatures() bool {
+ if o.RemoveSignatures == nil {
+ var z bool
+ return z
+ }
+ return *o.RemoveSignatures
+}
+
// WithUsername set field Username to given value
func (o *PushOptions) WithUsername(value string) *PushOptions {
o.Username = &value
diff --git a/pkg/bindings/images/types_remove_options.go b/pkg/bindings/images/types_remove_options.go
index 613a33183..559ebcfd5 100644
--- a/pkg/bindings/images/types_remove_options.go
+++ b/pkg/bindings/images/types_remove_options.go
@@ -61,3 +61,18 @@ func (o *RemoveOptions) GetIgnore() bool {
}
return *o.Ignore
}
+
+// WithLookupManifest set field LookupManifest to given value
+func (o *RemoveOptions) WithLookupManifest(value bool) *RemoveOptions {
+ o.LookupManifest = &value
+ return o
+}
+
+// GetLookupManifest returns value of field LookupManifest
+func (o *RemoveOptions) GetLookupManifest() bool {
+ if o.LookupManifest == nil {
+ var z bool
+ return z
+ }
+ return *o.LookupManifest
+}
diff --git a/pkg/bindings/images/types_scp_options.go b/pkg/bindings/images/types_scp_options.go
new file mode 100644
index 000000000..5a1178cb1
--- /dev/null
+++ b/pkg/bindings/images/types_scp_options.go
@@ -0,0 +1,12 @@
+package images
+
+import (
+ "net/url"
+
+ "github.com/containers/podman/v4/pkg/bindings/internal/util"
+)
+
+// ToParams formats struct fields to be passed to API service
+func (o *ScpOptions) ToParams() (url.Values, error) {
+ return util.ToParams(o)
+}
diff --git a/pkg/bindings/manifests/manifests.go b/pkg/bindings/manifests/manifests.go
index feff5d6e8..80153c4b4 100644
--- a/pkg/bindings/manifests/manifests.go
+++ b/pkg/bindings/manifests/manifests.go
@@ -2,6 +2,8 @@ package manifests
import (
"context"
+ "errors"
+ "fmt"
"io/ioutil"
"net/http"
"strconv"
@@ -15,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
)
// Create creates a manifest for the given name. Optional images to be associated with
@@ -117,6 +118,26 @@ func Remove(ctx context.Context, name, digest string, _ *RemoveOptions) (string,
return Modify(ctx, name, []string{digest}, optionsv4)
}
+// Delete removes specified manifest from local storage.
+func Delete(ctx context.Context, name string) (*entities.ManifestRemoveReport, error) {
+ var report entities.ManifestRemoveReport
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ response, err := conn.DoRequest(ctx, nil, http.MethodDelete, "/manifests/%s", nil, nil, name)
+ if err != nil {
+ return nil, err
+ }
+ defer response.Body.Close()
+
+ if err := response.Process(&report); err != nil {
+ return nil, err
+ }
+
+ return &report, errorhandling.JoinErrors(errorhandling.StringsToErrors(report.Errors))
+}
+
// Push takes a manifest list and pushes to a destination. If the destination is not specified,
// the name will be used instead. If the optional all boolean is specified, all images specified
// in the list will be pushed as well.
@@ -199,19 +220,19 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp
data, err := ioutil.ReadAll(response.Body)
if err != nil {
- return "", errors.Wrap(err, "unable to process API response")
+ return "", fmt.Errorf("unable to process API response: %w", err)
}
if response.IsSuccess() || response.IsRedirection() {
var report entities.ManifestModifyReport
if err = jsoniter.Unmarshal(data, &report); err != nil {
- return "", errors.Wrap(err, "unable to decode API response")
+ return "", fmt.Errorf("unable to decode API response: %w", err)
}
err = errorhandling.JoinErrors(report.Errors)
if err != nil {
errModel := errorhandling.ErrorModel{
- Because: (errors.Cause(err)).Error(),
+ Because: errorhandling.Cause(err).Error(),
Message: err.Error(),
ResponseCode: response.StatusCode,
}
@@ -224,7 +245,7 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp
ResponseCode: response.StatusCode,
}
if err = jsoniter.Unmarshal(data, &errModel); err != nil {
- return "", errors.Wrap(err, "unable to decode API response")
+ return "", fmt.Errorf("unable to decode API response: %w", err)
}
return "", &errModel
}
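
errorhandling.Cause takes over the role of the dropped errors.Cause: finding the innermost error of a chain built with %w. A minimal stand-alone equivalent based on errors.Unwrap, shown only to illustrate the semantics rather than the actual errorhandling implementation:

package scratch

import "errors"

// rootCause walks a chain of errors wrapped with fmt.Errorf("...: %w", err)
// and returns the innermost one, which is what a Cause-style helper reports.
func rootCause(err error) error {
	for {
		next := errors.Unwrap(err)
		if next == nil {
			return err
		}
		err = next
	}
}
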
diff --git a/pkg/bindings/manifests/types.go b/pkg/bindings/manifests/types.go
index d0b0b2e71..e23ef798d 100644
--- a/pkg/bindings/manifests/types.go
+++ b/pkg/bindings/manifests/types.go
@@ -44,16 +44,18 @@ type RemoveOptions struct {
type ModifyOptions struct {
// Operation values are "update", "remove" and "annotate". This allows the service to
// efficiently perform each update on a manifest list.
- Operation *string
- All *bool // All when true, operate on all images in a manifest list that may be included in Images
- Annotations map[string]string // Annotations to add to manifest list
- Arch *string // Arch overrides the architecture for the image
- Features []string // Feature list for the image
- Images []string // Images is an optional list of images to add/remove to/from manifest list depending on operation
- OS *string // OS overrides the operating system for the image
- OSFeatures []string // OS features for the image
- OSVersion *string // OSVersion overrides the operating system for the image
- Variant *string // Variant overrides the operating system variant for the image
+ Operation *string
+ All *bool // All when true, operate on all images in a manifest list that may be included in Images
+ Annotations map[string]string // Annotations to add to manifest list
+ Arch *string // Arch overrides the architecture for the image
+ Features []string // Feature list for the image
+ Images []string // Images is an optional list of images to add/remove to/from manifest list depending on operation
+ OS *string // OS overrides the operating system for the image
+ // OS features for the image
+ OSFeatures []string `json:"os_features" schema:"os_features"`
+ // OSVersion overrides the operating system version for the image
+ OSVersion *string `json:"os_version" schema:"os_version"`
+ Variant *string // Variant overrides the operating system variant for the image
Authfile *string
Password *string
Username *string
diff --git a/pkg/bindings/manifests/types_modify_options.go b/pkg/bindings/manifests/types_modify_options.go
index 9d2ed2613..ab00cb2c5 100644
--- a/pkg/bindings/manifests/types_modify_options.go
+++ b/pkg/bindings/manifests/types_modify_options.go
@@ -122,13 +122,13 @@ func (o *ModifyOptions) GetOS() string {
return *o.OS
}
-// WithOSFeatures set oS features for the image
+// WithOSFeatures set field OSFeatures to given value
func (o *ModifyOptions) WithOSFeatures(value []string) *ModifyOptions {
o.OSFeatures = value
return o
}
-// GetOSFeatures returns value of oS features for the image
+// GetOSFeatures returns value of field OSFeatures
func (o *ModifyOptions) GetOSFeatures() []string {
if o.OSFeatures == nil {
var z []string
@@ -137,13 +137,13 @@ func (o *ModifyOptions) GetOSFeatures() []string {
return o.OSFeatures
}
-// WithOSVersion set oSVersion overrides the operating system for the image
+// WithOSVersion set field OSVersion to given value
func (o *ModifyOptions) WithOSVersion(value string) *ModifyOptions {
o.OSVersion = &value
return o
}
-// GetOSVersion returns value of oSVersion overrides the operating system for the image
+// GetOSVersion returns value of field OSVersion
func (o *ModifyOptions) GetOSVersion() string {
if o.OSVersion == nil {
var z string
diff --git a/pkg/bindings/play/play.go b/pkg/bindings/play/play.go
index 8058a8514..0261b0250 100644
--- a/pkg/bindings/play/play.go
+++ b/pkg/bindings/play/play.go
@@ -46,7 +46,6 @@ func KubeWithBody(ctx context.Context, body io.Reader, options *KubeOptions) (*e
params.Set("start", strconv.FormatBool(options.GetStart()))
}
- // TODO: have a global system context we can pass around (1st argument)
header, err := auth.MakeXRegistryAuthHeader(&types.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword())
if err != nil {
return nil, err
diff --git a/pkg/bindings/system/system.go b/pkg/bindings/system/system.go
index 5ef78e444..dae80384b 100644
--- a/pkg/bindings/system/system.go
+++ b/pkg/bindings/system/system.go
@@ -3,6 +3,7 @@ package system
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -11,7 +12,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/bindings"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -37,7 +37,7 @@ func Events(ctx context.Context, eventChan chan entities.Event, cancelChan chan
go func() {
<-cancelChan
err = response.Body.Close()
- logrus.Error(errors.Wrap(err, "unable to close event response body"))
+ logrus.Errorf("Unable to close event response body: %v", err)
}()
}
@@ -56,7 +56,7 @@ func Events(ctx context.Context, eventChan chan entities.Event, cancelChan chan
case errors.Is(err, io.EOF):
return nil
default:
- return errors.Wrap(err, "unable to decode event response")
+ return fmt.Errorf("unable to decode event response: %w", err)
}
}
diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go
index 950fd21e6..6b0175f59 100644
--- a/pkg/bindings/test/common_test.go
+++ b/pkg/bindings/test/common_test.go
@@ -16,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/specgen"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega/gexec"
- "github.com/pkg/errors"
)
type testImage struct {
@@ -127,7 +126,7 @@ func (b *bindingTest) runPodman(command []string) *gexec.Session {
fmt.Printf("Running: %s %s\n", podmanBinary, strings.Join(cmd, " "))
session, err := gexec.Start(c, ginkgo.GinkgoWriter, ginkgo.GinkgoWriter)
if err != nil {
- panic(errors.Errorf("unable to run podman command: %q", cmd))
+ panic(fmt.Errorf("unable to run podman command: %q", cmd))
}
return session
}
diff --git a/pkg/bindings/test/manifests_test.go b/pkg/bindings/test/manifests_test.go
index e6c93817d..6a34ef5a6 100644
--- a/pkg/bindings/test/manifests_test.go
+++ b/pkg/bindings/test/manifests_test.go
@@ -62,6 +62,19 @@ var _ = Describe("podman manifest", func() {
Expect(len(list.Manifests)).To(BeNumerically("==", 1))
})
+ It("delete manifest", func() {
+ id, err := manifests.Create(bt.conn, "quay.io/libpod/foobar:latest", []string{}, nil)
+ Expect(err).ToNot(HaveOccurred(), err)
+ list, err := manifests.Inspect(bt.conn, id, nil)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(len(list.Manifests)).To(BeZero())
+
+ removeReport, err := manifests.Delete(bt.conn, "quay.io/libpod/foobar:latest")
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(removeReport.Deleted)).To(BeNumerically("==", 1))
+ })
+
It("inspect", func() {
_, err := manifests.Inspect(bt.conn, "larry", nil)
Expect(err).To(HaveOccurred())
diff --git a/pkg/channel/writer.go b/pkg/channel/writer.go
index ecb68e906..f17486075 100644
--- a/pkg/channel/writer.go
+++ b/pkg/channel/writer.go
@@ -1,10 +1,9 @@
package channel
import (
+ "errors"
"io"
"sync"
-
- "github.com/pkg/errors"
)
// WriteCloser is an io.WriteCloser that proxies Write() calls to a channel
diff --git a/pkg/checkpoint/checkpoint_restore.go b/pkg/checkpoint/checkpoint_restore.go
index 396b521a1..e7c843143 100644
--- a/pkg/checkpoint/checkpoint_restore.go
+++ b/pkg/checkpoint/checkpoint_restore.go
@@ -2,6 +2,8 @@ package checkpoint
import (
"context"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
@@ -16,7 +18,6 @@ import (
"github.com/containers/podman/v4/pkg/specgen/generate"
"github.com/containers/podman/v4/pkg/specgenutil"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -65,7 +66,7 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
// This should not happen as checkpoints with these options are not exported.
if len(ctrConfig.Dependencies) > 0 {
- return nil, errors.Errorf("Cannot import checkpoints of containers with dependencies")
+ return nil, errors.New("cannot import checkpoints of containers with dependencies")
}
// Volumes included in the checkpoint should not exist
@@ -76,7 +77,7 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
return nil, err
}
if exists {
- return nil, errors.Errorf("volume with name %s already exists. Use --ignore-volumes to not restore content of volumes", vol.Name)
+ return nil, fmt.Errorf("volume with name %s already exists. Use --ignore-volumes to not restore content of volumes", vol.Name)
}
}
}
@@ -106,11 +107,11 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
if restoreOptions.Pod != "" {
// Restoring into a Pod requires much newer versions of CRIU
if !criu.CheckForCriu(criu.PodCriuVersion) {
- return nil, errors.Errorf("restoring containers into pods requires at least CRIU %d", criu.PodCriuVersion)
+ return nil, fmt.Errorf("restoring containers into pods requires at least CRIU %d", criu.PodCriuVersion)
}
// The runtime also has to support it
if !crutils.CRRuntimeSupportsPodCheckpointRestore(runtime.GetOCIRuntimePath()) {
- return nil, errors.Errorf("runtime %s does not support pod restore", runtime.GetOCIRuntimePath())
+ return nil, fmt.Errorf("runtime %s does not support pod restore", runtime.GetOCIRuntimePath())
}
// Restoring into an existing Pod
ctrConfig.Pod = restoreOptions.Pod
@@ -120,12 +121,12 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
// Let's make sure we are restoring into a pod with the same shared namespaces.
pod, err := runtime.LookupPod(ctrConfig.Pod)
if err != nil {
- return nil, errors.Wrapf(err, "pod %q cannot be retrieved", ctrConfig.Pod)
+ return nil, fmt.Errorf("pod %q cannot be retrieved: %w", ctrConfig.Pod, err)
}
infraContainer, err := pod.InfraContainer()
if err != nil {
- return nil, errors.Wrapf(err, "cannot retrieve infra container from pod %q", ctrConfig.Pod)
+ return nil, fmt.Errorf("cannot retrieve infra container from pod %q: %w", ctrConfig.Pod, err)
}
// If a namespace was shared (!= "") it needs to be set to the new infrastructure container
@@ -133,14 +134,14 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
// container we abort.
if ctrConfig.IPCNsCtr != "" {
if !pod.SharesIPC() {
- return nil, errors.Errorf("pod %s does not share the IPC namespace", ctrConfig.Pod)
+ return nil, fmt.Errorf("pod %s does not share the IPC namespace", ctrConfig.Pod)
}
ctrConfig.IPCNsCtr = infraContainer.ID()
}
if ctrConfig.NetNsCtr != "" {
if !pod.SharesNet() {
- return nil, errors.Errorf("pod %s does not share the network namespace", ctrConfig.Pod)
+ return nil, fmt.Errorf("pod %s does not share the network namespace", ctrConfig.Pod)
}
ctrConfig.NetNsCtr = infraContainer.ID()
for net, opts := range ctrConfig.Networks {
@@ -154,21 +155,21 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
if ctrConfig.PIDNsCtr != "" {
if !pod.SharesPID() {
- return nil, errors.Errorf("pod %s does not share the PID namespace", ctrConfig.Pod)
+ return nil, fmt.Errorf("pod %s does not share the PID namespace", ctrConfig.Pod)
}
ctrConfig.PIDNsCtr = infraContainer.ID()
}
if ctrConfig.UTSNsCtr != "" {
if !pod.SharesUTS() {
- return nil, errors.Errorf("pod %s does not share the UTS namespace", ctrConfig.Pod)
+ return nil, fmt.Errorf("pod %s does not share the UTS namespace", ctrConfig.Pod)
}
ctrConfig.UTSNsCtr = infraContainer.ID()
}
if ctrConfig.CgroupNsCtr != "" {
if !pod.SharesCgroup() {
- return nil, errors.Errorf("pod %s does not share the cgroup namespace", ctrConfig.Pod)
+ return nil, fmt.Errorf("pod %s does not share the cgroup namespace", ctrConfig.Pod)
}
ctrConfig.CgroupNsCtr = infraContainer.ID()
}
@@ -180,7 +181,7 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
// Fix parent cgroup
cgroupPath, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "cannot retrieve cgroup path from pod %q", ctrConfig.Pod)
+ return nil, fmt.Errorf("cannot retrieve cgroup path from pod %q: %w", ctrConfig.Pod, err)
}
ctrConfig.CgroupParent = cgroupPath
@@ -228,14 +229,14 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
containerConfig := container.Config()
ctrName := ctrConfig.Name
if containerConfig.Name != ctrName {
- return nil, errors.Errorf("Name of restored container (%s) does not match requested name (%s)", containerConfig.Name, ctrName)
+ return nil, fmt.Errorf("name of restored container (%s) does not match requested name (%s)", containerConfig.Name, ctrName)
}
if !newName {
// Only check ID for a restore with the same name.
// Using -n to request a new name for the restored container, will also create a new ID
if containerConfig.ID != ctrID {
- return nil, errors.Errorf("ID of restored container (%s) does not match requested ID (%s)", containerConfig.ID, ctrID)
+ return nil, fmt.Errorf("ID of restored container (%s) does not match requested ID (%s)", containerConfig.ID, ctrID)
}
}
diff --git a/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
index 76c868cee..1437a09df 100644
--- a/pkg/checkpoint/crutils/checkpoint_restore_utils.go
+++ b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
@@ -2,6 +2,8 @@ package crutils
import (
"bytes"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -12,7 +14,6 @@ import (
"github.com/checkpoint-restore/go-criu/v5/stats"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
)
// This file mainly exists to make the checkpoint/restore functions
@@ -23,7 +24,7 @@ import (
func CRImportCheckpointWithoutConfig(destination, input string) error {
archiveFile, err := os.Open(input)
if err != nil {
- return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
+ return fmt.Errorf("failed to open checkpoint archive %s for import: %w", input, err)
}
defer archiveFile.Close()
@@ -35,7 +36,7 @@ func CRImportCheckpointWithoutConfig(destination, input string) error {
},
}
if err = archive.Untar(archiveFile, destination, options); err != nil {
- return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ return fmt.Errorf("unpacking of checkpoint archive %s failed: %w", input, err)
}
return nil
@@ -47,7 +48,7 @@ func CRImportCheckpointWithoutConfig(destination, input string) error {
func CRImportCheckpointConfigOnly(destination, input string) error {
archiveFile, err := os.Open(input)
if err != nil {
- return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
+ return fmt.Errorf("failed to open checkpoint archive %s for import: %w", input, err)
}
defer archiveFile.Close()
@@ -65,7 +66,7 @@ func CRImportCheckpointConfigOnly(destination, input string) error {
},
}
if err = archive.Untar(archiveFile, destination, options); err != nil {
- return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ return fmt.Errorf("unpacking of checkpoint archive %s failed: %w", input, err)
}
return nil
@@ -81,14 +82,14 @@ func CRRemoveDeletedFiles(id, baseDirectory, containerRootDirectory string) erro
}
if err != nil {
- return errors.Wrapf(err, "failed to read deleted files file")
+ return fmt.Errorf("failed to read deleted files file: %w", err)
}
for _, deleteFile := range deletedFiles {
// Using RemoveAll as deletedFiles, which is generated from 'podman diff'
// lists completely deleted directories as a single entry: 'D /root'.
if err := os.RemoveAll(filepath.Join(containerRootDirectory, deleteFile)); err != nil {
- return errors.Wrapf(err, "failed to delete files from container %s during restore", id)
+ return fmt.Errorf("failed to delete files from container %s during restore: %w", id, err)
}
}
@@ -105,12 +106,12 @@ func CRApplyRootFsDiffTar(baseDirectory, containerRootDirectory string) error {
if errors.Is(err, os.ErrNotExist) {
return nil
}
- return errors.Wrap(err, "failed to open root file-system diff file")
+ return fmt.Errorf("failed to open root file-system diff file: %w", err)
}
defer rootfsDiffFile.Close()
if err := archive.Untar(rootfsDiffFile, containerRootDirectory, nil); err != nil {
- return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath)
+ return fmt.Errorf("failed to apply root file-system diff file %s: %w", rootfsDiffPath, err)
}
return nil
@@ -158,11 +159,11 @@ func CRCreateRootFsDiffTar(changes *[]archive.Change, mountPoint, destination st
IncludeFiles: rootfsIncludeFiles,
})
if err != nil {
- return includeFiles, errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
+ return includeFiles, fmt.Errorf("error exporting root file-system diff to %q: %w", rootfsDiffPath, err)
}
rootfsDiffFile, err := os.Create(rootfsDiffPath)
if err != nil {
- return includeFiles, errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
+ return includeFiles, fmt.Errorf("error creating root file-system diff file %q: %w", rootfsDiffPath, err)
}
defer rootfsDiffFile.Close()
if _, err = io.Copy(rootfsDiffFile, rootfsTar); err != nil {
@@ -195,11 +196,11 @@ func CRCreateFileWithLabel(directory, fileName, fileLabel string) error {
logFile, err := os.OpenFile(logFileName, os.O_CREATE, 0o600)
if err != nil {
- return errors.Wrapf(err, "failed to create file %q", logFileName)
+ return fmt.Errorf("failed to create file %q: %w", logFileName, err)
}
defer logFile.Close()
if err = label.SetFileLabel(logFileName, fileLabel); err != nil {
- return errors.Wrapf(err, "failed to label file %q", logFileName)
+ return fmt.Errorf("failed to label file %q: %w", logFileName, err)
}
return nil
diff --git a/pkg/copy/fileinfo.go b/pkg/copy/fileinfo.go
index 0ccca5b6e..7d4e67896 100644
--- a/pkg/copy/fileinfo.go
+++ b/pkg/copy/fileinfo.go
@@ -3,13 +3,14 @@ package copy
import (
"encoding/base64"
"encoding/json"
+ "errors"
+ "fmt"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)
// XDockerContainerPathStatHeader is the *key* in http headers pointing to the
@@ -18,7 +19,7 @@ const XDockerContainerPathStatHeader = "X-Docker-Container-Path-Stat"
// ErrENOENT mimics the stdlib's ErrENOENT and can be used to implement custom logic
// while preserving the user-visible error message.
-var ErrENOENT = errors.New("No such file or directory")
+var ErrENOENT = errors.New("no such file or directory")
// FileInfo describes a file or directory and is returned by
// (*CopyItem).Stat().
@@ -29,7 +30,7 @@ type FileInfo = define.FileInfo
func EncodeFileInfo(info *FileInfo) (string, error) {
buf, err := json.Marshal(&info)
if err != nil {
- return "", errors.Wrap(err, "failed to serialize file stats")
+ return "", fmt.Errorf("failed to serialize file stats: %w", err)
}
return base64.URLEncoding.EncodeToString(buf), nil
}
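
EncodeFileInfo above serializes the stat result to JSON and base64url-encodes it so it fits into the X-Docker-Container-Path-Stat header; the consumer reverses those two steps. A sketch of the decode direction, hedged as illustrative since the matching helper is not part of this hunk:

package scratch

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/containers/podman/v4/libpod/define"
)

// decodeFileInfo reverses EncodeFileInfo: base64url-decode, then unmarshal.
func decodeFileInfo(header string) (*define.FileInfo, error) {
	buf, err := base64.URLEncoding.DecodeString(header)
	if err != nil {
		return nil, fmt.Errorf("failed to decode file stats: %w", err)
	}
	var info define.FileInfo
	if err := json.Unmarshal(buf, &info); err != nil {
		return nil, fmt.Errorf("failed to deserialize file stats: %w", err)
	}
	return &info, nil
}
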
diff --git a/pkg/copy/parse.go b/pkg/copy/parse.go
index 93edec5fa..50f1d211d 100644
--- a/pkg/copy/parse.go
+++ b/pkg/copy/parse.go
@@ -1,9 +1,8 @@
package copy
import (
+ "fmt"
"strings"
-
- "github.com/pkg/errors"
)
// ParseSourceAndDestination parses the source and destination input into a
@@ -19,7 +18,7 @@ func ParseSourceAndDestination(source, destination string) (string, string, stri
destContainer, destPath := parseUserInput(destination)
if len(sourcePath) == 0 || len(destPath) == 0 {
- return "", "", "", "", errors.Errorf("invalid arguments %q, %q: you must specify paths", source, destination)
+ return "", "", "", "", fmt.Errorf("invalid arguments %q, %q: you must specify paths", source, destination)
}
return sourceContainer, sourcePath, destContainer, destPath, nil
diff --git a/pkg/criu/criu.go b/pkg/criu/criu.go
index 6570159d7..0b0bbff5d 100644
--- a/pkg/criu/criu.go
+++ b/pkg/criu/criu.go
@@ -1,51 +1,8 @@
-//go:build linux
-// +build linux
-
package criu
-import (
- "github.com/checkpoint-restore/go-criu/v5"
- "github.com/checkpoint-restore/go-criu/v5/rpc"
-
- "google.golang.org/protobuf/proto"
-)
-
// MinCriuVersion for Podman at least CRIU 3.11 is required
const MinCriuVersion = 31100
// PodCriuVersion is the version of CRIU needed for
// checkpointing and restoring containers out of and into Pods.
const PodCriuVersion = 31600
-
-// CheckForCriu uses CRIU's go bindings to check if the CRIU
-// binary exists and if it at least the version Podman needs.
-func CheckForCriu(version int) bool {
- c := criu.MakeCriu()
- result, err := c.IsCriuAtLeast(version)
- if err != nil {
- return false
- }
- return result
-}
-
-func GetCriuVestion() (int, error) {
- c := criu.MakeCriu()
- return c.GetCriuVersion()
-}
-
-func MemTrack() bool {
- features, err := criu.MakeCriu().FeatureCheck(
- &rpc.CriuFeatures{
- MemTrack: proto.Bool(true),
- },
- )
- if err != nil {
- return false
- }
-
- if features == nil || features.MemTrack == nil {
- return false
- }
-
- return *features.MemTrack
-}
diff --git a/pkg/criu/criu_linux.go b/pkg/criu/criu_linux.go
new file mode 100644
index 000000000..c28e23fd7
--- /dev/null
+++ b/pkg/criu/criu_linux.go
@@ -0,0 +1,44 @@
+//go:build linux
+// +build linux
+
+package criu
+
+import (
+ "github.com/checkpoint-restore/go-criu/v5"
+ "github.com/checkpoint-restore/go-criu/v5/rpc"
+
+ "google.golang.org/protobuf/proto"
+)
+
+// CheckForCriu uses CRIU's go bindings to check if the CRIU
+// binary exists and if it is at least the version Podman needs.
+func CheckForCriu(version int) bool {
+ c := criu.MakeCriu()
+ result, err := c.IsCriuAtLeast(version)
+ if err != nil {
+ return false
+ }
+ return result
+}
+
+func MemTrack() bool {
+ features, err := criu.MakeCriu().FeatureCheck(
+ &rpc.CriuFeatures{
+ MemTrack: proto.Bool(true),
+ },
+ )
+ if err != nil {
+ return false
+ }
+
+ if features == nil || features.MemTrack == nil {
+ return false
+ }
+
+ return *features.MemTrack
+}
+
+func GetCriuVersion() (int, error) {
+ c := criu.MakeCriu()
+ return c.GetCriuVersion()
+}
diff --git a/pkg/criu/criu_unsupported.go b/pkg/criu/criu_unsupported.go
index 3e3ed9c6c..437482a0e 100644
--- a/pkg/criu/criu_unsupported.go
+++ b/pkg/criu/criu_unsupported.go
@@ -3,6 +3,14 @@
package criu
+func CheckForCriu(version int) bool {
+ return false
+}
+
func MemTrack() bool {
return false
}
+
+func GetCriuVersion() (int, error) {
+ return MinCriuVersion, nil
+}
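
The criu package now follows the usual build-tag split: the version constants stay in the platform-neutral file, the go-criu calls live behind //go:build linux, and the stubs above keep every other GOOS compiling. The same pattern in miniature, with hypothetical file and package names used only for illustration:

// probe_linux.go: compiled only on Linux, free to call platform bindings.

//go:build linux
// +build linux

package probe

// Supported reports whether the real platform check succeeds.
func Supported() bool { return platformCheck() }

func platformCheck() bool { return true } // stand-in for the real probe

// probe_unsupported.go: compiled everywhere else so callers still build.

//go:build !linux
// +build !linux

package probe

// Supported always reports false where the feature cannot exist.
func Supported() bool { return false }
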
diff --git a/pkg/ctime/ctime_linux.go b/pkg/ctime/ctime_linux.go
index 7eb3caa6d..bf3cd5752 100644
--- a/pkg/ctime/ctime_linux.go
+++ b/pkg/ctime/ctime_linux.go
@@ -11,6 +11,6 @@ import (
func created(fi os.FileInfo) time.Time {
st := fi.Sys().(*syscall.Stat_t)
- //nolint
+ //nolint:unconvert // need to type cast on some cpu architectures
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
}
diff --git a/pkg/domain/entities/container_ps.go b/pkg/domain/entities/container_ps.go
index a5562e7c9..519d2b7da 100644
--- a/pkg/domain/entities/container_ps.go
+++ b/pkg/domain/entities/container_ps.go
@@ -1,13 +1,13 @@
package entities
import (
+ "errors"
"sort"
"strings"
"time"
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/pkg/ps/define"
- "github.com/pkg/errors"
)
// ListContainer describes a container suitable for listing
@@ -166,7 +166,7 @@ func SortPsOutput(sortBy string, psOutput SortListContainers) (SortListContainer
case "pod":
sort.Sort(psSortedPod{psOutput})
default:
- return nil, errors.Errorf("invalid option for --sort, options are: command, created, id, image, names, runningfor, size, or status")
+ return nil, errors.New("invalid option for --sort, options are: command, created, id, image, names, runningfor, size, or status")
}
return psOutput, nil
}
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index 1db8b9951..17408f12f 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -47,8 +47,7 @@ type ContainerRunlabelOptions struct {
}
// ContainerRunlabelReport contains the results from executing container-runlabel.
-type ContainerRunlabelReport struct {
-}
+type ContainerRunlabelReport struct{}
type WaitOptions struct {
Condition []define.ContainerStatus
@@ -57,7 +56,7 @@ type WaitOptions struct {
}
type WaitReport struct {
- Id string //nolint
+ Id string //nolint:revive,stylecheck
Error error
ExitCode int32
}
@@ -77,7 +76,7 @@ type PauseUnPauseOptions struct {
type PauseUnpauseReport struct {
Err error
- Id string //nolint
+ Id string //nolint:revive,stylecheck
}
type StopOptions struct {
@@ -89,7 +88,7 @@ type StopOptions struct {
type StopReport struct {
Err error
- Id string //nolint
+ Id string //nolint:revive,stylecheck
RawInput string
}
@@ -111,7 +110,7 @@ type KillOptions struct {
type KillReport struct {
Err error
- Id string //nolint
+ Id string //nolint:revive,stylecheck
RawInput string
}
@@ -124,7 +123,7 @@ type RestartOptions struct {
type RestartReport struct {
Err error
- Id string //nolint
+ Id string //nolint:revive,stylecheck
}
type RmOptions struct {
@@ -165,10 +164,13 @@ type CopyOptions struct {
Chown bool
// Map to translate path names.
Rename map[string]string
+ // NoOverwriteDirNonDir when true prevents an existing directory or file from being overwritten
+ // by the other type
+ NoOverwriteDirNonDir bool
}
type CommitReport struct {
- Id string //nolint
+ Id string //nolint:revive,stylecheck
}
type ContainerExportOptions struct {
@@ -194,7 +196,7 @@ type CheckpointOptions struct {
type CheckpointReport struct {
Err error `json:"-"`
- Id string `json:"Id` //nolint
+ Id string `json:"Id"` //nolint:revive,stylecheck
RuntimeDuration int64 `json:"runtime_checkpoint_duration"`
CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"`
}
@@ -220,13 +222,13 @@ type RestoreOptions struct {
type RestoreReport struct {
Err error `json:"-"`
- Id string `json:"Id` //nolint
+ Id string `json:"Id"` //nolint:revive,stylecheck
RuntimeDuration int64 `json:"runtime_restore_duration"`
CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"`
}
type ContainerCreateReport struct {
- Id string //nolint
+ Id string //nolint:revive,stylecheck
}
// AttachOptions describes the cli and other values
@@ -305,7 +307,7 @@ type ContainerStartOptions struct {
// ContainerStartReport describes the response from starting
// containers from the cli
type ContainerStartReport struct {
- Id string //nolint
+ Id string //nolint:revive,stylecheck
RawInput string
Err error
ExitCode int
@@ -349,7 +351,7 @@ type ContainerRunOptions struct {
// a container
type ContainerRunReport struct {
ExitCode int
- Id string //nolint
+ Id string //nolint:revive,stylecheck
}
// ContainerCleanupOptions are the CLI values for the
@@ -366,7 +368,7 @@ type ContainerCleanupOptions struct {
// container cleanup
type ContainerCleanupReport struct {
CleanErr error
- Id string //nolint
+ Id string //nolint:revive,stylecheck
RmErr error
RmiErr error
}
@@ -382,7 +384,7 @@ type ContainerInitOptions struct {
// container init
type ContainerInitReport struct {
Err error
- Id string //nolint
+ Id string //nolint:revive,stylecheck
}
// ContainerMountOptions describes the input values for mounting containers
@@ -404,7 +406,7 @@ type ContainerUnmountOptions struct {
// ContainerMountReport describes the response from container mount
type ContainerMountReport struct {
Err error
- Id string //nolint
+ Id string //nolint:revive,stylecheck
Name string
Path string
}
@@ -412,7 +414,7 @@ type ContainerMountReport struct {
// ContainerUnmountReport describes the response from umounting a container
type ContainerUnmountReport struct {
Err error
- Id string //nolint
+ Id string //nolint:revive,stylecheck
}
// ContainerPruneOptions describes the options needed
@@ -431,7 +433,7 @@ type ContainerPortOptions struct {
// ContainerPortReport describes the output needed for
// the CLI to output ports
type ContainerPortReport struct {
- Id string //nolint
+ Id string //nolint:revive,stylecheck
Ports []nettypes.PortMapping
}
@@ -441,6 +443,9 @@ type ContainerCpOptions struct {
Pause bool
// Extract the tarfile into the destination directory.
Extract bool
+ // OverwriteDirNonDir allows for overwriting a directory with a
+ // non-directory and vice versa.
+ OverwriteDirNonDir bool
}
// ContainerStatsOptions describes input options for getting
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index 6b70a3452..e4eb808b4 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -71,6 +71,7 @@ type ContainerEngine interface {
PlayKube(ctx context.Context, body io.Reader, opts PlayKubeOptions) (*PlayKubeReport, error)
PlayKubeDown(ctx context.Context, body io.Reader, opts PlayKubeDownOptions) (*PlayKubeReport, error)
PodCreate(ctx context.Context, specg PodSpec) (*PodCreateReport, error)
+ PodClone(ctx context.Context, podClone PodCloneOptions) (*PodCloneReport, error)
PodExists(ctx context.Context, nameOrID string) (*BoolReport, error)
PodInspect(ctx context.Context, options PodInspectOptions) (*PodInspectReport, error)
PodKill(ctx context.Context, namesOrIds []string, options PodKillOptions) ([]*PodKillReport, error)
@@ -103,4 +104,5 @@ type ContainerEngine interface {
VolumePrune(ctx context.Context, options VolumePruneOptions) ([]*reports.PruneReport, error)
VolumeRm(ctx context.Context, namesOrIds []string, opts VolumeRmOptions) ([]*VolumeRmReport, error)
VolumeUnmount(ctx context.Context, namesOrIds []string) ([]*VolumeUnmountReport, error)
+ VolumeReload(ctx context.Context) (*VolumeReloadReport, error)
}
diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go
index 5011d82aa..5f76ae50b 100644
--- a/pkg/domain/entities/engine_image.go
+++ b/pkg/domain/entities/engine_image.go
@@ -22,12 +22,12 @@ type ImageEngine interface {
Push(ctx context.Context, source string, destination string, opts ImagePushOptions) error
Remove(ctx context.Context, images []string, opts ImageRemoveOptions) (*ImageRemoveReport, []error)
Save(ctx context.Context, nameOrID string, tags []string, options ImageSaveOptions) error
+ Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool) error
Search(ctx context.Context, term string, opts ImageSearchOptions) ([]ImageSearchReport, error)
SetTrust(ctx context.Context, args []string, options SetTrustOptions) error
ShowTrust(ctx context.Context, args []string, options ShowTrustOptions) (*ShowTrustReport, error)
Shutdown(ctx context.Context)
Tag(ctx context.Context, nameOrID string, tags []string, options ImageTagOptions) error
- Transfer(ctx context.Context, source ImageScpOptions, dest ImageScpOptions, parentFlags []string) error
Tree(ctx context.Context, nameOrID string, options ImageTreeOptions) (*ImageTreeReport, error)
Unmount(ctx context.Context, images []string, options ImageUnmountOptions) ([]*ImageUnmountReport, error)
Untag(ctx context.Context, nameOrID string, tags []string, options ImageUntagOptions) error
diff --git a/pkg/domain/entities/events.go b/pkg/domain/entities/events.go
index d8ba0f1d3..de218b285 100644
--- a/pkg/domain/entities/events.go
+++ b/pkg/domain/entities/events.go
@@ -14,6 +14,7 @@ type Event struct {
// TODO: it would be nice to have full control over the types at some
// point and fork such Docker types.
dockerEvents.Message
+ HealthStatus string
}
// ConvertToLibpodEvent converts an entities event to a libpod one.
@@ -44,6 +45,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
Status: status,
Time: time.Unix(0, e.TimeNano),
Type: t,
+ HealthStatus: e.HealthStatus,
Details: libpodEvents.Details{
Attributes: details,
},
@@ -59,7 +61,7 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event {
attributes["image"] = e.Image
attributes["name"] = e.Name
attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode)
- return &Event{dockerEvents.Message{
+ message := dockerEvents.Message{
// Compatibility with clients that still look for deprecated API elements
Status: e.Status.String(),
ID: e.ID,
@@ -73,5 +75,9 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event {
Scope: "local",
Time: e.Time.Unix(),
TimeNano: e.Time.UnixNano(),
- }}
+ }
+ return &Event{
+ message,
+ e.HealthStatus,
+ }
}
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 7081c5d25..da317cfad 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -46,14 +46,14 @@ type Image struct {
HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"`
}
-func (i *Image) Id() string { // nolint
+func (i *Image) Id() string { //nolint:revive,stylecheck
return i.ID
}
// swagger:model LibpodImageSummary
type ImageSummary struct {
ID string `json:"Id"`
- ParentId string // nolint
+ ParentId string //nolint:revive,stylecheck
RepoTags []string
RepoDigests []string
Created int64
@@ -66,13 +66,12 @@ type ImageSummary struct {
Dangling bool `json:",omitempty"`
// Podman extensions
- Names []string `json:",omitempty"`
- Digest string `json:",omitempty"`
- ConfigDigest string `json:",omitempty"`
- History []string `json:",omitempty"`
+ Names []string `json:",omitempty"`
+ Digest string `json:",omitempty"`
+ History []string `json:",omitempty"`
}
-func (i *ImageSummary) Id() string { // nolint
+func (i *ImageSummary) Id() string { //nolint:revive,stylecheck
return i.ID
}
@@ -291,7 +290,7 @@ type ImageImportOptions struct {
}
type ImageImportReport struct {
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
// ImageSaveOptions provide options for saving images.
@@ -326,6 +325,8 @@ type ImageScpOptions struct {
Image string `json:"image,omitempty"`
// User is used in conjunction with Transfer to determine if a valid user was given to save from/load into
User string `json:"user,omitempty"`
+ // Tag is the name to be used for the image on the destination
+ Tag string `json:"tag,omitempty"`
}
// ImageScpConnections provides the ssh related information used in remote image transfer
@@ -398,8 +399,7 @@ type ImageUnmountOptions struct {
// ImageMountReport describes the response from image mount
type ImageMountReport struct {
- Err error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
Name string
Repositories []string
Path string
@@ -408,5 +408,5 @@ type ImageMountReport struct {
// ImageUnmountReport describes the response from umounting an image
type ImageUnmountReport struct {
Err error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
diff --git a/pkg/domain/entities/machine.go b/pkg/domain/entities/machine.go
new file mode 100644
index 000000000..6ba53dbd1
--- /dev/null
+++ b/pkg/domain/entities/machine.go
@@ -0,0 +1,18 @@
+package entities
+
+type ListReporter struct {
+ Name string
+ Default bool
+ Created string
+ Running bool
+ Starting bool
+ LastUp string
+ Stream string
+ VMType string
+ CPUs uint64
+ Memory string
+ DiskSize string
+ Port int
+ RemoteUsername string
+ IdentityPath string
+}
diff --git a/pkg/domain/entities/manifest.go b/pkg/domain/entities/manifest.go
index 81f3e837b..e88c5f854 100644
--- a/pkg/domain/entities/manifest.go
+++ b/pkg/domain/entities/manifest.go
@@ -67,6 +67,21 @@ type ManifestModifyOptions struct {
type ManifestRemoveOptions struct {
}
+// ManifestRemoveReport provides the model for the removed manifest
+//
+// swagger:model
+type ManifestRemoveReport struct {
+ // Deleted manifest list.
+ Deleted []string `json:",omitempty"`
+ // Untagged images. Can be longer than Deleted.
+ Untagged []string `json:",omitempty"`
+ // Errors associated with operation
+ Errors []string `json:",omitempty"`
+ // ExitCode describes the exit codes as described in the `podman rmi`
+ // man page.
+ ExitCode int
+}
+
// ManifestModifyReport provides the model for removed digests and changed manifest
//
// swagger:model
diff --git a/pkg/domain/entities/network.go b/pkg/domain/entities/network.go
index 0f901c7f1..d375c2e20 100644
--- a/pkg/domain/entities/network.go
+++ b/pkg/domain/entities/network.go
@@ -22,7 +22,7 @@ type NetworkReloadOptions struct {
// NetworkReloadReport describes the results of reloading a container network.
type NetworkReloadReport struct {
- // nolint:stylecheck,revive
+ //nolint:stylecheck,revive
Id string
Err error
}
diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go
index 9cbbe2bf1..14ce370c1 100644
--- a/pkg/domain/entities/pods.go
+++ b/pkg/domain/entities/pods.go
@@ -20,15 +20,15 @@ type PodKillOptions struct {
type PodKillReport struct {
Errs []error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
type ListPodsReport struct {
Cgroup string
Containers []*ListPodContainer
Created time.Time
- Id string // nolint
- InfraId string // nolint
+ Id string //nolint:revive,stylecheck
+ InfraId string //nolint:revive,stylecheck
Name string
Namespace string
// Network names connected to infra container
@@ -38,7 +38,7 @@ type ListPodsReport struct {
}
type ListPodContainer struct {
- Id string // nolint
+ Id string //nolint:revive,stylecheck
Names string
Status string
}
@@ -50,7 +50,7 @@ type PodPauseOptions struct {
type PodPauseReport struct {
Errs []error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
type PodunpauseOptions struct {
@@ -60,7 +60,7 @@ type PodunpauseOptions struct {
type PodUnpauseReport struct {
Errs []error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
type PodStopOptions struct {
@@ -72,7 +72,7 @@ type PodStopOptions struct {
type PodStopReport struct {
Errs []error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
type PodRestartOptions struct {
@@ -82,7 +82,7 @@ type PodRestartOptions struct {
type PodRestartReport struct {
Errs []error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
type PodStartOptions struct {
@@ -92,7 +92,7 @@ type PodStartOptions struct {
type PodStartReport struct {
Errs []error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
type PodRmOptions struct {
@@ -105,7 +105,7 @@ type PodRmOptions struct {
type PodRmReport struct {
Err error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
// PodSpec is an abstracted version of PodSpecGen designed to eventually accept options
@@ -154,6 +154,16 @@ type PodLogsOptions struct {
Color bool
}
+// PodCloneOptions contains options for cloning an existing pod
+type PodCloneOptions struct {
+ ID string
+ Destroy bool
+ CreateOpts PodCreateOptions
+ InfraOptions ContainerCreateOptions
+ PerContainerOptions ContainerCreateOptions
+ Start bool
+}
+
type ContainerCreateOptions struct {
Annotation []string
Attach []string
@@ -287,7 +297,11 @@ func NewInfraContainerCreateOptions() ContainerCreateOptions {
}
type PodCreateReport struct {
- Id string // nolint
+ Id string //nolint:revive,stylecheck
+}
+
+type PodCloneReport struct {
+ Id string //nolint:revive,stylecheck
}
func (p *PodCreateOptions) CPULimits() *specs.LinuxCPU {
@@ -389,7 +403,7 @@ type PodPruneOptions struct {
type PodPruneReport struct {
Err error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
type PodTopOptions struct {
diff --git a/pkg/domain/entities/reports/containers.go b/pkg/domain/entities/reports/containers.go
index 54bcd092b..db9a66012 100644
--- a/pkg/domain/entities/reports/containers.go
+++ b/pkg/domain/entities/reports/containers.go
@@ -1,7 +1,7 @@
package reports
type RmReport struct {
- Id string `json:"Id"` //nolint
+ Id string `json:"Id"` //nolint:revive,stylecheck
Err error `json:"Err,omitempty"`
}
diff --git a/pkg/domain/entities/reports/prune.go b/pkg/domain/entities/reports/prune.go
index 497e5d606..ac3d8e7ce 100644
--- a/pkg/domain/entities/reports/prune.go
+++ b/pkg/domain/entities/reports/prune.go
@@ -1,7 +1,7 @@
package reports
type PruneReport struct {
- Id string `json:"Id"` //nolint
+ Id string `json:"Id"` //nolint:revive,stylecheck
Err error `json:"Err,omitempty"`
Size uint64 `json:"Size"`
}
diff --git a/pkg/domain/entities/reports/scp.go b/pkg/domain/entities/reports/scp.go
new file mode 100644
index 000000000..1e102bab3
--- /dev/null
+++ b/pkg/domain/entities/reports/scp.go
@@ -0,0 +1,5 @@
+package reports
+
+type ScpReport struct {
+ Id string `json:"Id"` //nolint:revive,stylecheck
+}
diff --git a/pkg/domain/entities/system.go b/pkg/domain/entities/system.go
index 21026477d..331d2bcdc 100644
--- a/pkg/domain/entities/system.go
+++ b/pkg/domain/entities/system.go
@@ -28,6 +28,7 @@ type SystemPruneReport struct {
PodPruneReport []*PodPruneReport
ContainerPruneReports []*reports.PruneReport
ImagePruneReports []*reports.PruneReport
+ NetworkPruneReports []*reports.PruneReport
VolumePruneReports []*reports.PruneReport
ReclaimedSpace uint64
}
diff --git a/pkg/domain/entities/types.go b/pkg/domain/entities/types.go
index 5ae8a4931..44df66498 100644
--- a/pkg/domain/entities/types.go
+++ b/pkg/domain/entities/types.go
@@ -21,7 +21,7 @@ type Volume struct {
}
type Report struct {
- Id []string // nolint
+ Id []string //nolint:revive,stylecheck
Err map[string]error
}
@@ -78,10 +78,9 @@ type InspectOptions struct {
// DiffOptions all API and CLI diff commands and diff sub-commands use the same options
type DiffOptions struct {
- Format string `json:",omitempty"` // CLI only
- Latest bool `json:",omitempty"` // API and CLI, only supported by containers
- Archive bool `json:",omitempty"` // CLI only
- Type define.DiffType // Type which should be compared
+ Format string `json:",omitempty"` // CLI only
+ Latest bool `json:",omitempty"` // API and CLI, only supported by containers
+ Type define.DiffType // Type which should be compared
}
// DiffReport provides changes for object
diff --git a/pkg/domain/entities/volumes.go b/pkg/domain/entities/volumes.go
index 84f85b83f..9a06b2238 100644
--- a/pkg/domain/entities/volumes.go
+++ b/pkg/domain/entities/volumes.go
@@ -33,7 +33,7 @@ type VolumeRmOptions struct {
type VolumeRmReport struct {
Err error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
type VolumeInspectReport struct {
@@ -54,6 +54,11 @@ type VolumeListReport struct {
VolumeConfigResponse
}
+// VolumeReloadReport describes the response from reload volume plugins
+type VolumeReloadReport struct {
+ define.VolumeReload
+}
+
/*
* Docker API compatibility types
*/
@@ -61,7 +66,7 @@ type VolumeListReport struct {
// VolumeMountReport describes the response from volume mount
type VolumeMountReport struct {
Err error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
Name string
Path string
}
@@ -69,5 +74,5 @@ type VolumeMountReport struct {
// VolumeUnmountReport describes the response from umounting a volume
type VolumeUnmountReport struct {
Err error
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
diff --git a/pkg/domain/filters/containers.go b/pkg/domain/filters/containers.go
index 3e5b9cad9..f88a165e7 100644
--- a/pkg/domain/filters/containers.go
+++ b/pkg/domain/filters/containers.go
@@ -1,15 +1,16 @@
package filters
import (
+ "errors"
"fmt"
"strconv"
"strings"
"time"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
// GenerateContainerFilterFuncs return ContainerFilter functions based of filter.
@@ -35,7 +36,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
for _, exitCode := range filterValues {
ec, err := strconv.ParseInt(exitCode, 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "exited code out of range %q", ec)
+ return nil, fmt.Errorf("exited code out of range %q: %w", ec, err)
}
exitCodes = append(exitCodes, int32(ec))
}
@@ -183,7 +184,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
for _, podNameOrID := range filterValues {
p, err := r.LookupPod(podNameOrID)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchPod {
+ if errors.Is(err, define.ErrNoSuchPod) {
continue
}
return nil, err
@@ -257,7 +258,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
return false
}
for _, net := range networks {
- if util.StringInSlice(net, inputNetNames) {
+ if cutil.StringInSlice(net, inputNetNames) {
return true
}
}
@@ -290,7 +291,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
return false
}, filterValueError
}
- return nil, errors.Errorf("%s is an invalid filter", filter)
+ return nil, fmt.Errorf("%s is an invalid filter", filter)
}
// GeneratePruneContainerFilterFuncs return ContainerFilter functions based of filter for prune operation
@@ -303,7 +304,7 @@ func GeneratePruneContainerFilterFuncs(filter string, filterValues []string, r *
case "until":
return prepareUntilFilterFunc(filterValues)
}
- return nil, errors.Errorf("%s is an invalid filter", filter)
+ return nil, fmt.Errorf("%s is an invalid filter", filter)
}
func prepareUntilFilterFunc(filterValues []string) (func(container *libpod.Container) bool, error) {
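
The filter changes above swap `errors.Cause(err) == define.ErrNoSuchPod` for `errors.Is(err, define.ErrNoSuchPod)`, which works because errors are now wrapped with `%w` instead of `errors.Wrapf`. A small self-contained sketch of that equivalence; the sentinel and lookup function are placeholders:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel comparable to define.ErrNoSuchPod in the hunks above.
var errNoSuchPod = errors.New("no such pod")

func lookupPod(name string) error {
	// Wrapping with %w keeps the sentinel in the error chain, so the
	// stdlib errors.Is check below behaves like the old
	// errors.Cause(err) == define.ErrNoSuchPod comparison.
	return fmt.Errorf("looking up pod %q: %w", name, errNoSuchPod)
}

func main() {
	err := lookupPod("web")
	fmt.Println(errors.Is(err, errNoSuchPod)) // true
}
```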
diff --git a/pkg/domain/filters/pods.go b/pkg/domain/filters/pods.go
index e22480006..78b97db64 100644
--- a/pkg/domain/filters/pods.go
+++ b/pkg/domain/filters/pods.go
@@ -1,13 +1,15 @@
package filters
import (
+ "errors"
+ "fmt"
"strconv"
"strings"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
// GeneratePodFilterFunc takes a filter and filtervalue (key, value)
@@ -57,8 +59,8 @@ func GeneratePodFilterFunc(filter string, filterValues []string, r *libpod.Runti
}, nil
case "ctr-status":
for _, filterValue := range filterValues {
- if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
- return nil, errors.Errorf("%s is not a valid status", filterValue)
+ if !cutil.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
+ return nil, fmt.Errorf("%s is not a valid status", filterValue)
}
}
return func(p *libpod.Pod) bool {
@@ -94,8 +96,8 @@ func GeneratePodFilterFunc(filter string, filterValues []string, r *libpod.Runti
}, nil
case "status":
for _, filterValue := range filterValues {
- if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created", "degraded"}) {
- return nil, errors.Errorf("%s is not a valid pod status", filterValue)
+ if !cutil.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created", "degraded"}) {
+ return nil, fmt.Errorf("%s is not a valid pod status", filterValue)
}
}
return func(p *libpod.Pod) bool {
@@ -150,12 +152,12 @@ func GeneratePodFilterFunc(filter string, filterValues []string, r *libpod.Runti
return false
}
for _, net := range networks {
- if util.StringInSlice(net, inputNetNames) {
+ if cutil.StringInSlice(net, inputNetNames) {
return true
}
}
return false
}, nil
}
- return nil, errors.Errorf("%s is an invalid filter", filter)
+ return nil, fmt.Errorf("%s is an invalid filter", filter)
}
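
The pod status filters above validate each value against a fixed allow-list with `cutil.StringInSlice` before building the filter function. A minimal sketch of that validation step; `contains` stands in for the containers/common helper:

```go
package main

import "fmt"

// contains is a stand-in for cutil.StringInSlice from
// github.com/containers/common/pkg/util used in the hunks above.
func contains(needle string, haystack []string) bool {
	for _, s := range haystack {
		if s == needle {
			return true
		}
	}
	return false
}

// validateStatuses mirrors the allow-list check for the pod "status"
// filter: unknown values are rejected up front with a plain fmt error.
func validateStatuses(values []string) error {
	allowed := []string{"stopped", "running", "paused", "exited", "dead", "created", "degraded"}
	for _, v := range values {
		if !contains(v, allowed) {
			return fmt.Errorf("%s is not a valid pod status", v)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateStatuses([]string{"running", "sleeping"}))
}
```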
diff --git a/pkg/domain/filters/volumes.go b/pkg/domain/filters/volumes.go
index e88bd4228..7c5047225 100644
--- a/pkg/domain/filters/volumes.go
+++ b/pkg/domain/filters/volumes.go
@@ -1,12 +1,13 @@
package filters
import (
+ "fmt"
"net/url"
+ "regexp"
"strings"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
func GenerateVolumeFilters(filters url.Values) ([]libpod.VolumeFilter, error) {
@@ -15,9 +16,12 @@ func GenerateVolumeFilters(filters url.Values) ([]libpod.VolumeFilter, error) {
for _, val := range v {
switch filter {
case "name":
- nameVal := val
+ nameRegexp, err := regexp.Compile(val)
+ if err != nil {
+ return nil, err
+ }
vf = append(vf, func(v *libpod.Volume) bool {
- return nameVal == v.Name()
+ return nameRegexp.MatchString(v.Name())
})
case "driver":
driverVal := val
@@ -68,7 +72,7 @@ func GenerateVolumeFilters(filters url.Values) ([]libpod.VolumeFilter, error) {
// invert the result of IsDangling.
invert = true
default:
- return nil, errors.Errorf("%q is not a valid value for the \"dangling\" filter - must be true or false", danglingVal)
+ return nil, fmt.Errorf("%q is not a valid value for the \"dangling\" filter - must be true or false", danglingVal)
}
vf = append(vf, func(v *libpod.Volume) bool {
dangling, err := v.IsDangling()
@@ -81,7 +85,7 @@ func GenerateVolumeFilters(filters url.Values) ([]libpod.VolumeFilter, error) {
return dangling
})
default:
- return nil, errors.Errorf("%q is an invalid volume filter", filter)
+ return nil, fmt.Errorf("%q is an invalid volume filter", filter)
}
}
}
@@ -105,7 +109,7 @@ func GeneratePruneVolumeFilters(filters url.Values) ([]libpod.VolumeFilter, erro
}
vf = append(vf, f)
default:
- return nil, errors.Errorf("%q is an invalid volume filter", filter)
+ return nil, fmt.Errorf("%q is an invalid volume filter", filter)
}
}
}
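
The volume "name" filter above now compiles the filter value as a regular expression and matches it against volume names, rather than requiring an exact match. A small sketch of the same idea against plain strings:

```go
package main

import (
	"fmt"
	"regexp"
)

// nameFilter returns a predicate that reports whether a volume name
// matches the given pattern, mirroring the regexp-based filter above.
func nameFilter(pattern string) (func(name string) bool, error) {
	re, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
	}
	return re.MatchString, nil
}

func main() {
	match, _ := nameFilter("^db-")
	fmt.Println(match("db-data"), match("web-data")) // true false
}
```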
diff --git a/pkg/domain/infra/abi/archive.go b/pkg/domain/infra/abi/archive.go
index 01e3c7dd1..de96cf8b0 100644
--- a/pkg/domain/infra/abi/archive.go
+++ b/pkg/domain/infra/abi/archive.go
@@ -12,10 +12,10 @@ func (ic *ContainerEngine) ContainerCopyFromArchive(ctx context.Context, nameOrI
if err != nil {
return nil, err
}
- return container.CopyFromArchive(ctx, containerPath, options.Chown, options.Rename, reader)
+ return container.CopyFromArchive(ctx, containerPath, options.Chown, options.NoOverwriteDirNonDir, options.Rename, reader)
}
-func (ic *ContainerEngine) ContainerCopyToArchive(ctx context.Context, nameOrID string, containerPath string, writer io.Writer) (entities.ContainerCopyFunc, error) {
+func (ic *ContainerEngine) ContainerCopyToArchive(ctx context.Context, nameOrID, containerPath string, writer io.Writer) (entities.ContainerCopyFunc, error) {
container, err := ic.Libpod.LookupContainer(nameOrID)
if err != nil {
return nil, err
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index d2fafccb1..1688be57e 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -2,6 +2,7 @@ package abi
import (
"context"
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -16,7 +17,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/libpod/logs"
"github.com/containers/podman/v4/pkg/checkpoint"
"github.com/containers/podman/v4/pkg/domain/entities"
@@ -33,12 +33,11 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// getContainersAndInputByContext gets containers whether all, latest, or a slice of names/ids
-// is specified. It also returns a list of the corresponding input name used to lookup each container.
+// is specified. It also returns a list of the corresponding input name used to look up each container.
func getContainersAndInputByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, rawInput []string, err error) {
var ctr *libpod.Container
ctrs = []*libpod.Container{}
@@ -81,7 +80,7 @@ func getContainersByContext(all, latest bool, names []string, runtime *libpod.Ru
func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrID string, options entities.ContainerExistsOptions) (*entities.BoolReport, error) {
_, err := ic.Libpod.LookupContainer(nameOrID)
if err != nil {
- if errors.Cause(err) != define.ErrNoSuchCtr {
+ if !errors.Is(err, define.ErrNoSuchCtr) {
return nil, err
}
if options.External {
@@ -121,7 +120,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
report := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := c.Pause()
- if err != nil && options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ if err != nil && options.All && errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Debugf("Container %s is not running", c.ID())
continue
}
@@ -138,7 +137,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
report := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := c.Unpause()
- if err != nil && options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ if err != nil && options.All && errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Debugf("Container %s is not paused", c.ID())
continue
}
@@ -149,7 +148,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
names := namesOrIds
ctrs, rawInputs, err := getContainersAndInputByContext(options.All, options.Latest, names, ic.Libpod)
- if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
+ if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
return nil, err
}
ctrMap := map[string]string{}
@@ -167,13 +166,13 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
}
if err != nil {
switch {
- case errors.Cause(err) == define.ErrCtrStopped:
+ case errors.Is(err, define.ErrCtrStopped):
logrus.Debugf("Container %s is already stopped", c.ID())
- case options.All && errors.Cause(err) == define.ErrCtrStateInvalid:
+ case options.All && errors.Is(err, define.ErrCtrStateInvalid):
logrus.Debugf("Container %s is not running, could not stop", c.ID())
// container never created in OCI runtime
// docker parity: do nothing just return container id
- case errors.Cause(err) == define.ErrCtrStateInvalid:
+ case errors.Is(err, define.ErrCtrStateInvalid):
logrus.Debugf("Container %s is either not created on runtime or is in a invalid state", c.ID())
default:
return err
@@ -183,7 +182,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
if err != nil {
// Issue #7384 and #11384: If the container is configured for
// auto-removal, it might already have been removed at this point.
- // We still need to to cleanup since we do not know if the other cleanup process is successful
+ // We still need to clean up since we do not know if the other cleanup process is successful
if c.AutoRemove() && (errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved)) {
return nil
}
@@ -239,7 +238,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
reports := make([]*entities.KillReport, 0, len(ctrs))
for _, con := range ctrs {
err := con.Kill(uint(sig))
- if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ if options.All && errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Debugf("Container %s is not running", con.ID())
continue
}
@@ -290,8 +289,7 @@ func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Cont
return nil
}
logrus.Debugf("Failed to remove container %s: %s", ctr.ID(), err.Error())
- switch errors.Cause(err) {
- case define.ErrNoSuchCtr:
+ if errors.Is(err, define.ErrNoSuchCtr) {
// Ignore if the container does not exist (anymore) when either
// it has been requested by the user or if the container is a
// service one. Service containers are removed along with its
@@ -302,7 +300,7 @@ func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Cont
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
return nil
}
- case define.ErrCtrRemoved:
+ } else if errors.Is(err, define.ErrCtrRemoved) {
return nil
}
return err
@@ -318,15 +316,15 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
for _, ctr := range names {
report := reports.RmReport{Id: ctr}
report.Err = ic.Libpod.RemoveStorageContainer(ctr, options.Force)
- switch errors.Cause(report.Err) {
- case nil:
+ //nolint:gocritic
+ if report.Err == nil {
// remove container names that we successfully deleted
rmReports = append(rmReports, &report)
- case define.ErrNoSuchCtr, define.ErrCtrExists:
+ } else if errors.Is(report.Err, define.ErrNoSuchCtr) || errors.Is(report.Err, define.ErrCtrExists) {
// There is still a potential this is a libpod container
tmpNames = append(tmpNames, ctr)
- default:
- if _, err := ic.Libpod.LookupContainer(ctr); errors.Cause(err) == define.ErrNoSuchCtr {
+ } else {
+ if _, err := ic.Libpod.LookupContainer(ctr); errors.Is(err, define.ErrNoSuchCtr) {
// remove container failed, but not a libpod container
rmReports = append(rmReports, &report)
continue
@@ -338,7 +336,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
names = tmpNames
ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
- if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
+ if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
// Failed to get containers. If force is specified, get the containers ID
// and evict them
if !options.Force {
@@ -350,7 +348,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
report := reports.RmReport{Id: ctr}
_, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
if err != nil {
- if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+ if options.Ignore && errors.Is(err, define.ErrNoSuchCtr) {
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
rmReports = append(rmReports, &report)
continue
@@ -427,7 +425,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
ctr, err := ic.Libpod.GetLatestContainer()
if err != nil {
if errors.Is(err, define.ErrNoSuchCtr) {
- return nil, []error{errors.Wrapf(err, "no containers to inspect")}, nil
+ return nil, []error{fmt.Errorf("no containers to inspect: %w", err)}, nil
}
return nil, nil, err
}
@@ -453,7 +451,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
// ErrNoSuchCtr is non-fatal, other errors will be
// treated as fatal.
if errors.Is(err, define.ErrNoSuchCtr) {
- errs = append(errs, errors.Errorf("no such container %s", name))
+ errs = append(errs, fmt.Errorf("no such container %s", name))
continue
}
return nil, nil, err
@@ -464,7 +462,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
// ErrNoSuchCtr is non-fatal, other errors will be
// treated as fatal.
if errors.Is(err, define.ErrNoSuchCtr) {
- errs = append(errs, errors.Errorf("no such container %s", name))
+ errs = append(errs, fmt.Errorf("no such container %s", name))
continue
}
return nil, nil, err
@@ -488,7 +486,7 @@ func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.To
container, err = ic.Libpod.LookupContainer(options.NameOrID)
}
if err != nil {
- return nil, errors.Wrap(err, "unable to lookup requested container")
+ return nil, fmt.Errorf("unable to look up requested container: %w", err)
}
// Run Top.
@@ -513,12 +511,12 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string,
case "oci":
mimeType = buildah.OCIv1ImageManifest
if len(options.Message) > 0 {
- return nil, errors.Errorf("messages are only compatible with the docker image format (-f docker)")
+ return nil, fmt.Errorf("messages are only compatible with the docker image format (-f docker)")
}
case "docker":
mimeType = manifest.DockerV2Schema2MediaType
default:
- return nil, errors.Errorf("unrecognized image format %q", options.Format)
+ return nil, fmt.Errorf("unrecognized image format %q", options.Format)
}
sc := ic.Libpod.SystemContext()
@@ -616,6 +614,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
ImportPrevious: options.ImportPrevious,
Pod: options.Pod,
PrintStats: options.PrintStats,
+ FileLocks: options.FileLocks,
}
filterFuncs := []libpod.ContainerFilter{
@@ -634,13 +633,13 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
containers, err = getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
default:
for _, nameOrID := range namesOrIds {
- logrus.Debugf("lookup container: %q", nameOrID)
+ logrus.Debugf("look up container: %q", nameOrID)
ctr, err := ic.Libpod.LookupContainer(nameOrID)
if err == nil {
containers = append(containers, ctr)
} else {
// If container was not found, check if this is a checkpoint image
- logrus.Debugf("lookup image: %q", nameOrID)
+ logrus.Debugf("look up image: %q", nameOrID)
img, _, err := ic.Libpod.LibimageRuntime().LookupImage(nameOrID, nil)
if err != nil {
return nil, fmt.Errorf("no such container or image: %s", nameOrID)
@@ -660,7 +659,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
// CRImportCheckpoint is expected to import exactly one container from checkpoint image
checkpointImageImportErrors = append(
checkpointImageImportErrors,
- errors.Errorf("unable to import checkpoint from image: %q: %v", nameOrID, err),
+ fmt.Errorf("unable to import checkpoint from image: %q: %v", nameOrID, err),
)
} else {
containers = append(containers, importedContainers[0])
@@ -720,16 +719,16 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string,
ctr := ctrs[0]
conState, err := ctr.State()
if err != nil {
- return errors.Wrapf(err, "unable to determine state of %s", ctr.ID())
+ return fmt.Errorf("unable to determine state of %s: %w", ctr.ID(), err)
}
if conState != define.ContainerStateRunning {
- return errors.Errorf("you can only attach to running containers")
+ return fmt.Errorf("you can only attach to running containers")
}
// If the container is in a pod, also set to recursively start dependencies
err = terminal.StartAttachCtr(ctx, ctr, options.Stdout, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, false)
- if err != nil && errors.Cause(err) != define.ErrDetach {
- return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
+ if err != nil && !errors.Is(err, define.ErrDetach) {
+ return fmt.Errorf("error attaching to container %s: %w", ctr.ID(), err)
}
os.Stdout.WriteString("\n")
return nil
@@ -751,12 +750,12 @@ func makeExecConfig(options entities.ExecOptions, rt *libpod.Runtime) (*libpod.E
storageConfig := rt.StorageConfig()
runtimeConfig, err := rt.GetConfig()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving Libpod configuration to build exec exit command")
+ return nil, fmt.Errorf("error retrieving Libpod configuration to build exec exit command: %w", err)
}
// TODO: Add some ability to toggle syslog
exitCommandArgs, err := specgenutil.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), false, true)
if err != nil {
- return nil, errors.Wrapf(err, "error constructing exit command for exec session")
+ return nil, fmt.Errorf("error constructing exit command for exec session: %w", err)
}
execConfig.ExitCommand = exitCommandArgs
@@ -774,7 +773,7 @@ func checkExecPreserveFDs(options entities.ExecOptions) error {
for _, e := range entries {
i, err := strconv.Atoi(e.Name())
if err != nil {
- return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
+ return fmt.Errorf("cannot parse %s in /proc/self/fd: %w", e.Name(), err)
}
m[i] = true
}
@@ -891,7 +890,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
if options.Attach {
err = terminal.StartAttachCtr(ctx, ctr, options.Stdout, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, !ctrRunning)
- if errors.Cause(err) == define.ErrDetach {
+ if errors.Is(err, define.ErrDetach) {
// User manually detached
// Exit cleanly immediately
reports = append(reports, &entities.ContainerStartReport{
@@ -903,7 +902,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
return reports, nil
}
- if errors.Cause(err) == define.ErrWillDeadlock {
+ if errors.Is(err, define.ErrWillDeadlock) {
logrus.Debugf("Deadlock error: %v", err)
reports = append(reports, &entities.ContainerStartReport{
Id: ctr.ID(),
@@ -911,7 +910,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
Err: err,
ExitCode: define.ExitCode(err),
})
- return reports, errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
+ return reports, fmt.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
}
if ctrRunning {
@@ -936,8 +935,9 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
logrus.Errorf("Removing container %s: %v", ctr.ID(), err)
}
}
- return reports, errors.Wrapf(err, "unable to start container %s", ctr.ID())
+ return reports, fmt.Errorf("unable to start container %s: %w", ctr.ID(), err)
}
+
exitCode = ic.GetContainerExitCode(ctx, ctr)
reports = append(reports, &entities.ContainerStartReport{
Id: ctr.ID(),
@@ -959,12 +959,12 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
}
if err := ctr.Start(ctx, true); err != nil {
report.Err = err
- if errors.Cause(err) == define.ErrWillDeadlock {
- report.Err = errors.Wrapf(err, "please run 'podman system renumber' to resolve deadlocks")
+ if errors.Is(err, define.ErrWillDeadlock) {
+ report.Err = fmt.Errorf("please run 'podman system renumber' to resolve deadlocks: %w", err)
reports = append(reports, report)
continue
}
- report.Err = errors.Wrapf(err, "unable to start container %q", ctr.ID())
+ report.Err = fmt.Errorf("unable to start container %q: %w", ctr.ID(), err)
reports = append(reports, report)
if ctr.AutoRemove() {
if err := ic.removeContainer(ctx, ctr, entities.RmOptions{}); err != nil {
@@ -1000,7 +1000,7 @@ func (ic *ContainerEngine) Diff(ctx context.Context, namesOrIDs []string, opts e
if opts.Latest {
ctnr, err := ic.Libpod.GetLatestContainer()
if err != nil {
- return nil, errors.Wrap(err, "unable to get latest container")
+ return nil, fmt.Errorf("unable to get latest container: %w", err)
}
base = ctnr.ID()
}
@@ -1063,7 +1063,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
// We've manually detached from the container
// Do not perform cleanup, or wait for container exit code
// Just exit immediately
- if errors.Cause(err) == define.ErrDetach {
+ if errors.Is(err, define.ErrDetach) {
report.ExitCode = 0
return &report, nil
}
@@ -1073,10 +1073,10 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
logrus.Debugf("unable to remove container %s after failing to start and attach to it", ctr.ID())
}
}
- if errors.Cause(err) == define.ErrWillDeadlock {
+ if errors.Is(err, define.ErrWillDeadlock) {
logrus.Debugf("Deadlock error on %q: %v", ctr.ID(), err)
report.ExitCode = define.ExitCode(err)
- return &report, errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
+ return &report, fmt.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
}
report.ExitCode = define.ExitCode(err)
return &report, err
@@ -1085,8 +1085,8 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
if opts.Rm && !ctr.ShouldRestart(ctx) {
var timeout *uint
if err := ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout); err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr ||
- errors.Cause(err) == define.ErrCtrRemoved {
+ if errors.Is(err, define.ErrNoSuchCtr) ||
+ errors.Is(err, define.ErrCtrRemoved) {
logrus.Infof("Container %s was already removed, skipping --rm", ctr.ID())
} else {
logrus.Errorf("Removing container %s: %v", ctr.ID(), err)
@@ -1098,25 +1098,11 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
func (ic *ContainerEngine) GetContainerExitCode(ctx context.Context, ctr *libpod.Container) int {
exitCode, err := ctr.Wait(ctx)
- if err == nil {
- return int(exitCode)
- }
- if errors.Cause(err) != define.ErrNoSuchCtr {
- logrus.Errorf("Could not retrieve exit code: %v", err)
+ if err != nil {
+ logrus.Errorf("Waiting for container %s: %v", ctr.ID(), err)
return define.ExecErrorCodeNotFound
}
- // Make 4 attempt with 0.25s backoff between each for 1 second total
- var event *events.Event
- for i := 0; i < 4; i++ {
- event, err = ic.Libpod.GetLastContainerEvent(ctx, ctr.ID(), events.Exited)
- if err != nil {
- time.Sleep(250 * time.Millisecond)
- continue
- }
- return event.ContainerExitCode
- }
- logrus.Errorf("Could not retrieve exit code from event: %v", err)
- return define.ExecErrorCodeNotFound
+ return int(exitCode)
}
func (ic *ContainerEngine) ContainerLogs(ctx context.Context, containers []string, options entities.ContainerLogsOptions) error {
@@ -1193,12 +1179,12 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
var timeout *uint
err = ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout)
if err != nil {
- report.RmErr = errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID())
+ report.RmErr = fmt.Errorf("failed to clean up and remove container %v: %w", ctr.ID(), err)
}
} else {
err := ctr.Cleanup(ctx)
if err != nil {
- report.CleanErr = errors.Wrapf(err, "failed to cleanup container %v", ctr.ID())
+ report.CleanErr = fmt.Errorf("failed to clean up container %v: %w", ctr.ID(), err)
}
}
@@ -1225,7 +1211,7 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin
err := ctr.Init(ctx, ctr.PodID() != "")
// If we're initializing all containers, ignore invalid state errors
- if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ if options.All && errors.Is(err, define.ErrCtrStateInvalid) {
err = nil
}
report.Err = err
@@ -1336,7 +1322,7 @@ func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []str
if mounted {
report := entities.ContainerUnmountReport{Id: sctr.ID}
if _, report.Err = ic.Libpod.UnmountStorageContainer(sctr.ID, options.Force); report.Err != nil {
- if errors.Cause(report.Err) != define.ErrCtrExists {
+ if !errors.Is(report.Err, define.ErrCtrExists) {
reports = append(reports, &report)
}
} else {
@@ -1370,11 +1356,11 @@ func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []str
report := entities.ContainerUnmountReport{Id: ctr.ID()}
if err := ctr.Unmount(options.Force); err != nil {
- if options.All && errors.Cause(err) == storage.ErrLayerNotMounted {
+ if options.All && errors.Is(err, storage.ErrLayerNotMounted) {
logrus.Debugf("Error umounting container %s, storage.ErrLayerNotMounted", ctr.ID())
continue
}
- report.Err = errors.Wrapf(err, "error unmounting container %s", ctr.ID())
+ report.Err = fmt.Errorf("error unmounting container %s: %w", ctr.ID(), err)
}
reports = append(reports, &report)
}
@@ -1423,7 +1409,7 @@ func (ic *ContainerEngine) Shutdown(_ context.Context) {
func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) (statsChan chan entities.ContainerStatsReport, err error) {
if options.Interval < 1 {
- return nil, errors.New("Invalid interval, must be a positive number greater zero")
+ return nil, errors.New("invalid interval, must be a positive number greater zero")
}
if rootless.IsRootless() {
unified, err := cgroups.IsCgroup2UnifiedMode()
@@ -1478,19 +1464,18 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri
computeStats := func() ([]define.ContainerStats, error) {
containers, err = containerFunc()
if err != nil {
- return nil, errors.Wrapf(err, "unable to get list of containers")
+ return nil, fmt.Errorf("unable to get list of containers: %w", err)
}
reportStats := []define.ContainerStats{}
for _, ctr := range containers {
stats, err := ctr.GetContainerStats(containerStats[ctr.ID()])
if err != nil {
- cause := errors.Cause(err)
- if queryAll && (cause == define.ErrCtrRemoved || cause == define.ErrNoSuchCtr || cause == define.ErrCtrStateInvalid) {
+ if queryAll && (errors.Is(err, define.ErrCtrRemoved) || errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrStateInvalid)) {
continue
}
- if cause == cgroups.ErrCgroupV1Rootless {
- err = cause
+ if errors.Is(err, cgroups.ErrCgroupV1Rootless) {
+ err = cgroups.ErrCgroupV1Rootless
}
return nil, err
}
@@ -1592,6 +1577,11 @@ func (ic *ContainerEngine) ContainerClone(ctx context.Context, ctrCloneOpts enti
return nil, err
}
+ conf := c.Config()
+ if conf.Spec != nil && conf.Spec.Process != nil && conf.Spec.Process.Terminal { // if we do not pass term, running ctrs exit
+ spec.Terminal = true
+ }
+
// Print warnings
if len(out) > 0 {
for _, w := range out {
@@ -1611,8 +1601,8 @@ func (ic *ContainerEngine) ContainerClone(ctx context.Context, ctrCloneOpts enti
switch {
case strings.Contains(n, "-clone"):
ind := strings.Index(n, "-clone") + 6
- num, _ := strconv.Atoi(n[ind:])
- if num == 0 { // clone1 is hard to get with this logic, just check for it here.
+ num, err := strconv.Atoi(n[ind:])
+ if num == 0 && err != nil { // clone1 is hard to get with this logic, just check for it here.
_, err = ic.Libpod.LookupContainer(n + "1")
if err != nil {
spec.Name = n + "1"
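
Among the containers.go changes above, GetContainerExitCode drops the event-polling fallback and simply returns whatever `ctr.Wait(ctx)` reports, logging and falling back to an error exit code otherwise. A self-contained sketch of that simplified flow; the interface, fake container, and numeric code are stand-ins, not the libpod API:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Placeholder for define.ExecErrorCodeNotFound; the real value lives in
// libpod/define.
const execErrorCodeNotFound = 127

// waiter is a stand-in for *libpod.Container; only the two methods used
// by the sketch are modeled.
type waiter interface {
	Wait(ctx context.Context) (int32, error)
	ID() string
}

// exitCode mirrors the simplified logic in the hunk: on any Wait error,
// log it and return the fallback code; otherwise return the exit code
// reported by the runtime. The old retry loop over exit events is gone.
func exitCode(ctx context.Context, ctr waiter) int {
	code, err := ctr.Wait(ctx)
	if err != nil {
		fmt.Printf("Waiting for container %s: %v\n", ctr.ID(), err)
		return execErrorCodeNotFound
	}
	return int(code)
}

type fakeCtr struct{}

func (fakeCtr) Wait(context.Context) (int32, error) { return 0, errors.New("container gone") }
func (fakeCtr) ID() string                          { return "abc123" }

func main() {
	fmt.Println(exitCode(context.Background(), fakeCtr{})) // 127
}
```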
diff --git a/pkg/domain/infra/abi/containers_runlabel.go b/pkg/domain/infra/abi/containers_runlabel.go
index fac15f72d..463988c87 100644
--- a/pkg/domain/infra/abi/containers_runlabel.go
+++ b/pkg/domain/infra/abi/containers_runlabel.go
@@ -2,6 +2,7 @@ package abi
import (
"context"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -14,7 +15,6 @@ import (
envLib "github.com/containers/podman/v4/pkg/env"
"github.com/containers/podman/v4/utils"
"github.com/google/shlex"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -40,7 +40,7 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
}
if len(pulledImages) != 1 {
- return errors.Errorf("internal error: expected an image to be pulled (or an error)")
+ return errors.New("internal error: expected an image to be pulled (or an error)")
}
// Extract the runlabel from the image.
@@ -57,7 +57,7 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
}
}
if runlabel == "" {
- return errors.Errorf("cannot find the value of label: %s in image: %s", label, imageRef)
+ return fmt.Errorf("cannot find the value of label: %s in image: %s", label, imageRef)
}
cmd, env, err := generateRunlabelCommand(runlabel, pulledImages[0], imageRef, args, options)
@@ -86,7 +86,7 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
name := cmd[i+1]
ctr, err := ic.Libpod.LookupContainer(name)
if err != nil {
- if errors.Cause(err) != define.ErrNoSuchCtr {
+ if !errors.Is(err, define.ErrNoSuchCtr) {
logrus.Debugf("Error occurred searching for container %s: %v", name, err)
return err
}
diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go
index ff85dee9b..5c1047b74 100644
--- a/pkg/domain/infra/abi/generate.go
+++ b/pkg/domain/infra/abi/generate.go
@@ -12,7 +12,6 @@ import (
k8sAPI "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1"
"github.com/containers/podman/v4/pkg/systemd/generate"
"github.com/ghodss/yaml"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) {
@@ -30,8 +29,8 @@ func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string,
// If it's not a container, we either have a pod or garbage.
pod, err := ic.Libpod.LookupPod(nameOrID)
if err != nil {
- err = errors.Wrap(ctrErr, err.Error())
- return nil, errors.Wrapf(err, "%s does not refer to a container or pod", nameOrID)
+ err = fmt.Errorf("%v: %w", err.Error(), ctrErr)
+ return nil, fmt.Errorf("%s does not refer to a container or pod: %w", nameOrID, err)
}
// Generate the units for the pod and all its containers.
@@ -63,7 +62,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string,
// now that infra holds NS data, we need to support dependencies.
// we cannot deal with ctrs already in a pod.
if len(ctr.PodID()) > 0 {
- return nil, errors.Errorf("container %s is associated with pod %s: use generate on the pod itself", ctr.ID(), ctr.PodID())
+ return nil, fmt.Errorf("container %s is associated with pod %s: use generate on the pod itself", ctr.ID(), ctr.PodID())
}
ctrs = append(ctrs, ctr)
continue
@@ -92,7 +91,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string,
}
// If it reaches here is because the name or id did not exist.
- return nil, errors.Errorf("Name or ID %q not found", nameOrID)
+ return nil, fmt.Errorf("name or ID %q not found", nameOrID)
}
// Generate kube persistent volume claims from volumes.
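
In GenerateSystemd above, the pod-lookup error is folded into the message with `%v` while the container error is wrapped with `%w`, so only the latter stays matchable in the chain. A tiny sketch of that distinction with placeholder sentinels:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errNoCtr = errors.New("no such container")
	errNoPod = errors.New("no such pod")
)

func main() {
	// The pod error contributes only its text via %v; the container
	// error is wrapped with %w and remains visible to errors.Is.
	err := fmt.Errorf("%v: %w", errNoPod.Error(), errNoCtr)
	fmt.Println(errors.Is(err, errNoCtr), errors.Is(err, errNoPod)) // true false
}
```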
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index c3ec7dd8a..38008c7b9 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -2,7 +2,9 @@ package abi
import (
"context"
+ "errors"
"fmt"
+ "io/fs"
"io/ioutil"
"net/url"
"os"
@@ -29,12 +31,10 @@ import (
domainUtils "github.com/containers/podman/v4/pkg/domain/utils"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/containers/podman/v4/utils"
"github.com/containers/storage"
dockerRef "github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -128,14 +128,14 @@ func (ir *ImageEngine) History(ctx context.Context, nameOrID string, opts entiti
func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entities.ImageMountOptions) ([]*entities.ImageMountReport, error) {
if opts.All && len(nameOrIDs) > 0 {
- return nil, errors.Errorf("cannot mix --all with images")
+ return nil, errors.New("cannot mix --all with images")
}
if os.Geteuid() != 0 {
if driver := ir.Libpod.StorageConfig().GraphDriverName; driver != "vfs" {
// Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part
// of the mount command.
- return nil, errors.Errorf("cannot mount using driver %s in rootless mode", driver)
+ return nil, fmt.Errorf("cannot mount using driver %s in rootless mode", driver)
}
became, ret, err := rootless.BecomeRootInUserNS("")
@@ -159,10 +159,6 @@ func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entit
mountReports := []*entities.ImageMountReport{}
listMountsOnly := !opts.All && len(nameOrIDs) == 0
for _, i := range images {
- // TODO: the .Err fields are not used. This pre-dates the
- // libimage migration but should be addressed at some point.
- // A quick glimpse at cmd/podman/image/mount.go suggests that
- // the errors needed to be handled there as well.
var mountPoint string
var err error
if listMountsOnly {
@@ -198,7 +194,7 @@ func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entit
func (ir *ImageEngine) Unmount(ctx context.Context, nameOrIDs []string, options entities.ImageUnmountOptions) ([]*entities.ImageUnmountReport, error) {
if options.All && len(nameOrIDs) > 0 {
- return nil, errors.Errorf("cannot mix --all with images")
+ return nil, errors.New("cannot mix --all with images")
}
listImagesOptions := &libimage.ListImagesOptions{}
@@ -296,7 +292,7 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
case "v2s2", "docker":
manifestType = manifest.DockerV2Schema2MediaType
default:
- return errors.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", options.Format)
+ return fmt.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", options.Format)
}
pushOptions := &libimage.PushOptions{}
@@ -354,22 +350,6 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
}
return pushError
}
-
-// Transfer moves images between root and rootless storage so the user specified in the scp call can access and use the image modified by root
-func (ir *ImageEngine) Transfer(ctx context.Context, source entities.ImageScpOptions, dest entities.ImageScpOptions, parentFlags []string) error {
- if source.User == "" {
- return errors.Wrapf(define.ErrInvalidArg, "you must define a user when transferring from root to rootless storage")
- }
- podman, err := os.Executable()
- if err != nil {
- return err
- }
- if rootless.IsRootless() && (len(dest.User) == 0 || dest.User == "root") { // if we are rootless and do not have a destination user we can just use sudo
- return transferRootless(source, dest, podman, parentFlags)
- }
- return transferRootful(source, dest, podman, parentFlags)
-}
-
func (ir *ImageEngine) Tag(ctx context.Context, nameOrID string, tags []string, options entities.ImageTagOptions) error {
// Allow tagging manifest list instead of resolving instances from manifest
lookupOptions := &libimage.LookupImageOptions{ManifestList: true}
@@ -543,12 +523,12 @@ func removeErrorsToExitCode(rmErrors []error) int {
}
for _, e := range rmErrors {
- switch errors.Cause(e) {
- case storage.ErrImageUnknown, storage.ErrLayerUnknown:
+ //nolint:gocritic
+ if errors.Is(e, storage.ErrImageUnknown) || errors.Is(e, storage.ErrLayerUnknown) {
noSuchImageErrors = true
- case storage.ErrImageUsedByContainer:
+ } else if errors.Is(e, storage.ErrImageUsedByContainer) {
inUseErrors = true
- default:
+ } else {
otherErrors = true
}
}
@@ -597,7 +577,7 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie
rmErrors = libimageErrors
- return //nolint
+ return
}
// Shutdown Libpod engine
@@ -610,11 +590,11 @@ func (ir *ImageEngine) Shutdown(_ context.Context) {
func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
- return nil, errors.Wrap(err, "error initializing GPG")
+ return nil, fmt.Errorf("error initializing GPG: %w", err)
}
defer mech.Close()
if err := mech.SupportsSigning(); err != nil {
- return nil, errors.Wrap(err, "signing is not supported")
+ return nil, fmt.Errorf("signing is not supported: %w", err)
}
sc := ir.Libpod.SystemContext()
sc.DockerCertPath = options.CertDir
@@ -624,11 +604,11 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
err = func() error {
srcRef, err := alltransports.ParseImageName(signimage)
if err != nil {
- return errors.Wrapf(err, "error parsing image name")
+ return fmt.Errorf("error parsing image name: %w", err)
}
rawSource, err := srcRef.NewImageSource(ctx, sc)
if err != nil {
- return errors.Wrapf(err, "error getting image source")
+ return fmt.Errorf("error getting image source: %w", err)
}
defer func() {
if err = rawSource.Close(); err != nil {
@@ -637,17 +617,17 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
}()
topManifestBlob, manifestType, err := rawSource.GetManifest(ctx, nil)
if err != nil {
- return errors.Wrapf(err, "error getting manifest blob")
+ return fmt.Errorf("error getting manifest blob: %w", err)
}
dockerReference := rawSource.Reference().DockerReference()
if dockerReference == nil {
- return errors.Errorf("cannot determine canonical Docker reference for destination %s", transports.ImageName(rawSource.Reference()))
+ return fmt.Errorf("cannot determine canonical Docker reference for destination %s", transports.ImageName(rawSource.Reference()))
}
var sigStoreDir string
if options.Directory != "" {
repo := reference.Path(dockerReference)
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
- return errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dockerReference.String())
+ return fmt.Errorf("unexpected path elements in Docker reference %s for signature storage", dockerReference.String())
}
sigStoreDir = filepath.Join(options.Directory, repo)
} else {
@@ -667,11 +647,11 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
if options.All {
if !manifest.MIMETypeIsMultiImage(manifestType) {
- return errors.Errorf("%s is not a multi-architecture image (manifest type %s)", signimage, manifestType)
+ return fmt.Errorf("%s is not a multi-architecture image (manifest type %s)", signimage, manifestType)
}
list, err := manifest.ListFromBlob(topManifestBlob, manifestType)
if err != nil {
- return errors.Wrapf(err, "Error parsing manifest list %q", string(topManifestBlob))
+ return fmt.Errorf("error parsing manifest list %q: %w", string(topManifestBlob), err)
}
instanceDigests := list.Instances()
for _, instanceDigest := range instanceDigests {
@@ -681,13 +661,13 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
return err
}
if err = putSignature(man, mech, sigStoreDir, instanceDigest, dockerReference, options); err != nil {
- return errors.Wrapf(err, "error storing signature for %s, %v", dockerReference.String(), instanceDigest)
+ return fmt.Errorf("error storing signature for %s, %v: %w", dockerReference.String(), instanceDigest, err)
}
}
return nil
}
if err = putSignature(topManifestBlob, mech, sigStoreDir, manifestDigest, dockerReference, options); err != nil {
- return errors.Wrapf(err, "error storing signature for %s, %v", dockerReference.String(), manifestDigest)
+ return fmt.Errorf("error storing signature for %s, %v: %w", dockerReference.String(), manifestDigest, err)
}
return nil
}()
@@ -698,53 +678,32 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
return nil, nil
}
-func getSigFilename(sigStoreDirPath string) (string, error) {
- sigFileSuffix := 1
- sigFiles, err := ioutil.ReadDir(sigStoreDirPath)
+func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool) error {
+ rep, source, dest, flags, err := domainUtils.ExecuteTransfer(src, dst, parentFlags, quiet)
if err != nil {
- return "", err
- }
- sigFilenames := make(map[string]bool)
- for _, file := range sigFiles {
- sigFilenames[file.Name()] = true
+ return err
}
- for {
- sigFilename := "signature-" + strconv.Itoa(sigFileSuffix)
- if _, exists := sigFilenames[sigFilename]; !exists {
- return sigFilename, nil
+ if (rep == nil && err == nil) && (source != nil && dest != nil) { // we need to execute the transfer
+ err := Transfer(ctx, *source, *dest, flags)
+ if err != nil {
+ return err
}
- sigFileSuffix++
}
+ return nil
}
-func localPathFromURI(url *url.URL) (string, error) {
- if url.Scheme != "file" {
- return "", errors.Errorf("writing to %s is not supported. Use a supported scheme", url.String())
- }
- return url.Path, nil
-}
-
-// putSignature creates signature and saves it to the signstore file
-func putSignature(manifestBlob []byte, mech signature.SigningMechanism, sigStoreDir string, instanceDigest digest.Digest, dockerReference dockerRef.Reference, options entities.SignOptions) error {
- newSig, err := signature.SignDockerManifest(manifestBlob, dockerReference.String(), mech, options.SignBy)
- if err != nil {
- return err
- }
- signatureDir := fmt.Sprintf("%s@%s=%s", sigStoreDir, instanceDigest.Algorithm(), instanceDigest.Hex())
- if err := os.MkdirAll(signatureDir, 0751); err != nil {
- // The directory is allowed to exist
- if !os.IsExist(err) {
- return err
- }
+func Transfer(ctx context.Context, source entities.ImageScpOptions, dest entities.ImageScpOptions, parentFlags []string) error {
+ if source.User == "" {
+ return fmt.Errorf("you must define a user when transferring from root to rootless storage: %w", define.ErrInvalidArg)
}
- sigFilename, err := getSigFilename(signatureDir)
+ podman, err := os.Executable()
if err != nil {
return err
}
- if err = ioutil.WriteFile(filepath.Join(signatureDir, sigFilename), newSig, 0644); err != nil {
- return err
+ if rootless.IsRootless() && (len(dest.User) == 0 || dest.User == "root") { // if we are rootless and do not have a destination user we can just use sudo
+ return transferRootless(source, dest, podman, parentFlags)
}
- return nil
+ return transferRootful(source, dest, podman, parentFlags)
}
// TransferRootless creates new podman processes using exec.Command and sudo, transferring images between the given source and destination users
@@ -767,7 +726,7 @@ func transferRootless(source entities.ImageScpOptions, dest entities.ImageScpOpt
} else {
cmdSave = exec.Command(podman)
}
- cmdSave = utils.CreateSCPCommand(cmdSave, saveCommand)
+ cmdSave = domainUtils.CreateSCPCommand(cmdSave, saveCommand)
logrus.Debugf("Executing save command: %q", cmdSave)
err := cmdSave.Run()
if err != nil {
@@ -780,8 +739,11 @@ func transferRootless(source entities.ImageScpOptions, dest entities.ImageScpOpt
} else {
cmdLoad = exec.Command(podman)
}
- cmdLoad = utils.CreateSCPCommand(cmdLoad, loadCommand)
+ cmdLoad = domainUtils.CreateSCPCommand(cmdLoad, loadCommand)
logrus.Debugf("Executing load command: %q", cmdLoad)
+ if len(dest.Tag) > 0 {
+ return domainUtils.ScpTag(cmdLoad, podman, dest)
+ }
return cmdLoad.Run()
}
@@ -837,11 +799,20 @@ func transferRootful(source entities.ImageScpOptions, dest entities.ImageScpOpti
return err
}
}
- err = execPodman(uSave, saveCommand)
+ _, err = execTransferPodman(uSave, saveCommand, false)
if err != nil {
return err
}
- return execPodman(uLoad, loadCommand)
+ out, err := execTransferPodman(uLoad, loadCommand, (len(dest.Tag) > 0))
+ if err != nil {
+ return err
+ }
+ if out != nil {
+ image := domainUtils.ExtractImage(out)
+ _, err := execTransferPodman(uLoad, []string{podman, "tag", image, dest.Tag}, false)
+ return err
+ }
+ return nil
}
func lookupUser(u string) (*user.User, error) {
@@ -851,10 +822,10 @@ func lookupUser(u string) (*user.User, error) {
return user.Lookup(u)
}
-func execPodman(execUser *user.User, command []string) error {
- cmdLogin, err := utils.LoginUser(execUser.Username)
+func execTransferPodman(execUser *user.User, command []string, needToTag bool) ([]byte, error) {
+ cmdLogin, err := domainUtils.LoginUser(execUser.Username)
if err != nil {
- return err
+ return nil, err
}
defer func() {
@@ -868,11 +839,11 @@ func execPodman(execUser *user.User, command []string) error {
cmd.Stdout = os.Stdout
uid, err := strconv.ParseInt(execUser.Uid, 10, 32)
if err != nil {
- return err
+ return nil, err
}
gid, err := strconv.ParseInt(execUser.Gid, 10, 32)
if err != nil {
- return err
+ return nil, err
}
cmd.SysProcAttr = &syscall.SysProcAttr{
Credential: &syscall.Credential{
@@ -882,5 +853,55 @@ func execPodman(execUser *user.User, command []string) error {
NoSetGroups: false,
},
}
- return cmd.Run()
+ if needToTag {
+ cmd.Stdout = nil
+ return cmd.Output()
+ }
+ return nil, cmd.Run()
+}
+
+func getSigFilename(sigStoreDirPath string) (string, error) {
+ sigFileSuffix := 1
+ sigFiles, err := ioutil.ReadDir(sigStoreDirPath)
+ if err != nil {
+ return "", err
+ }
+ sigFilenames := make(map[string]bool)
+ for _, file := range sigFiles {
+ sigFilenames[file.Name()] = true
+ }
+ for {
+ sigFilename := "signature-" + strconv.Itoa(sigFileSuffix)
+ if _, exists := sigFilenames[sigFilename]; !exists {
+ return sigFilename, nil
+ }
+ sigFileSuffix++
+ }
+}
+
+func localPathFromURI(url *url.URL) (string, error) {
+ if url.Scheme != "file" {
+ return "", fmt.Errorf("writing to %s is not supported. Use a supported scheme", url.String())
+ }
+ return url.Path, nil
+}
+
+// putSignature creates a signature and saves it to the signstore file
+func putSignature(manifestBlob []byte, mech signature.SigningMechanism, sigStoreDir string, instanceDigest digest.Digest, dockerReference dockerRef.Reference, options entities.SignOptions) error {
+ newSig, err := signature.SignDockerManifest(manifestBlob, dockerReference.String(), mech, options.SignBy)
+ if err != nil {
+ return err
+ }
+ signatureDir := fmt.Sprintf("%s@%s=%s", sigStoreDir, instanceDigest.Algorithm(), instanceDigest.Hex())
+ if err := os.MkdirAll(signatureDir, 0751); err != nil {
+ // The directory is allowed to exist
+ if !errors.Is(err, fs.ErrExist) {
+ return err
+ }
+ }
+ sigFilename, err := getSigFilename(signatureDir)
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(filepath.Join(signatureDir, sigFilename), newSig, 0644)
}
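
execTransferPodman above runs podman as a different user by attaching a syscall Credential to the command and, when a tag is needed, captures stdout instead of streaming it. A hedged, Linux-only sketch of that pattern; `runAs` and the `id -u` invocation are illustrative, not from the diff, and switching users requires sufficient privilege:

```go
package main

import (
	"fmt"
	"os/exec"
	"os/user"
	"strconv"
	"syscall"
)

// runAs looks up a user and runs a command with that user's UID/GID via
// SysProcAttr, returning captured stdout much like the needToTag branch
// in execTransferPodman.
func runAs(username string, argv []string) ([]byte, error) {
	u, err := user.Lookup(username)
	if err != nil {
		return nil, err
	}
	uid, err := strconv.ParseUint(u.Uid, 10, 32)
	if err != nil {
		return nil, err
	}
	gid, err := strconv.ParseUint(u.Gid, 10, 32)
	if err != nil {
		return nil, err
	}
	cmd := exec.Command(argv[0], argv[1:]...)
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Credential: &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)},
	}
	// Capture stdout so the caller can parse the command's output,
	// e.g. to pick out a loaded image before tagging it.
	return cmd.Output()
}

func main() {
	out, err := runAs("nobody", []string{"id", "-u"})
	fmt.Println(string(out), err)
}
```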
diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go
index 9a0aaaf3a..96e99fbf0 100644
--- a/pkg/domain/infra/abi/images_list.go
+++ b/pkg/domain/infra/abi/images_list.go
@@ -2,10 +2,10 @@ package abi
import (
"context"
+ "fmt"
"github.com/containers/common/libimage"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
)
func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
@@ -28,17 +28,15 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
for _, img := range images {
repoDigests, err := img.RepoDigests()
if err != nil {
- return nil, errors.Wrapf(err, "getting repoDigests from image %q", img.ID())
+ return nil, fmt.Errorf("getting repoDigests from image %q: %w", img.ID(), err)
}
isDangling, err := img.IsDangling(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "error checking if image %q is dangling", img.ID())
+ return nil, fmt.Errorf("error checking if image %q is dangling: %w", img.ID(), err)
}
e := entities.ImageSummary{
- ID: img.ID(),
- // TODO: libpod/image didn't set it but libimage should
- // ConfigDigest: string(img.ConfigDigest),
+ ID: img.ID(),
Created: img.Created().Unix(),
Dangling: isDangling,
Digest: string(img.Digest()),
@@ -51,18 +49,18 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
}
e.Labels, err = img.Labels(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving label for image %q: you may need to remove the image to resolve the error", img.ID())
+ return nil, fmt.Errorf("error retrieving label for image %q: you may need to remove the image to resolve the error: %w", img.ID(), err)
}
ctnrs, err := img.Containers()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving containers for image %q: you may need to remove the image to resolve the error", img.ID())
+ return nil, fmt.Errorf("error retrieving containers for image %q: you may need to remove the image to resolve the error: %w", img.ID(), err)
}
e.Containers = len(ctnrs)
sz, err := img.Size()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving size of image %q: you may need to remove the image to resolve the error", img.ID())
+ return nil, fmt.Errorf("error retrieving size of image %q: you may need to remove the image to resolve the error: %w", img.ID(), err)
}
e.Size = sz
// This is good enough for now, but has to be
@@ -71,7 +69,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
parent, err := img.Parent(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving parent of image %q: you may need to remove the image to resolve the error", img.ID())
+ return nil, fmt.Errorf("error retrieving parent of image %q: you may need to remove the image to resolve the error: %w", img.ID(), err)
}
if parent != nil {
e.ParentId = parent.ID()
diff --git a/pkg/domain/infra/abi/images_test.go b/pkg/domain/infra/abi/images_test.go
index 311ab3ed7..3999de457 100644
--- a/pkg/domain/infra/abi/images_test.go
+++ b/pkg/domain/infra/abi/images_test.go
@@ -16,39 +16,3 @@ func TestToDomainHistoryLayer(t *testing.T) {
newLayer := toDomainHistoryLayer(&layer)
assert.Equal(t, layer.Size, newLayer.Size)
}
-
-//
-// import (
-// "context"
-// "testing"
-//
-// "github.com/stretchr/testify/mock"
-// )
-//
-// type MockImageRuntime struct {
-// mock.Mock
-// }
-//
-// func (m *MockImageRuntime) Delete(ctx context.Context, renderer func() interface{}, name string) error {
-// _ = m.Called(ctx, renderer, name)
-// return nil
-// }
-//
-// func TestImageSuccess(t *testing.T) {
-// actual := func() interface{} { return nil }
-//
-// m := new(MockImageRuntime)
-// m.On(
-// "Delete",
-// mock.AnythingOfType("*context.emptyCtx"),
-// mock.AnythingOfType("func() interface {}"),
-// "fedora").
-// Return(nil)
-//
-// r := DirectImageRuntime{m}
-// err := r.Delete(context.TODO(), actual, "fedora")
-// if err != nil {
-// t.Errorf("should be nil, got: %v", err)
-// }
-// m.AssertExpectations(t)
-// }
diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go
index 8b52c335c..d20744d76 100644
--- a/pkg/domain/infra/abi/manifest.go
+++ b/pkg/domain/infra/abi/manifest.go
@@ -4,9 +4,12 @@ import (
"bytes"
"context"
"encoding/json"
+ "fmt"
"os"
"strings"
+ "errors"
+
"github.com/containers/common/libimage"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/manifest"
@@ -17,7 +20,6 @@ import (
"github.com/containers/storage"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -46,7 +48,7 @@ func (ir *ImageEngine) ManifestCreate(ctx context.Context, name string, images [
func (ir *ImageEngine) ManifestExists(ctx context.Context, name string) (*entities.BoolReport, error) {
_, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
if err != nil {
- if errors.Cause(err) == storage.ErrImageUnknown {
+ if errors.Is(err, storage.ErrImageUnknown) {
return &entities.BoolReport{Value: false}, nil
}
return nil, err
@@ -63,15 +65,13 @@ func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string) ([]byte
manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
if err != nil {
- switch errors.Cause(err) {
- // Do a remote inspect if there's no local image or if the
- // local image is not a manifest list.
- case storage.ErrImageUnknown, libimage.ErrNotAManifestList:
+ if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, libimage.ErrNotAManifestList) {
+ // Do a remote inspect if there's no local image or if the
+ // local image is not a manifest list.
return ir.remoteManifestInspect(ctx, name)
-
- default:
- return nil, err
}
+
+ return nil, err
}
schema2List, err := manifestList.Inspect()
@@ -86,7 +86,7 @@ func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string) ([]byte
var b bytes.Buffer
if err := json.Indent(&b, rawSchema2List, "", " "); err != nil {
- return nil, errors.Wrapf(err, "error rendering manifest %s for display", name)
+ return nil, fmt.Errorf("error rendering manifest %s for display: %w", name, err)
}
return b.Bytes(), nil
}
@@ -113,8 +113,7 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
// FIXME should we use multierror package instead?
// we want the new line here so ignore the linter
- //nolint:revive
- latestErr = errors.Wrapf(latestErr, "tried %v\n", e)
+ latestErr = fmt.Errorf("tried %v\n: %w", e, latestErr)
}
}
@@ -125,14 +124,14 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
}
src, err := ref.NewImageSource(ctx, sys)
if err != nil {
- appendErr(errors.Wrapf(err, "reading image %q", transports.ImageName(ref)))
+ appendErr(fmt.Errorf("reading image %q: %w", transports.ImageName(ref), err))
continue
}
defer src.Close()
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
if err != nil {
- appendErr(errors.Wrapf(err, "loading manifest %q", transports.ImageName(ref)))
+ appendErr(fmt.Errorf("loading manifest %q: %w", transports.ImageName(ref), err))
continue
}
@@ -150,7 +149,7 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
logrus.Warnf("The manifest type %s is not a manifest list but a single image.", manType)
schema2Manifest, err := manifest.Schema2FromManifest(result)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(result), manType)
+ return nil, fmt.Errorf("error parsing manifest blob %q as a %q: %w", string(result), manType, err)
}
if result, err = schema2Manifest.Serialize(); err != nil {
return nil, err
@@ -158,7 +157,7 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
default:
listBlob, err := manifest.ListFromBlob(result, manType)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(result), manType)
+ return nil, fmt.Errorf("error parsing manifest blob %q as a %q: %w", string(result), manType, err)
}
list, err := listBlob.ConvertToMIMEType(manifest.DockerV2ListMediaType)
if err != nil {
@@ -170,7 +169,7 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
}
if err = json.Indent(&b, result, "", " "); err != nil {
- return nil, errors.Wrapf(err, "error rendering manifest %s for display", name)
+ return nil, fmt.Errorf("error rendering manifest %s for display: %w", name, err)
}
return b.Bytes(), nil
}
@@ -213,7 +212,7 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st
for _, annotationSpec := range opts.Annotation {
spec := strings.SplitN(annotationSpec, "=", 2)
if len(spec) != 2 {
- return "", errors.Errorf("no value given for annotation %q", spec[0])
+ return "", fmt.Errorf("no value given for annotation %q", spec[0])
}
annotations[spec[0]] = spec[1]
}
@@ -231,7 +230,7 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st
func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string, opts entities.ManifestAnnotateOptions) (string, error) {
instanceDigest, err := digest.Parse(image)
if err != nil {
- return "", errors.Errorf(`invalid image digest "%s": %v`, image, err)
+ return "", fmt.Errorf(`invalid image digest "%s": %v`, image, err)
}
manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
@@ -251,7 +250,7 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string,
for _, annotationSpec := range opts.Annotation {
spec := strings.SplitN(annotationSpec, "=", 2)
if len(spec) != 2 {
- return "", errors.Errorf("no value given for annotation %q", spec[0])
+ return "", fmt.Errorf("no value given for annotation %q", spec[0])
}
annotations[spec[0]] = spec[1]
}
@@ -269,7 +268,7 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string,
func (ir *ImageEngine) ManifestRemoveDigest(ctx context.Context, name, image string) (string, error) {
instanceDigest, err := digest.Parse(image)
if err != nil {
- return "", errors.Errorf(`invalid image digest "%s": %v`, image, err)
+ return "", fmt.Errorf(`invalid image digest "%s": %v`, image, err)
}
manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
@@ -293,7 +292,7 @@ func (ir *ImageEngine) ManifestRm(ctx context.Context, names []string) (report *
func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination string, opts entities.ImagePushOptions) (string, error) {
manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
if err != nil {
- return "", errors.Wrapf(err, "error retrieving local image from image name %s", name)
+ return "", fmt.Errorf("error retrieving local image from image name %s: %w", name, err)
}
var manifestType string
@@ -304,7 +303,7 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin
case "v2s2", "docker":
manifestType = manifest.DockerV2Schema2MediaType
default:
- return "", errors.Errorf("unknown format %q. Choose one of the supported formats: 'oci' or 'v2s2'", opts.Format)
+ return "", fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci' or 'v2s2'", opts.Format)
}
}
@@ -333,7 +332,7 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin
if opts.Rm {
if _, rmErrors := ir.Libpod.LibimageRuntime().RemoveImages(ctx, []string{manifestList.ID()}, nil); len(rmErrors) > 0 {
- return "", errors.Wrap(rmErrors[0], "error removing manifest after push")
+ return "", fmt.Errorf("error removing manifest after push: %w", rmErrors[0])
}
}
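
The manifest.go hunks above are the core of the pkg/errors removal: errors.Cause(err) == sentinel comparisons become errors.Is(err, sentinel), and errors.Wrapf(err, ...) becomes fmt.Errorf("...: %w", err). A minimal sketch of why the two idioms are not interchangeable once standard wrapping is used (the sentinel below is a local stand-in, not storage.ErrImageUnknown itself):

    package main

    import (
        "errors"
        "fmt"
    )

    // ErrImageUnknown stands in for storage.ErrImageUnknown in this sketch.
    var ErrImageUnknown = errors.New("image not known")

    func lookup(name string) error {
        // Wrap the sentinel with %w so callers can still match it.
        return fmt.Errorf("looking up manifest list %q: %w", name, ErrImageUnknown)
    }

    func main() {
        err := lookup("fedora")
        // errors.Is unwraps the whole chain; a plain == comparison only sees
        // the outermost value and fails once the error has been wrapped.
        fmt.Println(errors.Is(err, ErrImageUnknown)) // true
        fmt.Println(err == ErrImageUnknown)          // false
    }

errors.Is keeps working no matter how many %w layers are stacked on top, which is what lets the converted code drop errors.Cause without changing behavior.
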
diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go
index 910008fc7..2428abfe9 100644
--- a/pkg/domain/infra/abi/network.go
+++ b/pkg/domain/infra/abi/network.go
@@ -2,20 +2,51 @@ package abi
import (
"context"
+ "errors"
+ "fmt"
+ "strconv"
"github.com/containers/common/libnetwork/types"
netutil "github.com/containers/common/libnetwork/util"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]types.Network, error) {
+ // dangling filter is not provided by netutil
+ var wantDangling bool
+
+ val, filterDangling := options.Filters["dangling"]
+ if filterDangling {
+ switch len(val) {
+ case 0:
+ return nil, fmt.Errorf("got no values for filter key \"dangling\"")
+ case 1:
+ var err error
+ wantDangling, err = strconv.ParseBool(val[0])
+ if err != nil {
+ return nil, fmt.Errorf("invalid dangling filter value \"%v\"", val[0])
+ }
+ delete(options.Filters, "dangling")
+ default:
+ return nil, fmt.Errorf("got more than one value for filter key \"dangling\"")
+ }
+ }
+
filters, err := netutil.GenerateNetworkFilters(options.Filters)
if err != nil {
return nil, err
}
+
+ if filterDangling {
+ danglingFilterFunc, err := ic.createDanglingFilterFunc(wantDangling)
+ if err != nil {
+ return nil, err
+ }
+
+ filters = append(filters, danglingFilterFunc)
+ }
nets, err := ic.Libpod.Network().NetworkList(filters...)
return nets, err
}
@@ -26,11 +57,11 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri
for _, name := range namesOrIds {
net, err := ic.Libpod.Network().NetworkInspect(name)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchNetwork {
- errs = append(errs, errors.Wrapf(err, "network %s", name))
+ if errors.Is(err, define.ErrNoSuchNetwork) {
+ errs = append(errs, fmt.Errorf("network %s: %w", name, err))
continue
} else {
- return nil, nil, errors.Wrapf(err, "error inspecting network %s", name)
+ return nil, nil, fmt.Errorf("error inspecting network %s: %w", name, err)
}
}
networks = append(networks, net)
@@ -50,8 +81,8 @@ func (ic *ContainerEngine) NetworkReload(ctx context.Context, names []string, op
report.Id = ctr.ID()
report.Err = ctr.ReloadNetwork()
// ignore errors for invalid ctr state and network mode when --all is used
- if options.All && (errors.Cause(report.Err) == define.ErrCtrStateInvalid ||
- errors.Cause(report.Err) == define.ErrNetworkModeInvalid) {
+ if options.All && (errors.Is(report.Err, define.ErrCtrStateInvalid) ||
+ errors.Is(report.Err, define.ErrNetworkModeInvalid)) {
continue
}
reports = append(reports, report)
@@ -83,7 +114,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
// if user passes force, we nuke containers and pods
if !options.Force {
// Without the force option, we return an error
- return reports, errors.Wrapf(define.ErrNetworkInUse, "%q has associated containers with it. Use -f to forcibly delete containers and pods", name)
+ return reports, fmt.Errorf("%q has associated containers with it. Use -f to forcibly delete containers and pods: %w", name, define.ErrNetworkInUse)
}
if c.IsInfra() {
// if we have a infra container we need to remove the pod
@@ -94,7 +125,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
if err := ic.Libpod.RemovePod(ctx, pod, true, true, options.Timeout); err != nil {
return reports, err
}
- } else if err := ic.Libpod.RemoveContainer(ctx, c, true, true, options.Timeout); err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
+ } else if err := ic.Libpod.RemoveContainer(ctx, c, true, true, options.Timeout); err != nil && !errors.Is(err, define.ErrNoSuchCtr) {
return reports, err
}
}
@@ -109,7 +140,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
func (ic *ContainerEngine) NetworkCreate(ctx context.Context, network types.Network) (*types.Network, error) {
if util.StringInSlice(network.Name, []string{"none", "host", "bridge", "private", "slirp4netns", "container", "ns"}) {
- return nil, errors.Errorf("cannot create network with name %q because it conflicts with a valid network mode", network.Name)
+ return nil, fmt.Errorf("cannot create network with name %q because it conflicts with a valid network mode", network.Name)
}
network, err := ic.Libpod.Network().NetworkCreate(network)
if err != nil {
@@ -142,8 +173,35 @@ func (ic *ContainerEngine) NetworkExists(ctx context.Context, networkname string
}, nil
}
-// Network prune removes unused cni networks
+// Network prune removes unused networks
func (ic *ContainerEngine) NetworkPrune(ctx context.Context, options entities.NetworkPruneOptions) ([]*entities.NetworkPruneReport, error) {
+ // get all filters
+ filters, err := netutil.GenerateNetworkPruneFilters(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+ danglingFilterFunc, err := ic.createDanglingFilterFunc(true)
+ if err != nil {
+ return nil, err
+ }
+ filters = append(filters, danglingFilterFunc)
+ nets, err := ic.Libpod.Network().NetworkList(filters...)
+ if err != nil {
+ return nil, err
+ }
+
+ pruneReport := make([]*entities.NetworkPruneReport, 0, len(nets))
+ for _, net := range nets {
+ pruneReport = append(pruneReport, &entities.NetworkPruneReport{
+ Name: net.Name,
+ Error: ic.Libpod.Network().NetworkRemove(net.Name),
+ })
+ }
+ return pruneReport, nil
+}
+
+// danglingFilter function is special and not implemented in libnetwork filters
+func (ic *ContainerEngine) createDanglingFilterFunc(wantDangling bool) (types.FilterFunc, error) {
cons, err := ic.Libpod.GetAllContainers()
if err != nil {
return nil, err
@@ -163,31 +221,12 @@ func (ic *ContainerEngine) NetworkPrune(ctx context.Context, options entities.Ne
// ignore the default network, this one cannot be deleted
networksToKeep[ic.Libpod.GetDefaultNetworkName()] = true
- // get all filters
- filters, err := netutil.GenerateNetworkPruneFilters(options.Filters)
- if err != nil {
- return nil, err
- }
- danglingFilterFunc := func(net types.Network) bool {
+ return func(net types.Network) bool {
for network := range networksToKeep {
if network == net.Name {
- return false
+ return !wantDangling
}
}
- return true
- }
- filters = append(filters, danglingFilterFunc)
- nets, err := ic.Libpod.Network().NetworkList(filters...)
- if err != nil {
- return nil, err
- }
-
- pruneReport := make([]*entities.NetworkPruneReport, 0, len(nets))
- for _, net := range nets {
- pruneReport = append(pruneReport, &entities.NetworkPruneReport{
- Name: net.Name,
- Error: ic.Libpod.Network().NetworkRemove(net.Name),
- })
- }
- return pruneReport, nil
+ return wantDangling
+ }, nil
}
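
NetworkList now pulls the "dangling" key out of options.Filters itself, validates it with strconv.ParseBool, and appends a FilterFunc built by createDanglingFilterFunc to the filters produced by netutil.GenerateNetworkFilters. A rough, self-contained sketch of how such a predicate composes with other filters (simplified types, not the actual libnetwork API):

    package main

    import "fmt"

    type Network struct{ Name string }

    // FilterFunc mirrors the shape of types.FilterFunc: keep a network when true.
    type FilterFunc func(Network) bool

    func danglingFilter(inUse map[string]bool, wantDangling bool) FilterFunc {
        return func(net Network) bool {
            if inUse[net.Name] {
                return !wantDangling // used network: keep only when dangling=false
            }
            return wantDangling // unused network: keep only when dangling=true
        }
    }

    func main() {
        filters := []FilterFunc{danglingFilter(map[string]bool{"podman": true}, true)}
        for _, n := range []Network{{"podman"}, {"test-net"}} {
            keep := true
            for _, f := range filters {
                if !f(n) {
                    keep = false
                    break
                }
            }
            fmt.Println(n.Name, keep) // podman false, test-net true
        }
    }
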
diff --git a/pkg/domain/infra/abi/parse/parse.go b/pkg/domain/infra/abi/parse/parse.go
index 66794e592..19699589b 100644
--- a/pkg/domain/infra/abi/parse/parse.go
+++ b/pkg/domain/infra/abi/parse/parse.go
@@ -1,13 +1,13 @@
package parse
import (
+ "fmt"
"strconv"
"strings"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
units "github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -32,7 +32,7 @@ func VolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error)
case "size":
size, err := units.FromHumanSize(splitO[1])
if err != nil {
- return nil, errors.Wrapf(err, "cannot convert size %s to integer", splitO[1])
+ return nil, fmt.Errorf("cannot convert size %s to integer: %w", splitO[1], err)
}
libpodOptions = append(libpodOptions, libpod.WithVolumeSize(uint64(size)))
finalVal = append(finalVal, o)
@@ -41,7 +41,7 @@ func VolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error)
case "inodes":
inodes, err := strconv.ParseUint(splitO[1], 10, 64)
if err != nil {
- return nil, errors.Wrapf(err, "cannot convert inodes %s to integer", splitO[1])
+ return nil, fmt.Errorf("cannot convert inodes %s to integer: %w", splitO[1], err)
}
libpodOptions = append(libpodOptions, libpod.WithVolumeInodes(inodes))
finalVal = append(finalVal, o)
@@ -49,11 +49,11 @@ func VolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error)
volumeOptions["INODES"] = splitO[1]
case "uid":
if len(splitO) != 2 {
- return nil, errors.Wrapf(define.ErrInvalidArg, "uid option must provide a UID")
+ return nil, fmt.Errorf("uid option must provide a UID: %w", define.ErrInvalidArg)
}
intUID, err := strconv.Atoi(splitO[1])
if err != nil {
- return nil, errors.Wrapf(err, "cannot convert UID %s to integer", splitO[1])
+ return nil, fmt.Errorf("cannot convert UID %s to integer: %w", splitO[1], err)
}
logrus.Debugf("Removing uid= from options and adding WithVolumeUID for UID %d", intUID)
libpodOptions = append(libpodOptions, libpod.WithVolumeUID(intUID), libpod.WithVolumeNoChown())
@@ -62,11 +62,11 @@ func VolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error)
volumeOptions["UID"] = splitO[1]
case "gid":
if len(splitO) != 2 {
- return nil, errors.Wrapf(define.ErrInvalidArg, "gid option must provide a GID")
+ return nil, fmt.Errorf("gid option must provide a GID: %w", define.ErrInvalidArg)
}
intGID, err := strconv.Atoi(splitO[1])
if err != nil {
- return nil, errors.Wrapf(err, "cannot convert GID %s to integer", splitO[1])
+ return nil, fmt.Errorf("cannot convert GID %s to integer: %w", splitO[1], err)
}
logrus.Debugf("Removing gid= from options and adding WithVolumeGID for GID %d", intGID)
libpodOptions = append(libpodOptions, libpod.WithVolumeGID(intGID), libpod.WithVolumeNoChown())
@@ -78,6 +78,16 @@ func VolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error)
libpodOptions = append(libpodOptions, libpod.WithVolumeDisableQuota())
// set option "NOQUOTA": "true"
volumeOptions["NOQUOTA"] = "true"
+ case "timeout":
+ if len(splitO) != 2 {
+ return nil, fmt.Errorf("timeout option must provide a valid timeout in seconds: %w", define.ErrInvalidArg)
+ }
+ intTimeout, err := strconv.Atoi(splitO[1])
+ if err != nil {
+ return nil, fmt.Errorf("cannot convert Timeout %s to an integer: %w", splitO[1], err)
+ }
+ logrus.Debugf("Removing timeout from options and adding WithTimeout for Timeout %d", intTimeout)
+ libpodOptions = append(libpodOptions, libpod.WithVolumeDriverTimeout(intTimeout))
default:
finalVal = append(finalVal, o)
}
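
The new "timeout" case mirrors the existing "uid"/"gid" handling: split the option on "=", require a value, convert it with strconv.Atoi, and translate the result into a volume-create option (libpod.WithVolumeDriverTimeout in the hunk above). A standalone sketch of just the parsing step, with the libpod option left out:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func parseTimeoutOption(o string) (int, error) {
        splitO := strings.SplitN(o, "=", 2)
        if len(splitO) != 2 {
            return 0, fmt.Errorf("timeout option must provide a valid timeout in seconds")
        }
        intTimeout, err := strconv.Atoi(splitO[1])
        if err != nil {
            return 0, fmt.Errorf("cannot convert timeout %s to an integer: %w", splitO[1], err)
        }
        return intTimeout, nil
    }

    func main() {
        t, err := parseTimeoutOption("timeout=10")
        fmt.Println(t, err) // 10 <nil>
    }
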
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index e04ab3a1a..c913fdb69 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -3,6 +3,7 @@ package abi
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -29,9 +30,8 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
- yamlv2 "gopkg.in/yaml.v2"
+ yamlv3 "gopkg.in/yaml.v3"
)
// createServiceContainer creates a container that can later on
@@ -114,7 +114,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
// sort kube kinds
documentList, err = sortKubeKinds(documentList)
if err != nil {
- return nil, errors.Wrap(err, "unable to sort kube kinds")
+ return nil, fmt.Errorf("unable to sort kube kinds: %w", err)
}
ipIndex := 0
@@ -126,7 +126,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
for _, document := range documentList {
kind, err := getKubeKind(document)
if err != nil {
- return nil, errors.Wrap(err, "unable to read kube YAML")
+ return nil, fmt.Errorf("unable to read kube YAML: %w", err)
}
// TODO: create constants for the various "kinds" of yaml files.
@@ -154,14 +154,14 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
var podTemplateSpec v1.PodTemplateSpec
if err := yaml.Unmarshal(document, &podYAML); err != nil {
- return nil, errors.Wrap(err, "unable to read YAML as Kube Pod")
+ return nil, fmt.Errorf("unable to read YAML as Kube Pod: %w", err)
}
podTemplateSpec.ObjectMeta = podYAML.ObjectMeta
podTemplateSpec.Spec = podYAML.Spec
for name, val := range podYAML.Annotations {
if len(val) > define.MaxKubeAnnotation {
- return nil, errors.Errorf("invalid annotation %q=%q value length exceeds Kubernetetes max %d", name, val, define.MaxKubeAnnotation)
+				return nil, fmt.Errorf("invalid annotation %q=%q value length exceeds Kubernetes max %d", name, val, define.MaxKubeAnnotation)
}
}
for name, val := range options.Annotations {
@@ -182,7 +182,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
var deploymentYAML v1apps.Deployment
if err := yaml.Unmarshal(document, &deploymentYAML); err != nil {
- return nil, errors.Wrap(err, "unable to read YAML as Kube Deployment")
+ return nil, fmt.Errorf("unable to read YAML as Kube Deployment: %w", err)
}
r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps, serviceContainer)
@@ -196,7 +196,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
var pvcYAML v1.PersistentVolumeClaim
if err := yaml.Unmarshal(document, &pvcYAML); err != nil {
- return nil, errors.Wrap(err, "unable to read YAML as Kube PersistentVolumeClaim")
+ return nil, fmt.Errorf("unable to read YAML as Kube PersistentVolumeClaim: %w", err)
}
r, err := ic.playKubePVC(ctx, &pvcYAML)
@@ -210,7 +210,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
var configMap v1.ConfigMap
if err := yaml.Unmarshal(document, &configMap); err != nil {
- return nil, errors.Wrap(err, "unable to read YAML as Kube ConfigMap")
+ return nil, fmt.Errorf("unable to read YAML as Kube ConfigMap: %w", err)
}
configMaps = append(configMaps, configMap)
default:
@@ -240,7 +240,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
deploymentName = deploymentYAML.ObjectMeta.Name
if deploymentName == "" {
- return nil, errors.Errorf("Deployment does not have a name")
+ return nil, errors.New("deployment does not have a name")
}
numReplicas = 1
if deploymentYAML.Spec.Replicas != nil {
@@ -253,7 +253,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps, serviceContainer)
if err != nil {
- return nil, errors.Wrapf(err, "error encountered while bringing up pod %s", podName)
+ return nil, fmt.Errorf("error encountered while bringing up pod %s: %w", podName, err)
}
report.Pods = append(report.Pods, podReport.Pods...)
}
@@ -275,7 +275,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
// Assert the pod has a name
if podName == "" {
- return nil, errors.Errorf("pod does not have a name")
+ return nil, fmt.Errorf("pod does not have a name")
}
podOpt := entities.PodCreateOptions{
@@ -295,7 +295,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
if (ns.IsBridge() && len(networks) == 0) || ns.IsHost() {
- return nil, errors.Errorf("invalid value passed to --network: bridge or host networking must be configured in YAML")
+ return nil, fmt.Errorf("invalid value passed to --network: bridge or host networking must be configured in YAML")
}
podOpt.Net.Network = ns
@@ -316,10 +316,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
// FIXME This is very hard to support properly with a good ux
if len(options.StaticIPs) > *ipIndex {
if !podOpt.Net.Network.IsBridge() {
- return nil, errors.Wrap(define.ErrInvalidArg, "static ip addresses can only be set when the network mode is bridge")
+ return nil, fmt.Errorf("static ip addresses can only be set when the network mode is bridge: %w", define.ErrInvalidArg)
}
if len(podOpt.Net.Networks) != 1 {
- return nil, errors.Wrap(define.ErrInvalidArg, "cannot set static ip addresses for more than network, use netname:ip=<ip> syntax to specify ips for more than network")
+			return nil, fmt.Errorf("cannot set static ip addresses for more than one network, use netname:ip=<ip> syntax to specify ips for more than one network: %w", define.ErrInvalidArg)
}
for name, netOpts := range podOpt.Net.Networks {
netOpts.StaticIPs = append(netOpts.StaticIPs, options.StaticIPs[*ipIndex])
@@ -331,10 +331,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
if len(options.StaticMACs) > *ipIndex {
if !podOpt.Net.Network.IsBridge() {
- return nil, errors.Wrap(define.ErrInvalidArg, "static mac address can only be set when the network mode is bridge")
+ return nil, fmt.Errorf("static mac address can only be set when the network mode is bridge: %w", define.ErrInvalidArg)
}
if len(podOpt.Net.Networks) != 1 {
- return nil, errors.Wrap(define.ErrInvalidArg, "cannot set static mac address for more than network, use netname:mac=<mac> syntax to specify mac for more than network")
+			return nil, fmt.Errorf("cannot set static mac address for more than one network, use netname:mac=<mac> syntax to specify mac for more than one network: %w", define.ErrInvalidArg)
}
for name, netOpts := range podOpt.Net.Networks {
netOpts.StaticMAC = nettypes.HardwareAddr(options.StaticMACs[*ipIndex])
@@ -370,11 +370,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
cm, err := readConfigMapFromFile(f)
if err != nil {
- return nil, errors.Wrapf(err, "%q", p)
+ return nil, fmt.Errorf("%q: %w", p, err)
}
if _, present := configMapIndex[cm.Name]; present {
- return nil, errors.Errorf("ambiguous configuration: the same config map %s is present in YAML and in --configmaps %s file", cm.Name, p)
+ return nil, fmt.Errorf("ambiguous configuration: the same config map %s is present in YAML and in --configmaps %s file", cm.Name, p)
}
configMaps = append(configMaps, cm)
@@ -396,22 +396,22 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
// error out instead reuse the current volume.
vol, err = ic.Libpod.GetVolume(v.Source)
if err != nil {
- return nil, errors.Wrapf(err, "cannot re-use local volume for volume from configmap %q", v.Source)
+ return nil, fmt.Errorf("cannot re-use local volume for volume from configmap %q: %w", v.Source, err)
}
} else {
- return nil, errors.Wrapf(err, "cannot create a local volume for volume from configmap %q", v.Source)
+ return nil, fmt.Errorf("cannot create a local volume for volume from configmap %q: %w", v.Source, err)
}
}
mountPoint, err := vol.MountPoint()
if err != nil || mountPoint == "" {
- return nil, errors.Wrapf(err, "unable to get mountpoint of volume %q", vol.Name())
+ return nil, fmt.Errorf("unable to get mountpoint of volume %q: %w", vol.Name(), err)
}
// Create files and add data to the volume mountpoint based on the Items in the volume
for k, v := range v.Items {
dataPath := filepath.Join(mountPoint, k)
f, err := os.Create(dataPath)
if err != nil {
- return nil, errors.Wrapf(err, "cannot create file %q at volume mountpoint %q", k, mountPoint)
+ return nil, fmt.Errorf("cannot create file %q at volume mountpoint %q: %w", k, mountPoint, err)
}
defer f.Close()
_, err = f.WriteString(v)
@@ -492,12 +492,12 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
for _, initCtr := range podYAML.Spec.InitContainers {
// Error out if same name is used for more than one container
if _, ok := ctrNames[initCtr.Name]; ok {
- return nil, errors.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, initCtr.Name)
+ return nil, fmt.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, initCtr.Name)
}
ctrNames[initCtr.Name] = ""
// Init containers cannot have either of lifecycle, livenessProbe, readinessProbe, or startupProbe set
if initCtr.Lifecycle != nil || initCtr.LivenessProbe != nil || initCtr.ReadinessProbe != nil || initCtr.StartupProbe != nil {
- return nil, errors.Errorf("cannot create an init container that has either of lifecycle, livenessProbe, readinessProbe, or startupProbe set")
+ return nil, fmt.Errorf("cannot create an init container that has either of lifecycle, livenessProbe, readinessProbe, or startupProbe set")
}
pulledImage, labels, err := ic.getImageAndLabelInfo(ctx, cwd, annotations, writer, initCtr, options)
if err != nil {
@@ -548,7 +548,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
for _, container := range podYAML.Spec.Containers {
// Error out if the same name is used for more than one container
if _, ok := ctrNames[container.Name]; ok {
- return nil, errors.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, container.Name)
+ return nil, fmt.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, container.Name)
}
ctrNames[container.Name] = ""
pulledImage, labels, err := ic.getImageAndLabelInfo(ctx, cwd, annotations, writer, container, options)
@@ -599,11 +599,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if options.Start != types.OptionalBoolFalse {
// Start the containers
podStartErrors, err := pod.Start(ctx)
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
return nil, err
}
for id, err := range podStartErrors {
- playKubePod.ContainerErrors = append(playKubePod.ContainerErrors, errors.Wrapf(err, "error starting container %s", id).Error())
+ playKubePod.ContainerErrors = append(playKubePod.ContainerErrors, fmt.Errorf("error starting container %s: %w", id, err).Error())
fmt.Println(playKubePod.ContainerErrors)
}
}
@@ -735,14 +735,14 @@ func (ic *ContainerEngine) playKubePVC(ctx context.Context, pvcYAML *v1.Persiste
case util.VolumeUIDAnnotation:
uid, err := strconv.Atoi(v)
if err != nil {
- return nil, errors.Wrapf(err, "cannot convert uid %s to integer", v)
+ return nil, fmt.Errorf("cannot convert uid %s to integer: %w", v, err)
}
volOptions = append(volOptions, libpod.WithVolumeUID(uid))
opts["UID"] = v
case util.VolumeGIDAnnotation:
gid, err := strconv.Atoi(v)
if err != nil {
- return nil, errors.Wrapf(err, "cannot convert gid %s to integer", v)
+ return nil, fmt.Errorf("cannot convert gid %s to integer: %w", v, err)
}
volOptions = append(volOptions, libpod.WithVolumeGID(gid))
opts["GID"] = v
@@ -771,15 +771,15 @@ func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {
content, err := ioutil.ReadAll(r)
if err != nil {
- return cm, errors.Wrapf(err, "unable to read ConfigMap YAML content")
+ return cm, fmt.Errorf("unable to read ConfigMap YAML content: %w", err)
}
if err := yaml.Unmarshal(content, &cm); err != nil {
- return cm, errors.Wrapf(err, "unable to read YAML as Kube ConfigMap")
+ return cm, fmt.Errorf("unable to read YAML as Kube ConfigMap: %w", err)
}
if cm.Kind != "ConfigMap" {
- return cm, errors.Errorf("invalid YAML kind: %q. [ConfigMap] is the only supported by --configmap", cm.Kind)
+ return cm, fmt.Errorf("invalid YAML kind: %q. [ConfigMap] is the only supported by --configmap", cm.Kind)
}
return cm, nil
@@ -790,7 +790,7 @@ func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {
func splitMultiDocYAML(yamlContent []byte) ([][]byte, error) {
var documentList [][]byte
- d := yamlv2.NewDecoder(bytes.NewReader(yamlContent))
+ d := yamlv3.NewDecoder(bytes.NewReader(yamlContent))
for {
var o interface{}
// read individual document
@@ -799,14 +799,14 @@ func splitMultiDocYAML(yamlContent []byte) ([][]byte, error) {
break
}
if err != nil {
- return nil, errors.Wrapf(err, "multi doc yaml could not be split")
+ return nil, fmt.Errorf("multi doc yaml could not be split: %w", err)
}
if o != nil {
// back to bytes
- document, err := yamlv2.Marshal(o)
+ document, err := yamlv3.Marshal(o)
if err != nil {
- return nil, errors.Wrapf(err, "individual doc yaml could not be marshalled")
+ return nil, fmt.Errorf("individual doc yaml could not be marshalled: %w", err)
}
documentList = append(documentList, document)
@@ -915,27 +915,27 @@ func (ic *ContainerEngine) PlayKubeDown(ctx context.Context, body io.Reader, _ e
// sort kube kinds
documentList, err = sortKubeKinds(documentList)
if err != nil {
- return nil, errors.Wrap(err, "unable to sort kube kinds")
+ return nil, fmt.Errorf("unable to sort kube kinds: %w", err)
}
for _, document := range documentList {
kind, err := getKubeKind(document)
if err != nil {
- return nil, errors.Wrap(err, "unable to read as kube YAML")
+ return nil, fmt.Errorf("unable to read as kube YAML: %w", err)
}
switch kind {
case "Pod":
var podYAML v1.Pod
if err := yaml.Unmarshal(document, &podYAML); err != nil {
- return nil, errors.Wrap(err, "unable to read YAML as Kube Pod")
+ return nil, fmt.Errorf("unable to read YAML as Kube Pod: %w", err)
}
podNames = append(podNames, podYAML.ObjectMeta.Name)
case "Deployment":
var deploymentYAML v1apps.Deployment
if err := yaml.Unmarshal(document, &deploymentYAML); err != nil {
- return nil, errors.Wrap(err, "unable to read YAML as Kube Deployment")
+ return nil, fmt.Errorf("unable to read YAML as Kube Deployment: %w", err)
}
var numReplicas int32 = 1
deploymentName := deploymentYAML.ObjectMeta.Name
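
Besides the error-wrapping conversion, play.go moves the multi-document splitter from gopkg.in/yaml.v2 to gopkg.in/yaml.v3; the decode loop keeps the same shape. A minimal sketch of that loop against yaml.v3, assuming the module is available:

    package main

    import (
        "bytes"
        "errors"
        "fmt"
        "io"

        "gopkg.in/yaml.v3"
    )

    func splitDocs(content []byte) ([]interface{}, error) {
        var docs []interface{}
        d := yaml.NewDecoder(bytes.NewReader(content))
        for {
            var o interface{}
            err := d.Decode(&o)
            if errors.Is(err, io.EOF) {
                break // end of the multi-document stream
            }
            if err != nil {
                return nil, fmt.Errorf("multi doc yaml could not be split: %w", err)
            }
            if o != nil {
                docs = append(docs, o)
            }
        }
        return docs, nil
    }

    func main() {
        docs, err := splitDocs([]byte("kind: Pod\n---\nkind: ConfigMap\n"))
        fmt.Println(len(docs), err) // 2 <nil>
    }
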
diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go
index 32deb20e0..9a16f7fcd 100644
--- a/pkg/domain/infra/abi/pods.go
+++ b/pkg/domain/infra/abi/pods.go
@@ -2,14 +2,18 @@ package abi
import (
"context"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
dfilters "github.com/containers/podman/v4/pkg/domain/filters"
"github.com/containers/podman/v4/pkg/signal"
+ "github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/specgen/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -46,7 +50,7 @@ func getPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime)
func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) {
_, err := ic.Libpod.LookupPod(nameOrID)
- if err != nil && errors.Cause(err) != define.ErrNoSuchPod {
+ if err != nil && !errors.Is(err, define.ErrNoSuchPod) {
return nil, err
}
return &entities.BoolReport{Value: err == nil}, nil
@@ -66,14 +70,14 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt
for _, p := range pods {
report := entities.PodKillReport{Id: p.ID()}
conErrs, err := p.Kill(ctx, uint(sig))
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
reports = append(reports, &report)
continue
}
if len(conErrs) > 0 {
for id, err := range conErrs {
- report.Errs = append(report.Errs, errors.Wrapf(err, "error killing container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error killing container %s: %w", id, err))
}
reports = append(reports, &report)
continue
@@ -107,7 +111,7 @@ func (ic *ContainerEngine) PodLogs(ctx context.Context, nameOrID string, options
}
}
if !ctrFound {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not in pod %s", options.ContainerName, nameOrID)
+ return fmt.Errorf("container %s is not in pod %s: %w", options.ContainerName, nameOrID, define.ErrNoSuchCtr)
}
} else {
// No container name specified select all containers
@@ -132,13 +136,13 @@ func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, op
for _, p := range pods {
report := entities.PodPauseReport{Id: p.ID()}
errs, err := p.Pause(ctx)
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
continue
}
if len(errs) > 0 {
for id, v := range errs {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error pausing container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error pausing container %s: %w", id, v))
}
reports = append(reports, &report)
continue
@@ -155,15 +159,23 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
return nil, err
}
for _, p := range pods {
+ status, err := p.GetPodStatus()
+ if err != nil {
+ return nil, err
+ }
+ // If the pod is not paused or degraded, there is no need to attempt an unpause on it
+ if status != define.PodStatePaused && status != define.PodStateDegraded {
+ continue
+ }
report := entities.PodUnpauseReport{Id: p.ID()}
errs, err := p.Unpause(ctx)
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
continue
}
if len(errs) > 0 {
for id, v := range errs {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error unpausing container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error unpausing container %s: %w", id, v))
}
reports = append(reports, &report)
continue
@@ -176,19 +188,19 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
reports := []*entities.PodStopReport{}
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
- if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
+ if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchPod)) {
return nil, err
}
for _, p := range pods {
report := entities.PodStopReport{Id: p.ID()}
errs, err := p.StopWithTimeout(ctx, false, options.Timeout)
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
continue
}
if len(errs) > 0 {
for id, v := range errs {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error stopping container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error stopping container %s: %w", id, v))
}
reports = append(reports, &report)
continue
@@ -207,14 +219,14 @@ func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string,
for _, p := range pods {
report := entities.PodRestartReport{Id: p.ID()}
errs, err := p.Restart(ctx)
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
reports = append(reports, &report)
continue
}
if len(errs) > 0 {
for id, v := range errs {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error restarting container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error restarting container %s: %w", id, v))
}
reports = append(reports, &report)
continue
@@ -234,14 +246,14 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
for _, p := range pods {
report := entities.PodStartReport{Id: p.ID()}
errs, err := p.Start(ctx)
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
reports = append(reports, &report)
continue
}
if len(errs) > 0 {
for id, v := range errs {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error starting container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error starting container %s: %w", id, v))
}
reports = append(reports, &report)
continue
@@ -253,7 +265,7 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, options entities.PodRmOptions) ([]*entities.PodRmReport, error) {
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
- if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
+ if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchPod)) {
return nil, err
}
reports := make([]*entities.PodRmReport, 0, len(pods))
@@ -295,6 +307,88 @@ func (ic *ContainerEngine) PodCreate(ctx context.Context, specg entities.PodSpec
return &entities.PodCreateReport{Id: pod.ID()}, nil
}
+func (ic *ContainerEngine) PodClone(ctx context.Context, podClone entities.PodCloneOptions) (*entities.PodCloneReport, error) {
+ spec := specgen.NewPodSpecGenerator()
+ p, err := generate.PodConfigToSpec(ic.Libpod, spec, &podClone.InfraOptions, podClone.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(podClone.CreateOpts.Name) > 0 {
+ spec.Name = podClone.CreateOpts.Name
+ } else {
+ n := p.Name()
+ _, err := ic.Libpod.LookupPod(n + "-clone")
+ if err == nil {
+ n += "-clone"
+ }
+ switch {
+ case strings.Contains(n, "-clone"): // meaning this name is taken!
+ ind := strings.Index(n, "-clone") + 6
+ num, err := strconv.Atoi(n[ind:])
+ if num == 0 && err != nil { // meaning invalid
+ _, err = ic.Libpod.LookupPod(n + "1")
+ if err != nil {
+ spec.Name = n + "1"
+ break
+ }
+ } else { // else we already have a number
+ n = n[0:ind]
+ }
+ err = nil
+ count := num
+ for err == nil { // until we cannot find a pod w/ this name, increment num and try again
+ count++
+ tempN := n + strconv.Itoa(count)
+ _, err = ic.Libpod.LookupPod(tempN)
+ }
+ n += strconv.Itoa(count)
+ spec.Name = n
+ default:
+ spec.Name = p.Name() + "-clone"
+ }
+ }
+
+ podSpec := entities.PodSpec{PodSpecGen: *spec}
+ pod, err := generate.MakePod(&podSpec, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrs, err := p.AllContainers()
+ if err != nil {
+ return nil, err
+ }
+ for _, ctr := range ctrs {
+ if ctr.IsInfra() {
+ continue // already copied infra
+ }
+
+ podClone.PerContainerOptions.Pod = pod.ID()
+ _, err := ic.ContainerClone(ctx, entities.ContainerCloneOptions{ID: ctr.ID(), CreateOpts: podClone.PerContainerOptions})
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if podClone.Destroy {
+ var timeout *uint
+ err = ic.Libpod.RemovePod(ctx, p, true, true, timeout)
+ if err != nil {
+ return &entities.PodCloneReport{Id: pod.ID()}, err
+ }
+ }
+
+ if podClone.Start {
+ _, err := ic.PodStart(ctx, []string{pod.ID()}, entities.PodStartOptions{})
+ if err != nil {
+ return &entities.PodCloneReport{Id: pod.ID()}, err
+ }
+ }
+
+ return &entities.PodCloneReport{Id: pod.ID()}, nil
+}
+
func (ic *ContainerEngine) PodTop(ctx context.Context, options entities.PodTopOptions) (*entities.StringSliceReport, error) {
var (
pod *libpod.Pod
@@ -308,7 +402,7 @@ func (ic *ContainerEngine) PodTop(ctx context.Context, options entities.PodTopOp
pod, err = ic.Libpod.LookupPod(options.NameOrID)
}
if err != nil {
- return nil, errors.Wrap(err, "unable to lookup requested container")
+ return nil, fmt.Errorf("unable to look up requested container: %w", err)
}
// Run Top.
@@ -317,6 +411,56 @@ func (ic *ContainerEngine) PodTop(ctx context.Context, options entities.PodTopOp
return report, err
}
+func (ic *ContainerEngine) listPodReportFromPod(p *libpod.Pod) (*entities.ListPodsReport, error) {
+ status, err := p.GetPodStatus()
+ if err != nil {
+ return nil, err
+ }
+ cons, err := p.AllContainers()
+ if err != nil {
+ return nil, err
+ }
+ lpcs := make([]*entities.ListPodContainer, len(cons))
+ for i, c := range cons {
+ state, err := c.State()
+ if err != nil {
+ return nil, err
+ }
+ lpcs[i] = &entities.ListPodContainer{
+ Id: c.ID(),
+ Names: c.Name(),
+ Status: state.String(),
+ }
+ }
+ infraID, err := p.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+ networks := []string{}
+ if len(infraID) > 0 {
+ infra, err := p.InfraContainer()
+ if err != nil {
+ return nil, err
+ }
+ networks, err = infra.Networks()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &entities.ListPodsReport{
+ Cgroup: p.CgroupParent(),
+ Containers: lpcs,
+ Created: p.CreatedTime(),
+ Id: p.ID(),
+ InfraId: infraID,
+ Name: p.Name(),
+ Namespace: p.Namespace(),
+ Networks: networks,
+ Status: status,
+ Labels: p.Labels(),
+ }, nil
+}
+
func (ic *ContainerEngine) PodPs(ctx context.Context, options entities.PodPSOptions) ([]*entities.ListPodsReport, error) {
var (
err error
@@ -346,53 +490,14 @@ func (ic *ContainerEngine) PodPs(ctx context.Context, options entities.PodPSOpti
reports := make([]*entities.ListPodsReport, 0, len(pds))
for _, p := range pds {
- var lpcs []*entities.ListPodContainer
- status, err := p.GetPodStatus()
+ r, err := ic.listPodReportFromPod(p)
if err != nil {
- return nil, err
- }
- cons, err := p.AllContainers()
- if err != nil {
- return nil, err
- }
- for _, c := range cons {
- state, err := c.State()
- if err != nil {
- return nil, err
+ if errors.Is(err, define.ErrNoSuchPod) || errors.Is(err, define.ErrNoSuchCtr) {
+ continue
}
- lpcs = append(lpcs, &entities.ListPodContainer{
- Id: c.ID(),
- Names: c.Name(),
- Status: state.String(),
- })
- }
- infraID, err := p.InfraContainerID()
- if err != nil {
return nil, err
}
- networks := []string{}
- if len(infraID) > 0 {
- infra, err := p.InfraContainer()
- if err != nil {
- return nil, err
- }
- networks, err = infra.Networks()
- if err != nil {
- return nil, err
- }
- }
- reports = append(reports, &entities.ListPodsReport{
- Cgroup: p.CgroupParent(),
- Containers: lpcs,
- Created: p.CreatedTime(),
- Id: p.ID(),
- InfraId: infraID,
- Name: p.Name(),
- Namespace: p.Namespace(),
- Networks: networks,
- Status: status,
- Labels: p.Labels(),
- })
+ reports = append(reports, r)
}
return reports, nil
}
@@ -409,7 +514,7 @@ func (ic *ContainerEngine) PodInspect(ctx context.Context, options entities.PodI
pod, err = ic.Libpod.LookupPod(options.NameOrID)
}
if err != nil {
- return nil, errors.Wrap(err, "unable to lookup requested container")
+ return nil, fmt.Errorf("unable to look up requested container: %w", err)
}
inspect, err := pod.Inspect()
if err != nil {
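
The new PodClone derives a unique name by appending "-clone" and, when that is already taken, probing numeric suffixes until a pod lookup fails. A self-contained sketch of that suffix search, with the pod lookup replaced by an in-memory set (the real code calls ic.Libpod.LookupPod):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // nextCloneName mimics the suffix logic: "p" -> "p-clone" -> "p-clone1" -> "p-clone2" ...
    func nextCloneName(base string, exists func(string) bool) string {
        n := base + "-clone"
        if !exists(n) {
            return n
        }
        // the name already ends in "-clone": keep the prefix and probe numeric suffixes
        prefix := n[:strings.Index(n, "-clone")+6]
        for i := 1; ; i++ {
            candidate := prefix + strconv.Itoa(i)
            if !exists(candidate) {
                return candidate
            }
        }
    }

    func main() {
        taken := map[string]bool{"web-clone": true, "web-clone1": true}
        fmt.Println(nextCloneName("web", func(s string) bool { return taken[s] })) // web-clone2
    }
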
diff --git a/pkg/domain/infra/abi/pods_stats.go b/pkg/domain/infra/abi/pods_stats.go
index 6123027b8..a270db769 100644
--- a/pkg/domain/infra/abi/pods_stats.go
+++ b/pkg/domain/infra/abi/pods_stats.go
@@ -2,6 +2,7 @@ package abi
import (
"context"
+ "errors"
"fmt"
"github.com/containers/common/pkg/cgroups"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/utils"
"github.com/docker/go-units"
- "github.com/pkg/errors"
)
// PodStats implements printing stats about pods.
@@ -28,7 +28,7 @@ func (ic *ContainerEngine) PodStats(ctx context.Context, namesOrIds []string, op
// Get the (running) pods and convert them to the entities format.
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
- return nil, errors.Wrap(err, "unable to get list of pods")
+ return nil, fmt.Errorf("unable to get list of pods: %w", err)
}
return ic.podsToStatsReport(pods)
}
diff --git a/pkg/domain/infra/abi/secrets.go b/pkg/domain/infra/abi/secrets.go
index b9380ad73..7321ef715 100644
--- a/pkg/domain/infra/abi/secrets.go
+++ b/pkg/domain/infra/abi/secrets.go
@@ -2,13 +2,14 @@ package abi
import (
"context"
+ "fmt"
"io"
"io/ioutil"
"path/filepath"
+ "strings"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/utils"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader io.Reader, options entities.SecretCreateOptions) (*entities.SecretCreateReport, error) {
@@ -60,11 +61,11 @@ func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string
for _, nameOrID := range nameOrIDs {
secret, err := manager.Lookup(nameOrID)
if err != nil {
- if errors.Cause(err).Error() == "no such secret" {
+ if strings.Contains(err.Error(), "no such secret") {
errs = append(errs, err)
continue
} else {
- return nil, nil, errors.Wrapf(err, "error inspecting secret %s", nameOrID)
+ return nil, nil, fmt.Errorf("error inspecting secret %s: %w", nameOrID, err)
}
}
report := &entities.SecretInfoReport{
@@ -141,7 +142,7 @@ func (ic *ContainerEngine) SecretRm(ctx context.Context, nameOrIDs []string, opt
}
for _, nameOrID := range toRemove {
deletedID, err := manager.Delete(nameOrID)
- if err == nil || errors.Cause(err).Error() == "no such secret" {
+ if err == nil || strings.Contains(err.Error(), "no such secret") {
reports = append(reports, &entities.SecretRmReport{
Err: err,
ID: deletedID,
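
Since the secrets manager does not export a sentinel error for a missing secret, dropping errors.Cause means the engine now matches on the error text with strings.Contains. A short sketch of the pattern; it survives %w wrapping but breaks if the message ever changes, which is why sentinel errors plus errors.Is are preferred wherever one exists:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    func lookupSecret(name string) error {
        // wrap a "no such secret" error the way the manager might report it
        return fmt.Errorf("inspecting secret %s: %w", name, errors.New("no such secret"))
    }

    func main() {
        err := lookupSecret("mysecret")
        // substring matching still works after wrapping, but is tied to the exact message text
        fmt.Println(strings.Contains(err.Error(), "no such secret")) // true
    }
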
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 10f3e70b1..96690afef 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -2,6 +2,7 @@ package abi
import (
"context"
+ "errors"
"fmt"
"net/url"
"os"
@@ -10,6 +11,7 @@ import (
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/entities/reports"
@@ -18,7 +20,6 @@ import (
"github.com/containers/podman/v4/utils"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
@@ -98,7 +99,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
}
pausePidPath, err := util.GetRootlessPauseProcessPidPathGivenDir(tmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
became, ret, err := rootless.TryJoinPauseProcess(pausePidPath)
@@ -124,10 +125,16 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
paths = append(paths, ctr.Config().ConmonPidFile)
}
- became, ret, err = rootless.TryJoinFromFilePaths(pausePidPath, true, paths)
- utils.MovePauseProcessToScope(pausePidPath)
+ if len(paths) > 0 {
+ became, ret, err = rootless.TryJoinFromFilePaths(pausePidPath, true, paths)
+ } else {
+ became, ret, err = rootless.BecomeRootInUserNS(pausePidPath)
+ if err == nil {
+ utils.MovePauseProcessToScope(pausePidPath)
+ }
+ }
if err != nil {
- logrus.Error(errors.Wrapf(err, "invalid internal status, try resetting the pause process with %q", os.Args[0]+" system migrate"))
+ logrus.Error(fmt.Errorf("invalid internal status, try resetting the pause process with %q: %w", os.Args[0]+" system migrate", err))
os.Exit(1)
}
if became {
@@ -136,7 +143,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
return nil
}
-// SystemPrune removes unused data from the system. Pruning pods, containers, volumes and images.
+// SystemPrune removes unused data from the system. Pruning pods, containers, networks, volumes and images.
func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.SystemPruneOptions) (*entities.SystemPruneReport, error) {
var systemPruneReport = new(entities.SystemPruneReport)
filters := []string{}
@@ -147,6 +154,9 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
found := true
for found {
found = false
+
+ // TODO: Figure out cleaner way to handle all of the different PruneOptions
+ // Remove all unused pods.
podPruneReport, err := ic.prunePodHelper(ctx)
if err != nil {
return nil, err
@@ -154,9 +164,10 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
if len(podPruneReport) > 0 {
found = true
}
+
systemPruneReport.PodPruneReport = append(systemPruneReport.PodPruneReport, podPruneReport...)
- // TODO: Figure out cleaner way to handle all of the different PruneOptions
+ // Remove all unused containers.
containerPruneOptions := entities.ContainerPruneOptions{}
containerPruneOptions.Filters = (url.Values)(options.Filters)
@@ -164,16 +175,18 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
if err != nil {
return nil, err
}
+
reclaimedSpace += reports.PruneReportsSize(containerPruneReports)
systemPruneReport.ContainerPruneReports = append(systemPruneReport.ContainerPruneReports, containerPruneReports...)
+
+ // Remove all unused images.
imagePruneOptions := entities.ImagePruneOptions{
All: options.All,
Filter: filters,
}
+
imageEngine := ImageEngine{Libpod: ic.Libpod}
imagePruneReports, err := imageEngine.Prune(ctx, imagePruneOptions)
- reclaimedSpace += reports.PruneReportsSize(imagePruneReports)
-
if err != nil {
return nil, err
}
@@ -181,10 +194,33 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
found = true
}
+ reclaimedSpace += reports.PruneReportsSize(imagePruneReports)
systemPruneReport.ImagePruneReports = append(systemPruneReport.ImagePruneReports, imagePruneReports...)
+
+ // Remove all unused networks.
+ networkPruneOptions := entities.NetworkPruneOptions{}
+ networkPruneOptions.Filters = options.Filters
+
+ networkPruneReport, err := ic.NetworkPrune(ctx, networkPruneOptions)
+ if err != nil {
+ return nil, err
+ }
+ if len(networkPruneReport) > 0 {
+ found = true
+ }
+ for _, net := range networkPruneReport {
+ systemPruneReport.NetworkPruneReports = append(systemPruneReport.NetworkPruneReports, &reports.PruneReport{
+ Id: net.Name,
+ Err: net.Error,
+ Size: 0,
+ })
+ }
+
+ // Remove unused volume data.
if options.Volume {
volumePruneOptions := entities.VolumePruneOptions{}
volumePruneOptions.Filters = (url.Values)(options.Filters)
+
volumePruneReport, err := ic.VolumePrune(ctx, volumePruneOptions)
if err != nil {
return nil, err
@@ -192,6 +228,7 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
if len(volumePruneReport) > 0 {
found = true
}
+
reclaimedSpace += reports.PruneReportsSize(volumePruneReport)
systemPruneReport.VolumePruneReports = append(systemPruneReport.VolumePruneReports, volumePruneReport...)
}
@@ -234,22 +271,22 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
iid, _ := c.Image()
state, err := c.State()
if err != nil {
- return nil, errors.Wrapf(err, "Failed to get state of container %s", c.ID())
+ return nil, fmt.Errorf("failed to get state of container %s: %w", c.ID(), err)
}
conSize, err := c.RootFsSize()
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
- logrus.Error(errors.Wrapf(err, "Failed to get root file system size of container %s", c.ID()))
+ if errors.Is(err, storage.ErrContainerUnknown) {
+ logrus.Error(fmt.Errorf("failed to get root file system size of container %s: %w", c.ID(), err))
} else {
- return nil, errors.Wrapf(err, "Failed to get root file system size of container %s", c.ID())
+ return nil, fmt.Errorf("failed to get root file system size of container %s: %w", c.ID(), err)
}
}
rwsize, err := c.RWSize()
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
- logrus.Error(errors.Wrapf(err, "Failed to get read/write size of container %s", c.ID()))
+ if errors.Is(err, storage.ErrContainerUnknown) {
+ logrus.Error(fmt.Errorf("failed to get read/write size of container %s: %w", c.ID(), err))
} else {
- return nil, errors.Wrapf(err, "Failed to get read/write size of container %s", c.ID())
+ return nil, fmt.Errorf("failed to get read/write size of container %s: %w", c.ID(), err)
}
}
report := entities.SystemDfContainerReport{
@@ -282,8 +319,8 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
}
dfVolumes := make([]*entities.SystemDfVolumeReport, 0, len(vols))
- var reclaimableSize uint64
for _, v := range vols {
+ var reclaimableSize uint64
var consInUse int
mountPoint, err := v.MountPoint()
if err != nil {
@@ -304,10 +341,10 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
return nil, err
}
if len(inUse) == 0 {
- reclaimableSize += volSize
+ reclaimableSize = volSize
}
for _, viu := range inUse {
- if util.StringInSlice(viu, runningContainers) {
+ if cutil.StringInSlice(viu, runningContainers) {
consInUse++
}
}
@@ -327,7 +364,7 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
}
func (se *SystemEngine) Reset(ctx context.Context) error {
- return se.Libpod.Reset(ctx)
+ return nil
}
func (se *SystemEngine) Renumber(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig) error {
@@ -367,9 +404,9 @@ func (ic *ContainerEngine) Unshare(ctx context.Context, args []string, options e
}
// Make sure to unlock, unshare can run for a long time.
rootlessNetNS.Lock.Unlock()
- // We do not want to cleanup the netns after unshare.
- // The problem is that we cannot know if we need to cleanup and
- // secondly unshare should allow user to setup the namespace with
+ // We do not want to clean up the netns after unshare.
+ // The problem is that we cannot know if we need to clean up and
+ // secondly unshare should allow user to set up the namespace with
// special things, e.g. potentially macvlan or something like that.
return rootlessNetNS.Do(unshare)
}
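
SystemPrune gains a network pass and keeps the outer loop that reruns every pruner until a full pass removes nothing. A compressed sketch of that "repeat until nothing was reclaimed" loop, with each pruner stubbed as a function returning how many objects it removed:

    package main

    import "fmt"

    func systemPrune(pruners []func() int) int {
        total := 0
        found := true
        for found {
            found = false
            for _, prune := range pruners {
                if n := prune(); n > 0 {
                    total += n
                    found = true // something was removed; run another pass
                }
            }
        }
        return total
    }

    func main() {
        calls := 0
        pruner := func() int {
            calls++
            if calls == 1 {
                return 3 // first pass removes three objects
            }
            return 0 // later passes find nothing, so the loop terminates
        }
        fmt.Println(systemPrune([]func() int{pruner})) // 3
    }
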
diff --git a/pkg/domain/infra/abi/terminal/sigproxy_linux.go b/pkg/domain/infra/abi/terminal/sigproxy_linux.go
index fe2c268c0..16d345f06 100644
--- a/pkg/domain/infra/abi/terminal/sigproxy_linux.go
+++ b/pkg/domain/infra/abi/terminal/sigproxy_linux.go
@@ -1,6 +1,7 @@
package terminal
import (
+ "errors"
"os"
"syscall"
@@ -8,7 +9,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/shutdown"
"github.com/containers/podman/v4/pkg/signal"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -20,7 +20,7 @@ const signalBufferSize = 2048
func ProxySignals(ctr *libpod.Container) {
// Stop catching the shutdown signals (SIGINT, SIGTERM) - they're going
// to the container now.
- shutdown.Stop() // nolint: errcheck
+ shutdown.Stop() //nolint: errcheck
sigBuffer := make(chan os.Signal, signalBufferSize)
signal.CatchAll(sigBuffer)
@@ -39,7 +39,7 @@ func ProxySignals(ctr *libpod.Container) {
}
if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid {
+ if errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Infof("Ceasing signal forwarding to container %s as it has stopped", ctr.ID())
} else {
logrus.Errorf("forwarding signal %d to container %s: %v", s, ctr.ID(), err)
diff --git a/pkg/domain/infra/abi/terminal/terminal.go b/pkg/domain/infra/abi/terminal/terminal.go
index 692f8dcd5..37dadd92a 100644
--- a/pkg/domain/infra/abi/terminal/terminal.go
+++ b/pkg/domain/infra/abi/terminal/terminal.go
@@ -2,13 +2,13 @@ package terminal
import (
"context"
+ "fmt"
"os"
"os/signal"
- "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/common/pkg/resize"
lsignal "github.com/containers/podman/v4/pkg/signal"
"github.com/moby/term"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -18,20 +18,20 @@ type RawTtyFormatter struct {
// getResize returns a TerminalSize command matching stdin's current
// size on success, and nil on errors.
-func getResize() *define.TerminalSize {
+func getResize() *resize.TerminalSize {
winsize, err := term.GetWinsize(os.Stdin.Fd())
if err != nil {
logrus.Warnf("Could not get terminal size %v", err)
return nil
}
- return &define.TerminalSize{
+ return &resize.TerminalSize{
Width: winsize.Width,
Height: winsize.Height,
}
}
// Helper for prepareAttach - set up a goroutine to generate terminal resize events
-func resizeTty(ctx context.Context, resize chan define.TerminalSize) {
+func resizeTty(ctx context.Context, resize chan resize.TerminalSize) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, lsignal.SIGWINCH)
go func() {
@@ -78,7 +78,7 @@ func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
return bytes, err
}
-func handleTerminalAttach(ctx context.Context, resize chan define.TerminalSize) (context.CancelFunc, *term.State, error) {
+func handleTerminalAttach(ctx context.Context, resize chan resize.TerminalSize) (context.CancelFunc, *term.State, error) {
logrus.Debugf("Handling terminal attach")
subCtx, cancel := context.WithCancel(ctx)
@@ -89,7 +89,7 @@ func handleTerminalAttach(ctx context.Context, resize chan define.TerminalSize)
if err != nil {
// allow caller to not have to do any cleaning up if we error here
cancel()
- return nil, nil, errors.Wrapf(err, "unable to save terminal state")
+ return nil, nil, fmt.Errorf("unable to save terminal state: %w", err)
}
logrus.SetFormatter(&RawTtyFormatter{})
diff --git a/pkg/domain/infra/abi/terminal/terminal_linux.go b/pkg/domain/infra/abi/terminal/terminal_linux.go
index 62d36f28d..222590871 100644
--- a/pkg/domain/infra/abi/terminal/terminal_linux.go
+++ b/pkg/domain/infra/abi/terminal/terminal_linux.go
@@ -6,23 +6,23 @@ import (
"fmt"
"os"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/term"
)
// ExecAttachCtr execs and attaches to a container
func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, execConfig *libpod.ExecConfig, streams *define.AttachStreams) (int, error) {
- var resize chan define.TerminalSize
+ var resizechan chan resize.TerminalSize
haveTerminal := term.IsTerminal(int(os.Stdin.Fd()))
// Check if we are attached to a terminal. If we are, generate resize
// events, and set the terminal to raw mode
if haveTerminal && execConfig.Terminal {
- resize = make(chan define.TerminalSize)
- cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
+ resizechan = make(chan resize.TerminalSize)
+ cancel, oldTermState, err := handleTerminalAttach(ctx, resizechan)
if err != nil {
return -1, err
}
@@ -33,14 +33,14 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, execConfig *libpo
}
}()
}
- return ctr.Exec(execConfig, streams, resize)
+ return ctr.Exec(execConfig, streams, resizechan)
}
// StartAttachCtr starts and (if required) attaches to a container
// if you change the signature of this function from os.File to io.Writer, it will trigger a downstream
// error. we may need to just lint disable this one.
func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool) error { //nolint: interfacer
- resize := make(chan define.TerminalSize)
+ resize := make(chan resize.TerminalSize)
haveTerminal := term.IsTerminal(int(os.Stdin.Fd()))
@@ -103,7 +103,7 @@ func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr,
err = <-attachChan
if err != nil {
- return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
+ return fmt.Errorf("error attaching to container %s: %w", ctr.ID(), err)
}
return nil
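Both attach paths now take resize.TerminalSize from containers/common instead of the old define.TerminalSize; the mechanics are unchanged: a goroutine converts SIGWINCH notifications into size values sent over a channel. A rough, self-contained sketch of that producer loop, using golang.org/x/term rather than the moby/term helper above and a local stand-in for the resize type:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"

        "golang.org/x/term"
    )

    // terminalSize mirrors resize.TerminalSize from containers/common for illustration.
    type terminalSize struct {
        Width  uint16
        Height uint16
    }

    func main() {
        resizeCh := make(chan terminalSize, 1)
        sigCh := make(chan os.Signal, 1)
        signal.Notify(sigCh, syscall.SIGWINCH)

        go func() {
            for range sigCh {
                // On every SIGWINCH, read the current window size and forward it.
                w, h, err := term.GetSize(int(os.Stdin.Fd()))
                if err != nil {
                    continue
                }
                resizeCh <- terminalSize{Width: uint16(w), Height: uint16(h)}
            }
        }()

        for sz := range resizeCh {
            fmt.Printf("terminal resized to %dx%d\n", sz.Width, sz.Height)
        }
    }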
diff --git a/pkg/domain/infra/abi/trust.go b/pkg/domain/infra/abi/trust.go
index 58f099bb6..0e3d8fad9 100644
--- a/pkg/domain/infra/abi/trust.go
+++ b/pkg/domain/infra/abi/trust.go
@@ -3,13 +3,14 @@ package abi
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"strings"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/trust"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -35,11 +36,11 @@ func (ir *ImageEngine) ShowTrust(ctx context.Context, args []string, options ent
}
policyContentStruct, err := trust.GetPolicy(policyPath)
if err != nil {
- return nil, errors.Wrapf(err, "could not read trust policies")
+ return nil, fmt.Errorf("could not read trust policies: %w", err)
}
report.Policies, err = getPolicyShowOutput(policyContentStruct, report.SystemRegistriesDirPath)
if err != nil {
- return nil, errors.Wrapf(err, "could not show trust policies")
+ return nil, fmt.Errorf("could not show trust policies: %w", err)
}
return &report, nil
}
@@ -56,7 +57,7 @@ func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options enti
pubkeysfile := options.PubKeysFile
if len(pubkeysfile) == 0 && trustType == "signedBy" {
- return errors.Errorf("At least one public key must be defined for type 'signedBy'")
+ return errors.New("at least one public key must be defined for type 'signedBy'")
}
policyPath := trust.DefaultPolicyPath(ir.Libpod.SystemContext())
@@ -70,7 +71,7 @@ func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options enti
return err
}
if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil {
- return errors.Errorf("could not read trust policies")
+ return errors.New("could not read trust policies")
}
}
if len(pubkeysfile) != 0 {
@@ -84,7 +85,7 @@ func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options enti
policyContentStruct.Default = newReposContent
} else {
if len(policyContentStruct.Default) == 0 {
- return errors.Errorf("default trust policy must be set")
+ return errors.New("default trust policy must be set")
}
registryExists := false
for transport, transportval := range policyContentStruct.Transports {
@@ -107,7 +108,7 @@ func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options enti
data, err := json.MarshalIndent(policyContentStruct, "", " ")
if err != nil {
- return errors.Wrapf(err, "error setting trust policy")
+ return fmt.Errorf("error setting trust policy: %w", err)
}
return ioutil.WriteFile(policyPath, data, 0644)
}
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
index f59f11e20..5e95a0551 100644
--- a/pkg/domain/infra/abi/volumes.go
+++ b/pkg/domain/infra/abi/volumes.go
@@ -2,6 +2,8 @@ package abi
import (
"context"
+ "errors"
+ "fmt"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
@@ -9,7 +11,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities/reports"
"github.com/containers/podman/v4/pkg/domain/filters"
"github.com/containers/podman/v4/pkg/domain/infra/abi/parse"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IDOrNameResponse, error) {
@@ -91,11 +92,11 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
for _, v := range namesOrIds {
vol, err := ic.Libpod.LookupVolume(v)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchVolume {
- errs = append(errs, errors.Errorf("no such volume %s", v))
+ if errors.Is(err, define.ErrNoSuchVolume) {
+ errs = append(errs, fmt.Errorf("no such volume %s", v))
continue
} else {
- return nil, nil, errors.Wrapf(err, "error inspecting volume %s", v)
+ return nil, nil, fmt.Errorf("error inspecting volume %s: %w", v, err)
}
}
vols = append(vols, vol)
@@ -172,7 +173,7 @@ func (ic *ContainerEngine) VolumeMounted(ctx context.Context, nameOrID string) (
mountCount, err := vol.MountCount()
if err != nil {
// FIXME: this error should probably be returned
- return &entities.BoolReport{Value: false}, nil // nolint: nilerr
+ return &entities.BoolReport{Value: false}, nil //nolint: nilerr
}
if mountCount > 0 {
return &entities.BoolReport{Value: true}, nil
@@ -211,3 +212,8 @@ func (ic *ContainerEngine) VolumeUnmount(ctx context.Context, nameOrIDs []string
return reports, nil
}
+
+func (ic *ContainerEngine) VolumeReload(ctx context.Context) (*entities.VolumeReloadReport, error) {
+ report := ic.Libpod.UpdateVolumePlugins(ctx)
+ return &entities.VolumeReloadReport{VolumeReload: *report}, nil
+}
diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go
index 39989c96b..7b5198d2f 100644
--- a/pkg/domain/infra/runtime_abi.go
+++ b/pkg/domain/infra/runtime_abi.go
@@ -53,7 +53,7 @@ func NewSystemEngine(setup entities.EngineSetup, facts *entities.PodmanConfig) (
case entities.RenumberMode:
r, err = GetRuntimeRenumber(context.Background(), facts.FlagSet, facts)
case entities.ResetMode:
- r, err = GetRuntimeRenumber(context.Background(), facts.FlagSet, facts)
+ r, err = GetRuntimeReset(context.Background(), facts.FlagSet, facts)
case entities.MigrateMode:
name, flagErr := facts.FlagSet.GetString("new-runtime")
if flagErr != nil {
diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go
index ac557e9de..f76fab4ea 100644
--- a/pkg/domain/infra/runtime_libpod.go
+++ b/pkg/domain/infra/runtime_libpod.go
@@ -5,20 +5,20 @@ package infra
import (
"context"
+ "errors"
"fmt"
"os"
"os/signal"
"sync"
+ "syscall"
"github.com/containers/common/pkg/cgroups"
- "github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/namespaces"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
flag "github.com/spf13/pflag"
)
@@ -37,6 +37,7 @@ type engineOpts struct {
migrate bool
noStore bool
withFDS bool
+ reset bool
config *entities.PodmanConfig
}
@@ -48,6 +49,7 @@ func GetRuntimeMigrate(ctx context.Context, fs *flag.FlagSet, cfg *entities.Podm
migrate: true,
noStore: false,
withFDS: true,
+ reset: false,
config: cfg,
})
}
@@ -59,6 +61,7 @@ func GetRuntimeDisableFDs(ctx context.Context, fs *flag.FlagSet, cfg *entities.P
migrate: false,
noStore: false,
withFDS: false,
+ reset: false,
config: cfg,
})
}
@@ -70,6 +73,7 @@ func GetRuntimeRenumber(ctx context.Context, fs *flag.FlagSet, cfg *entities.Pod
migrate: false,
noStore: false,
withFDS: true,
+ reset: false,
config: cfg,
})
}
@@ -82,6 +86,7 @@ func GetRuntime(ctx context.Context, flags *flag.FlagSet, cfg *entities.PodmanCo
migrate: false,
noStore: false,
withFDS: true,
+ reset: false,
config: cfg,
})
})
@@ -95,6 +100,18 @@ func GetRuntimeNoStore(ctx context.Context, fs *flag.FlagSet, cfg *entities.Podm
migrate: false,
noStore: true,
withFDS: true,
+ reset: false,
+ config: cfg,
+ })
+}
+
+func GetRuntimeReset(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ renumber: false,
+ migrate: false,
+ noStore: false,
+ withFDS: true,
+ reset: true,
config: cfg,
})
}
@@ -161,6 +178,10 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo
}
}
+ if opts.reset {
+ options = append(options, libpod.WithReset())
+ }
+
if opts.renumber {
options = append(options, libpod.WithRenumber())
}
@@ -295,7 +316,7 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin
uids, gids, err := rootless.GetConfiguredMappings()
if err != nil {
- return nil, errors.Wrapf(err, "cannot read mappings")
+ return nil, fmt.Errorf("cannot read mappings: %w", err)
}
maxUID, maxGID := 0, 0
for _, u := range uids {
@@ -321,7 +342,7 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin
options.HostUIDMapping = false
options.HostGIDMapping = false
- // Simply ignore the setting and do not setup an inner namespace for root as it is a no-op
+ // Simply ignore the setting and do not set up an inner namespace for root as it is a no-op
return &options, nil
}
@@ -373,9 +394,9 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin
// StartWatcher starts a new SIGHUP go routine for the current config.
func StartWatcher(rt *libpod.Runtime) {
- // Setup the signal notifier
+ // Set up the signal notifier
ch := make(chan os.Signal, 1)
- signal.Notify(ch, utils.SIGHUP)
+ signal.Notify(ch, syscall.SIGHUP)
go func() {
for {
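GetRuntimeReset mirrors the other constructors: it flips one boolean in engineOpts, and getRuntime translates that flag into a libpod functional option via libpod.WithReset(). A generic sketch of that translation step, with illustrative names that are not libpod's own:

    package main

    import "fmt"

    // runtimeConfig and option stand in for libpod's runtime and its functional options.
    type runtimeConfig struct {
        reset    bool
        renumber bool
    }

    type option func(*runtimeConfig)

    // withReset plays the role of libpod.WithReset in the hunk above.
    func withReset() option {
        return func(c *runtimeConfig) { c.reset = true }
    }

    func withRenumber() option {
        return func(c *runtimeConfig) { c.renumber = true }
    }

    type engineOpts struct {
        reset    bool
        renumber bool
    }

    // buildOptions translates plain booleans into functional options, the same
    // shape as the opts.reset / opts.renumber checks in getRuntime.
    func buildOptions(opts engineOpts) []option {
        var options []option
        if opts.reset {
            options = append(options, withReset())
        }
        if opts.renumber {
            options = append(options, withRenumber())
        }
        return options
    }

    func main() {
        cfg := &runtimeConfig{}
        for _, apply := range buildOptions(engineOpts{reset: true}) {
            apply(cfg)
        }
        fmt.Printf("%+v\n", cfg)
    }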
diff --git a/pkg/domain/infra/tunnel/auto-update.go b/pkg/domain/infra/tunnel/auto-update.go
index ba41f0378..469da5a7a 100644
--- a/pkg/domain/infra/tunnel/auto-update.go
+++ b/pkg/domain/infra/tunnel/auto-update.go
@@ -2,9 +2,9 @@ package tunnel
import (
"context"
+ "errors"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 82e8fbb5b..5568ccde8 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -2,6 +2,7 @@ package tunnel
import (
"context"
+ "errors"
"fmt"
"io"
"os"
@@ -23,7 +24,6 @@ import (
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -64,7 +64,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
reports := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := containers.Pause(ic.ClientCtx, c.ID, nil)
- if err != nil && options.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
+ if err != nil && options.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
logrus.Debugf("Container %s is not running", c.ID)
continue
}
@@ -81,7 +81,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
}
for _, c := range ctrs {
err := containers.Unpause(ic.ClientCtx, c.ID, nil)
- if err != nil && options.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
+ if err != nil && options.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
logrus.Debugf("Container %s is not paused", c.ID)
continue
}
@@ -111,11 +111,11 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
}
if err = containers.Stop(ic.ClientCtx, c.ID, options); err != nil {
// These first two are considered non-fatal under the right conditions
- if errors.Cause(err).Error() == define.ErrCtrStopped.Error() {
+ if strings.Contains(err.Error(), define.ErrCtrStopped.Error()) {
logrus.Debugf("Container %s is already stopped", c.ID)
reports = append(reports, &report)
continue
- } else if opts.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
+ } else if opts.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
logrus.Debugf("Container %s is not running, could not stop", c.ID)
reports = append(reports, &report)
continue
@@ -146,7 +146,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
reports := make([]*entities.KillReport, 0, len(ctrs))
for _, c := range ctrs {
err := containers.Kill(ic.ClientCtx, c.ID, options)
- if err != nil && opts.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
+ if err != nil && opts.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
logrus.Debugf("Container %s is not running", c.ID)
continue
}
@@ -258,7 +258,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
return nil, nil, err
}
if errModel.ResponseCode == 404 {
- errs = append(errs, errors.Errorf("no such container %q", name))
+ errs = append(errs, fmt.Errorf("no such container %q", name))
continue
}
return nil, nil, err
@@ -291,7 +291,7 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string,
if len(opts.ImageName) > 0 {
ref, err := reference.Parse(opts.ImageName)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing reference %q", opts.ImageName)
+ return nil, fmt.Errorf("error parsing reference %q: %w", opts.ImageName, err)
}
if t, ok := ref.(reference.Tagged); ok {
tag = t.Tag()
@@ -300,7 +300,7 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string,
repo = r.Name()
}
if len(repo) < 1 {
- return nil, errors.Errorf("invalid image name %q", opts.ImageName)
+ return nil, fmt.Errorf("invalid image name %q", opts.ImageName)
}
}
options := new(containers.CommitOptions).WithAuthor(opts.Author).WithChanges(opts.Changes).WithComment(opts.Message).WithSquash(opts.Squash)
@@ -502,7 +502,7 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string,
}
ctr := ctrs[0]
if ctr.State != define.ContainerStateRunning.String() {
- return errors.Errorf("you can only attach to running containers")
+ return fmt.Errorf("you can only attach to running containers")
}
options := new(containers.AttachOptions).WithStream(true).WithDetachKeys(opts.DetachKeys)
return containers.Attach(ic.ClientCtx, nameOrID, opts.Stdin, opts.Stdout, opts.Stderr, nil, options)
@@ -570,7 +570,7 @@ func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID s
return sessionID, nil
}
-func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input, output, errput *os.File) error { //nolint
+func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input, output, errput *os.File) error {
attachErr := make(chan error)
attachReady := make(chan bool)
options := new(containers.AttachOptions).WithStream(true)
@@ -695,7 +695,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
report.ExitCode = define.ExitCode(report.Err)
report.Err = err
reports = append(reports, &report)
- return reports, errors.Wrapf(report.Err, "unable to start container %s", name)
+ return reports, fmt.Errorf("unable to start container %s: %w", name, report.Err)
}
if ctr.AutoRemove {
// Defer the removal, so we can return early if needed and
@@ -739,7 +739,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
reports, err := containers.Remove(ic.ClientCtx, ctr.ID, rmOptions)
logIfRmError(ctr.ID, err, reports)
}
- report.Err = errors.Wrapf(err, "unable to start container %q", name)
+ report.Err = fmt.Errorf("unable to start container %q: %w", name, err)
report.ExitCode = define.ExitCode(err)
reports = append(reports, &report)
continue
@@ -863,7 +863,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
if eventsErr != nil || lastEvent == nil {
logrus.Errorf("Cannot get exit code: %v", err)
report.ExitCode = define.ExecErrorCodeNotFound
- return &report, nil // nolint: nilerr
+ return &report, nil //nolint: nilerr
}
report.ExitCode = lastEvent.ContainerExitCode
@@ -899,7 +899,7 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin
err := containers.ContainerInit(ic.ClientCtx, ctr.ID, nil)
// When using all, it is NOT considered an error if a container
// has already been init'd.
- if err != nil && options.All && strings.Contains(errors.Cause(err).Error(), define.ErrCtrStateInvalid.Error()) {
+ if err != nil && options.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
err = nil
}
reports = append(reports, &entities.ContainerInitReport{
@@ -949,7 +949,7 @@ func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrID string, o
}
func (ic *ContainerEngine) ContainerCopyFromArchive(ctx context.Context, nameOrID, path string, reader io.Reader, options entities.CopyOptions) (entities.ContainerCopyFunc, error) {
- copyOptions := new(containers.CopyOptions).WithChown(options.Chown).WithRename(options.Rename)
+ copyOptions := new(containers.CopyOptions).WithChown(options.Chown).WithRename(options.Rename).WithNoOverwriteDirNonDir(options.NoOverwriteDirNonDir)
return containers.CopyFromArchiveWithOptions(ic.ClientCtx, nameOrID, path, reader, copyOptions)
}
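Note the asymmetry with the ABI side: the tunnel code matches sentinels with strings.Contains on err.Error() rather than errors.Is, since the error has been round-tripped through the remote API and is rebuilt from its message, so no wrap chain survives to unwrap. A small sketch of the difference, again with an illustrative sentinel:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    // errCtrStateInvalid is an illustrative sentinel, not the real define value.
    var errCtrStateInvalid = errors.New("container state improper")

    func main() {
        // Local (ABI) case: the wrap chain is intact, so errors.Is matches.
        local := fmt.Errorf("stopping container: %w", errCtrStateInvalid)
        fmt.Println(errors.Is(local, errCtrStateInvalid)) // true

        // Remote (tunnel) case: the error came back over the API as plain text,
        // so only the message survives and string matching is the fallback.
        remote := errors.New("stopping container: container state improper")
        fmt.Println(errors.Is(remote, errCtrStateInvalid))                        // false
        fmt.Println(strings.Contains(remote.Error(), errCtrStateInvalid.Error())) // true
    }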
diff --git a/pkg/domain/infra/tunnel/events.go b/pkg/domain/infra/tunnel/events.go
index b472ad03a..30be92e23 100644
--- a/pkg/domain/infra/tunnel/events.go
+++ b/pkg/domain/infra/tunnel/events.go
@@ -2,13 +2,12 @@ package tunnel
import (
"context"
+ "fmt"
"strings"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/bindings/system"
"github.com/containers/podman/v4/pkg/domain/entities"
-
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptions) error {
@@ -17,7 +16,7 @@ func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptio
for _, filter := range opts.Filter {
split := strings.Split(filter, "=")
if len(split) < 2 {
- return errors.Errorf("invalid filter %q", filter)
+ return fmt.Errorf("invalid filter %q", filter)
}
filters[split[0]] = append(filters[split[0]], strings.Join(split[1:], "="))
}
@@ -56,7 +55,7 @@ func (ic *ContainerEngine) GetLastContainerEvent(ctx context.Context, nameOrID s
return nil, err
}
if len(containerEvents) < 1 {
- return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String())
+ return nil, fmt.Errorf("%s not found: %w", containerEvent.String(), events.ErrEventNotFound)
}
// return the last element in the slice
return containerEvents[len(containerEvents)-1], nil
diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go
index 5b14fac37..24b2b619d 100644
--- a/pkg/domain/infra/tunnel/helpers.go
+++ b/pkg/domain/infra/tunnel/helpers.go
@@ -2,13 +2,14 @@ package tunnel
import (
"context"
+ "errors"
+ "fmt"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/bindings/containers"
"github.com/containers/podman/v4/pkg/bindings/pods"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/pkg/errors"
)
// FIXME: the `ignore` parameter is very likely wrong here as it should rather
@@ -20,7 +21,7 @@ func getContainersByContext(contextWithConnection context.Context, all, ignore b
func getContainersAndInputByContext(contextWithConnection context.Context, all, ignore bool, namesOrIDs []string) ([]entities.ListContainer, []string, error) {
if all && len(namesOrIDs) > 0 {
- return nil, nil, errors.New("cannot lookup containers and all")
+ return nil, nil, errors.New("cannot look up containers and all")
}
options := new(containers.ListOptions).WithAll(true).WithSync(true)
allContainers, err := containers.List(contextWithConnection, options)
@@ -69,7 +70,7 @@ func getContainersAndInputByContext(contextWithConnection context.Context, all,
}
if !found && !ignore {
- return nil, nil, errors.Wrapf(define.ErrNoSuchCtr, "unable to find container %q", nameOrID)
+ return nil, nil, fmt.Errorf("unable to find container %q: %w", nameOrID, define.ErrNoSuchCtr)
}
}
return filtered, rawInputs, nil
@@ -77,7 +78,7 @@ func getContainersAndInputByContext(contextWithConnection context.Context, all,
func getPodsByContext(contextWithConnection context.Context, all bool, namesOrIDs []string) ([]*entities.ListPodsReport, error) {
if all && len(namesOrIDs) > 0 {
- return nil, errors.New("cannot lookup specific pods and all")
+ return nil, errors.New("cannot look up specific pods and all")
}
allPods, err := pods.List(contextWithConnection, nil)
@@ -102,7 +103,7 @@ func getPodsByContext(contextWithConnection context.Context, all bool, namesOrID
inspectData, err := pods.Inspect(contextWithConnection, nameOrID, nil)
if err != nil {
if errorhandling.Contains(err, define.ErrNoSuchPod) {
- return nil, errors.Wrapf(define.ErrNoSuchPod, "unable to find pod %q", nameOrID)
+ return nil, fmt.Errorf("unable to find pod %q: %w", nameOrID, define.ErrNoSuchPod)
}
return nil, err
}
@@ -120,7 +121,7 @@ func getPodsByContext(contextWithConnection context.Context, all bool, namesOrID
}
if !found {
- return nil, errors.Wrapf(define.ErrNoSuchPod, "unable to find pod %q", nameOrID)
+ return nil, fmt.Errorf("unable to find pod %q: %w", nameOrID, define.ErrNoSuchPod)
}
}
return filtered, nil
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index 18e10e8dd..18f750dcc 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -2,6 +2,8 @@ package tunnel
import (
"context"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"strconv"
@@ -12,14 +14,12 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
- "github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/bindings/images"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/entities/reports"
"github.com/containers/podman/v4/pkg/domain/utils"
"github.com/containers/podman/v4/pkg/errorhandling"
utils2 "github.com/containers/podman/v4/utils"
- "github.com/pkg/errors"
)
func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) {
@@ -28,7 +28,7 @@ func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.Boo
}
func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts entities.ImageRemoveOptions) (*entities.ImageRemoveReport, []error) {
- options := new(images.RemoveOptions).WithForce(opts.Force).WithIgnore(opts.Ignore).WithAll(opts.All)
+ options := new(images.RemoveOptions).WithForce(opts.Force).WithIgnore(opts.Ignore).WithAll(opts.All).WithLookupManifest(opts.LookupManifest)
return images.Remove(ir.ClientCtx, imagesArg, options)
}
@@ -123,10 +123,6 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, opts entities.
return &entities.ImagePullReport{Images: pulledImages}, nil
}
-func (ir *ImageEngine) Transfer(ctx context.Context, source entities.ImageScpOptions, dest entities.ImageScpOptions, parentFlags []string) error {
- return errors.Wrapf(define.ErrNotImplemented, "cannot use the remote client to transfer images between root and rootless storage")
-}
-
func (ir *ImageEngine) Tag(ctx context.Context, nameOrID string, tags []string, opt entities.ImageTagOptions) error {
options := new(images.TagOptions)
for _, newTag := range tags {
@@ -135,7 +131,7 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrID string, tags []string,
)
ref, err := reference.Parse(newTag)
if err != nil {
- return errors.Wrapf(err, "error parsing reference %q", newTag)
+ return fmt.Errorf("error parsing reference %q: %w", newTag, err)
}
if t, ok := ref.(reference.Tagged); ok {
tag = t.Tag()
@@ -144,7 +140,7 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrID string, tags []string,
repo = r.Name()
}
if len(repo) < 1 {
- return errors.Errorf("invalid image name %q", nameOrID)
+ return fmt.Errorf("invalid image name %q", nameOrID)
}
if err := images.Tag(ir.ClientCtx, nameOrID, tag, repo, options); err != nil {
return err
@@ -165,7 +161,7 @@ func (ir *ImageEngine) Untag(ctx context.Context, nameOrID string, tags []string
)
ref, err := reference.Parse(newTag)
if err != nil {
- return errors.Wrapf(err, "error parsing reference %q", newTag)
+ return fmt.Errorf("error parsing reference %q: %w", newTag, err)
}
if t, ok := ref.(reference.Tagged); ok {
tag = t.Tag()
@@ -177,7 +173,7 @@ func (ir *ImageEngine) Untag(ctx context.Context, nameOrID string, tags []string
repo = r.Name()
}
if len(repo) < 1 {
- return errors.Errorf("invalid image name %q", nameOrID)
+ return fmt.Errorf("invalid image name %q", nameOrID)
}
if err := images.Untag(ir.ClientCtx, nameOrID, tag, repo, options); err != nil {
return err
@@ -198,7 +194,7 @@ func (ir *ImageEngine) Inspect(ctx context.Context, namesOrIDs []string, opts en
return nil, nil, err
}
if errModel.ResponseCode == 404 {
- errs = append(errs, errors.Wrapf(err, "unable to inspect %q", i))
+ errs = append(errs, fmt.Errorf("unable to inspect %q: %w", i, err))
continue
}
return nil, nil, err
@@ -219,7 +215,7 @@ func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions)
return nil, err
}
if fInfo.IsDir() {
- return nil, errors.Errorf("remote client supports archives only but %q is a directory", opts.Input)
+ return nil, fmt.Errorf("remote client supports archives only but %q is a directory", opts.Input)
}
return images.Load(ir.ClientCtx, f)
}
@@ -244,7 +240,7 @@ func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOpti
func (ir *ImageEngine) Push(ctx context.Context, source string, destination string, opts entities.ImagePushOptions) error {
options := new(images.PushOptions)
- options.WithAll(opts.All).WithCompress(opts.Compress).WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithFormat(opts.Format)
+ options.WithAll(opts.All).WithCompress(opts.Compress).WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithFormat(opts.Format).WithRemoveSignatures(opts.RemoveSignatures)
if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined {
if s == types.OptionalBoolTrue {
@@ -300,7 +296,7 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrID string, tags []string,
switch {
case err == nil:
if info.Mode().IsRegular() {
- return errors.Errorf("%q already exists as a regular file", opts.Output)
+ return fmt.Errorf("%q already exists as a regular file", opts.Output)
}
case os.IsNotExist(err):
if err := os.Mkdir(opts.Output, 0755); err != nil {
@@ -367,3 +363,23 @@ func (ir *ImageEngine) Shutdown(_ context.Context) {
func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
return nil, errors.New("not implemented yet")
}
+
+func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool) error {
+ options := new(images.ScpOptions)
+
+ var destination *string
+ if len(dst) > 1 {
+ destination = &dst
+ }
+ options.Quiet = &quiet
+ options.Destination = destination
+
+ rep, err := images.Scp(ir.ClientCtx, &src, destination, *options)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Loaded Image(s):", rep.Id)
+
+ return nil
+}
diff --git a/pkg/domain/infra/tunnel/manifest.go b/pkg/domain/infra/tunnel/manifest.go
index 9ac3fdb83..d2554f198 100644
--- a/pkg/domain/infra/tunnel/manifest.go
+++ b/pkg/domain/infra/tunnel/manifest.go
@@ -3,6 +3,7 @@ package tunnel
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"strings"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/pkg/bindings/images"
"github.com/containers/podman/v4/pkg/bindings/manifests"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
)
// ManifestCreate implements manifest create via ImageEngine
@@ -18,7 +18,7 @@ func (ir *ImageEngine) ManifestCreate(ctx context.Context, name string, images [
options := new(manifests.CreateOptions).WithAll(opts.All)
imageID, err := manifests.Create(ir.ClientCtx, name, images, options)
if err != nil {
- return imageID, errors.Wrapf(err, "error creating manifest")
+ return imageID, fmt.Errorf("error creating manifest: %w", err)
}
return imageID, err
}
@@ -36,12 +36,12 @@ func (ir *ImageEngine) ManifestExists(ctx context.Context, name string) (*entiti
func (ir *ImageEngine) ManifestInspect(_ context.Context, name string) ([]byte, error) {
list, err := manifests.Inspect(ir.ClientCtx, name, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error getting content of manifest list or image %s", name)
+ return nil, fmt.Errorf("error getting content of manifest list or image %s: %w", name, err)
}
buf, err := json.MarshalIndent(list, "", " ")
if err != nil {
- return buf, errors.Wrapf(err, "error rendering manifest for display")
+ return buf, fmt.Errorf("error rendering manifest for display: %w", err)
}
return buf, err
}
@@ -56,7 +56,7 @@ func (ir *ImageEngine) ManifestAdd(_ context.Context, name string, imageNames []
for _, annotationSpec := range opts.Annotation {
spec := strings.SplitN(annotationSpec, "=", 2)
if len(spec) != 2 {
- return "", errors.Errorf("no value given for annotation %q", spec[0])
+ return "", fmt.Errorf("no value given for annotation %q", spec[0])
}
annotations[spec[0]] = spec[1]
}
@@ -72,7 +72,7 @@ func (ir *ImageEngine) ManifestAdd(_ context.Context, name string, imageNames []
id, err := manifests.Add(ir.ClientCtx, name, options)
if err != nil {
- return id, errors.Wrapf(err, "error adding to manifest list %s", name)
+ return id, fmt.Errorf("error adding to manifest list %s: %w", name, err)
}
return id, nil
}
@@ -86,7 +86,7 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, images string
func (ir *ImageEngine) ManifestRemoveDigest(ctx context.Context, name string, image string) (string, error) {
updatedListID, err := manifests.Remove(ir.ClientCtx, name, image, nil)
if err != nil {
- return updatedListID, errors.Wrapf(err, "error removing from manifest %s", name)
+ return updatedListID, fmt.Errorf("error removing from manifest %s: %w", name, err)
}
return fmt.Sprintf("%s :%s\n", updatedListID, image), nil
}
@@ -99,7 +99,7 @@ func (ir *ImageEngine) ManifestRm(ctx context.Context, names []string) (*entitie
// ManifestPush pushes a manifest list or image index to the destination
func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination string, opts entities.ImagePushOptions) (string, error) {
options := new(images.PushOptions)
- options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile)
+ options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithRemoveSignatures(opts.RemoveSignatures)
options.WithAll(opts.All)
if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined {
diff --git a/pkg/domain/infra/tunnel/network.go b/pkg/domain/infra/tunnel/network.go
index 415999a96..ffdcbab1e 100644
--- a/pkg/domain/infra/tunnel/network.go
+++ b/pkg/domain/infra/tunnel/network.go
@@ -2,13 +2,14 @@ package tunnel
import (
"context"
+ "errors"
+ "fmt"
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/bindings/network"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) NetworkList(ctx context.Context, opts entities.NetworkListOptions) ([]types.Network, error) {
@@ -30,7 +31,7 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri
return nil, nil, err
}
if errModel.ResponseCode == 404 {
- errs = append(errs, errors.Wrapf(define.ErrNoSuchNetwork, "network %s", name))
+ errs = append(errs, fmt.Errorf("network %s: %w", name, define.ErrNoSuchNetwork))
continue
}
return nil, nil, err
diff --git a/pkg/domain/infra/tunnel/pods.go b/pkg/domain/infra/tunnel/pods.go
index 2dbdfcf80..bcbd32d1b 100644
--- a/pkg/domain/infra/tunnel/pods.go
+++ b/pkg/domain/infra/tunnel/pods.go
@@ -2,12 +2,12 @@ package tunnel
import (
"context"
+ "errors"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/bindings/pods"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) {
@@ -80,6 +80,10 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
}
reports := make([]*entities.PodUnpauseReport, 0, len(foundPods))
for _, p := range foundPods {
+ // If the pod is not paused or degraded, there is no need to attempt an unpause on it
+ if p.Status != define.PodStatePaused && p.Status != define.PodStateDegraded {
+ continue
+ }
response, err := pods.Unpause(ic.ClientCtx, p.Id, nil)
if err != nil {
report := entities.PodUnpauseReport{
@@ -97,7 +101,7 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opts entities.PodStopOptions) ([]*entities.PodStopReport, error) {
timeout := -1
foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, namesOrIds)
- if err != nil && !(opts.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
+ if err != nil && !(opts.Ignore && errors.Is(err, define.ErrNoSuchPod)) {
return nil, err
}
if opts.Timeout != -1 {
@@ -164,7 +168,7 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, opts entities.PodRmOptions) ([]*entities.PodRmReport, error) {
foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, namesOrIds)
- if err != nil && !(opts.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
+ if err != nil && !(opts.Ignore && errors.Is(err, define.ErrNoSuchPod)) {
return nil, err
}
reports := make([]*entities.PodRmReport, 0, len(foundPods))
@@ -195,6 +199,10 @@ func (ic *ContainerEngine) PodCreate(ctx context.Context, specg entities.PodSpec
return pods.CreatePodFromSpec(ic.ClientCtx, &specg)
}
+func (ic *ContainerEngine) PodClone(ctx context.Context, podClone entities.PodCloneOptions) (*entities.PodCloneReport, error) {
+ return nil, nil
+}
+
func (ic *ContainerEngine) PodTop(ctx context.Context, opts entities.PodTopOptions) (*entities.StringSliceReport, error) {
switch {
case opts.Latest:
diff --git a/pkg/domain/infra/tunnel/secrets.go b/pkg/domain/infra/tunnel/secrets.go
index 2412ed0a2..d26718b12 100644
--- a/pkg/domain/infra/tunnel/secrets.go
+++ b/pkg/domain/infra/tunnel/secrets.go
@@ -2,12 +2,12 @@ package tunnel
import (
"context"
+ "fmt"
"io"
"github.com/containers/podman/v4/pkg/bindings/secrets"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader io.Reader, options entities.SecretCreateOptions) (*entities.SecretCreateReport, error) {
@@ -33,7 +33,7 @@ func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string
return nil, nil, err
}
if errModel.ResponseCode == 404 {
- errs = append(errs, errors.Errorf("no such secret %q", name))
+ errs = append(errs, fmt.Errorf("no such secret %q", name))
continue
}
return nil, nil, err
@@ -73,7 +73,7 @@ func (ic *ContainerEngine) SecretRm(ctx context.Context, nameOrIDs []string, opt
}
if errModel.ResponseCode == 404 {
allRm = append(allRm, &entities.SecretRmReport{
- Err: errors.Errorf("no secret with name or id %q: no such secret ", name),
+ Err: fmt.Errorf("no secret with name or id %q: no such secret ", name),
ID: "",
})
continue
diff --git a/pkg/domain/infra/tunnel/volumes.go b/pkg/domain/infra/tunnel/volumes.go
index 33e090148..b70d29783 100644
--- a/pkg/domain/infra/tunnel/volumes.go
+++ b/pkg/domain/infra/tunnel/volumes.go
@@ -2,12 +2,13 @@ package tunnel
import (
"context"
+ "errors"
+ "fmt"
"github.com/containers/podman/v4/pkg/bindings/volumes"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/entities/reports"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IDOrNameResponse, error) {
@@ -64,7 +65,7 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
return nil, nil, err
}
if errModel.ResponseCode == 404 {
- errs = append(errs, errors.Errorf("no such volume %q", id))
+ errs = append(errs, fmt.Errorf("no such volume %q", id))
continue
}
return nil, nil, err
@@ -108,3 +109,7 @@ func (ic *ContainerEngine) VolumeMount(ctx context.Context, nameOrIDs []string)
func (ic *ContainerEngine) VolumeUnmount(ctx context.Context, nameOrIDs []string) ([]*entities.VolumeUnmountReport, error) {
return nil, errors.New("unmounting volumes is not supported for remote clients")
}
+
+func (ic *ContainerEngine) VolumeReload(ctx context.Context) (*entities.VolumeReloadReport, error) {
+ return nil, errors.New("volume reload is not supported for remote clients")
+}
diff --git a/pkg/domain/utils/scp.go b/pkg/domain/utils/scp.go
new file mode 100644
index 000000000..3c73cddd1
--- /dev/null
+++ b/pkg/domain/utils/scp.go
@@ -0,0 +1,580 @@
+package utils
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "os"
+ "os/exec"
+ "os/user"
+ "strconv"
+ "strings"
+ "time"
+
+ scpD "github.com/dtylman/scp"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/pkg/domain/entities"
+ "github.com/containers/podman/v4/pkg/terminal"
+ "github.com/docker/distribution/reference"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+)
+
+func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool) (*entities.ImageLoadReport, *entities.ImageScpOptions, *entities.ImageScpOptions, []string, error) {
+ source := entities.ImageScpOptions{}
+ dest := entities.ImageScpOptions{}
+ sshInfo := entities.ImageScpConnections{}
+ report := entities.ImageLoadReport{Names: []string{}}
+
+ podman, err := os.Executable()
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ f, err := ioutil.TempFile("", "podman") // open temp file for load/save output
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ confR, err := config.NewConfig("") // create a hand made config for the remote engine since we might use remote and native at once
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("could not make config: %w", err)
+ }
+
+ cfg, err := config.ReadCustomConfig() // get ready to set ssh destination if necessary
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ locations := []*entities.ImageScpOptions{}
+ cliConnections := []string{}
+ args := []string{src}
+ if len(dst) > 0 {
+ args = append(args, dst)
+ }
+ for _, arg := range args {
+ loc, connect, err := ParseImageSCPArg(arg)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ locations = append(locations, loc)
+ cliConnections = append(cliConnections, connect...)
+ }
+ source = *locations[0]
+ switch {
+ case len(locations) > 1:
+ if err = ValidateSCPArgs(locations); err != nil {
+ return nil, nil, nil, nil, err
+ }
+ dest = *locations[1]
+ case len(locations) == 1:
+ switch {
+ case len(locations[0].Image) == 0:
+ return nil, nil, nil, nil, fmt.Errorf("no source image specified: %w", define.ErrInvalidArg)
+ case len(locations[0].Image) > 0 && !locations[0].Remote && len(locations[0].User) == 0: // if we have podman image scp $IMAGE
+ return nil, nil, nil, nil, fmt.Errorf("must specify a destination: %w", define.ErrInvalidArg)
+ }
+ }
+
+ source.Quiet = quiet
+ source.File = f.Name() // after parsing the arguments, set the file for the save/load
+ dest.File = source.File
+ if err = os.Remove(source.File); err != nil { // remove the file and simply use its name so podman creates the file upon save. avoids umask errors
+ return nil, nil, nil, nil, err
+ }
+
+ allLocal := true // if all connections are localhost, do not validate connections; but if we are using one localhost and one non-local host we need to use sshd
+ for _, val := range cliConnections {
+ if !strings.Contains(val, "@localhost::") {
+ allLocal = false
+ break
+ }
+ }
+ if allLocal {
+ cliConnections = []string{}
+ }
+
+ var serv map[string]config.Destination
+ serv, err = GetServiceInformation(&sshInfo, cliConnections, cfg)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ confR.Engine = config.EngineConfig{Remote: true, CgroupManager: "cgroupfs", ServiceDestinations: serv} // pass the service dest (either remote or something else) to engine
+ saveCmd, loadCmd := CreateCommands(source, dest, parentFlags, podman)
+
+ switch {
+ case source.Remote: // if we want to load FROM the remote, dest can either be local or remote in this case
+ err = SaveToRemote(source.Image, source.File, "", sshInfo.URI[0], sshInfo.Identities[0])
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ if dest.Remote { // we want to load remote -> remote, both source and dest are remote
+ rep, id, err := LoadToRemote(dest, dest.File, "", sshInfo.URI[1], sshInfo.Identities[1])
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ if len(rep) > 0 {
+ fmt.Println(rep)
+ }
+ if len(id) > 0 {
+ report.Names = append(report.Names, id)
+ }
+ break
+ }
+ id, err := ExecPodman(dest, podman, loadCmd)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ if len(id) > 0 {
+ report.Names = append(report.Names, id)
+ }
+ case dest.Remote: // remote host load, implies source is local
+ _, err = ExecPodman(dest, podman, saveCmd)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ rep, id, err := LoadToRemote(dest, source.File, "", sshInfo.URI[0], sshInfo.Identities[0])
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ if len(rep) > 0 {
+ fmt.Println(rep)
+ }
+ if len(id) > 0 {
+ report.Names = append(report.Names, id)
+ }
+ if err = os.Remove(source.File); err != nil {
+ return nil, nil, nil, nil, err
+ }
+ default: // else native load, both source and dest are local and transferring between users
+ if source.User == "" { // source user has to be set, destination does not
+ source.User = os.Getenv("USER")
+ if source.User == "" {
+ u, err := user.Current()
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("could not obtain user, make sure the environmental variable $USER is set: %w", err)
+ }
+ source.User = u.Username
+ }
+ }
+ return nil, &source, &dest, parentFlags, nil // transfer needs to be done in ABI due to cross issues
+ }
+
+ return &report, nil, nil, nil, nil
+}
+
+// CreateSCPCommand takes an existing command, appends the given arguments and returns a configured podman command for image scp
+func CreateSCPCommand(cmd *exec.Cmd, command []string) *exec.Cmd {
+ cmd.Args = append(cmd.Args, command...)
+ cmd.Env = os.Environ()
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stdout
+ return cmd
+}
+
+// ScpTag is a helper function for native podman to tag an image after a local load from image SCP
+func ScpTag(cmd *exec.Cmd, podman string, dest entities.ImageScpOptions) error {
+ cmd.Stdout = nil
+ out, err := cmd.Output() // this function captures the output temporarily in order to execute the next command
+ if err != nil {
+ return err
+ }
+ image := ExtractImage(out)
+ if cmd.Args[0] == "sudo" { // transferRootless will need the sudo since we are loading to sudo from a user acct
+ cmd = exec.Command("sudo", podman, "tag", image, dest.Tag)
+ } else {
+ cmd = exec.Command(podman, "tag", image, dest.Tag)
+ }
+ cmd.Stdout = os.Stdout
+ return cmd.Run()
+}
+
+// ExtractImage pulls out the last line of output from save/load (image id)
+func ExtractImage(out []byte) string {
+ fmt.Println(strings.TrimSuffix(string(out), "\n")) // print output
+ stringOut := string(out) // get all output
+ arrOut := strings.Split(stringOut, " ") // split it into an array
+ return strings.ReplaceAll(arrOut[len(arrOut)-1], "\n", "") // replace the trailing \n
+}
+
+// LoginUser starts the user process on the host so that image scp can use systemd-run
+func LoginUser(user string) (*exec.Cmd, error) {
+ sleep, err := exec.LookPath("sleep")
+ if err != nil {
+ return nil, err
+ }
+ machinectl, err := exec.LookPath("machinectl")
+ if err != nil {
+ return nil, err
+ }
+ cmd := exec.Command(machinectl, "shell", "-q", user+"@.host", sleep, "inf")
+ err = cmd.Start()
+ return cmd, err
+}
+
+// LoadToRemote takes image and remote connection information. It connects to the specified client,
+// copies the saved image over to the remote host, and then loads it onto the machine.
+// Returns a string containing the output or an error.
+func LoadToRemote(dest entities.ImageScpOptions, localFile string, tag string, url *url.URL, iden string) (string, string, error) {
+ dial, remoteFile, err := CreateConnection(url, iden)
+ if err != nil {
+ return "", "", err
+ }
+ defer dial.Close()
+
+ n, err := scpD.CopyTo(dial, localFile, remoteFile)
+ if err != nil {
+ errOut := strconv.Itoa(int(n)) + " Bytes copied before error"
+ return " ", "", fmt.Errorf("%v: %w", errOut, err)
+ }
+ var run string
+ if tag != "" {
+ return "", "", fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg)
+ }
+ podman := os.Args[0]
+ run = podman + " image load --input=" + remoteFile + ";rm " + remoteFile // run ssh image load of the file copied via scp
+ out, err := ExecRemoteCommand(dial, run)
+ if err != nil {
+ return "", "", err
+ }
+ rep := strings.TrimSuffix(string(out), "\n")
+ outArr := strings.Split(rep, " ")
+ id := outArr[len(outArr)-1]
+ if len(dest.Tag) > 0 { // tag the remote image using the output ID
+ run = podman + " tag " + id + " " + dest.Tag
+ _, err = ExecRemoteCommand(dial, run)
+ if err != nil {
+ return "", "", err
+ }
+ }
+ return rep, id, nil
+}
+
+// SaveToRemote takes image information and remote connection information. It connects to the specified client,
+// saves the specified image on the remote machine, and then copies it to the specified local location.
+// Returns an error if one occurs.
+func SaveToRemote(image, localFile string, tag string, uri *url.URL, iden string) error {
+ dial, remoteFile, err := CreateConnection(uri, iden)
+
+ if err != nil {
+ return err
+ }
+ defer dial.Close()
+
+ if tag != "" {
+ return fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg)
+ }
+ podman := os.Args[0]
+ run := podman + " image save " + image + " --format=oci-archive --output=" + remoteFile // run ssh image save to the remote file, which is then copied back via scp. Files are reversed in this case compared to load.
+ _, err = ExecRemoteCommand(dial, run)
+ if err != nil {
+ return err
+ }
+ n, err := scpD.CopyFrom(dial, remoteFile, localFile)
+ if _, conErr := ExecRemoteCommand(dial, "rm "+remoteFile); conErr != nil {
+ logrus.Errorf("Removing file on endpoint: %v", conErr)
+ }
+ if err != nil {
+ errOut := strconv.Itoa(int(n)) + " Bytes copied before error"
+ return fmt.Errorf("%v: %w", errOut, err)
+ }
+ return nil
+}
+
+// MakeRemoteFile creates the necessary remote file on the host to
+// save or load the image to. Returns a string with the file name or an error.
+func MakeRemoteFile(dial *ssh.Client) (string, error) {
+ run := "mktemp"
+ remoteFile, err := ExecRemoteCommand(dial, run)
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSuffix(string(remoteFile), "\n"), nil
+}
+
+// CreateConnection takes an SSH URL and an identity file path, dials the client,
+// and returns the dialed client, the name of its newly created remote file, and an error if applicable.
+func CreateConnection(url *url.URL, iden string) (*ssh.Client, string, error) {
+ cfg, err := ValidateAndConfigure(url, iden)
+ if err != nil {
+ return nil, "", err
+ }
+ dialAdd, err := ssh.Dial("tcp", url.Host, cfg) // dial the client
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to connect: %w", err)
+ }
+ file, err := MakeRemoteFile(dialAdd)
+ if err != nil {
+ return nil, "", err
+ }
+
+ return dialAdd, file, nil
+}
+
+// GetServiceInformation takes the parsed list of hosts to connect to and validates the information
+func GetServiceInformation(sshInfo *entities.ImageScpConnections, cliConnections []string, cfg *config.Config) (map[string]config.Destination, error) {
+ var serv map[string]config.Destination
+ var urlS string
+ var iden string
+ for i, val := range cliConnections {
+ splitEnv := strings.SplitN(val, "::", 2)
+ sshInfo.Connections = append(sshInfo.Connections, splitEnv[0])
+ conn, found := cfg.Engine.ServiceDestinations[sshInfo.Connections[i]]
+ if found {
+ urlS = conn.URI
+ iden = conn.Identity
+ } else { // no match, warn user and do a manual connection.
+ urlS = "ssh://" + sshInfo.Connections[i]
+ iden = ""
+ logrus.Warnf("Unknown connection name given. Please use system connection add to specify the default remote socket location")
+ }
+ urlFinal, err := url.Parse(urlS) // create an actual url to pass to exec command
+ if err != nil {
+ return nil, err
+ }
+ if urlFinal.User.Username() == "" {
+ if urlFinal.User, err = GetUserInfo(urlFinal); err != nil {
+ return nil, err
+ }
+ }
+ sshInfo.URI = append(sshInfo.URI, urlFinal)
+ sshInfo.Identities = append(sshInfo.Identities, iden)
+ }
+ return serv, nil
+}
+
+// ExecPodman executes the podman save/load command given the podman binary
+func ExecPodman(dest entities.ImageScpOptions, podman string, command []string) (string, error) {
+ cmd := exec.Command(podman)
+ CreateSCPCommand(cmd, command[1:])
+ logrus.Debugf("Executing podman command: %q", cmd)
+ if strings.Contains(strings.Join(command, " "), "load") { // need to tag
+ if len(dest.Tag) > 0 {
+ return "", ScpTag(cmd, podman, dest)
+ }
+ cmd.Stdout = nil
+ out, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+ img := ExtractImage(out)
+ return img, nil
+ }
+ return "", cmd.Run()
+}
+
+// CreateCommands forms the podman save and load commands used by SCP
+func CreateCommands(source entities.ImageScpOptions, dest entities.ImageScpOptions, parentFlags []string, podman string) ([]string, []string) {
+ var parentString string
+ quiet := ""
+ if source.Quiet {
+ quiet = "-q "
+ }
+ if len(parentFlags) > 0 {
+ parentString = strings.Join(parentFlags, " ") + " " // if there are parent args, an extra space needs to be added
+ } else {
+ parentString = strings.Join(parentFlags, " ")
+ }
+ loadCmd := strings.Split(fmt.Sprintf("%s %sload %s--input %s", podman, parentString, quiet, dest.File), " ")
+ saveCmd := strings.Split(fmt.Sprintf("%s %vsave %s--output %s %s", podman, parentString, quiet, source.File, source.Image), " ")
+ return saveCmd, loadCmd
+}
+
+// ParseImageSCPArg returns the valid connection and source/destination data based on the information provided by the user.
+// arg is a string containing one of the CLI arguments; returned are a filled-out source/destination options struct, a connections array, and an error if applicable.
+func ParseImageSCPArg(arg string) (*entities.ImageScpOptions, []string, error) {
+ location := entities.ImageScpOptions{}
+ var err error
+ cliConnections := []string{}
+
+ switch {
+ case strings.Contains(arg, "@localhost::"): // image transfer between users
+ location.User = strings.Split(arg, "@")[0]
+ location, err = ValidateImagePortion(location, arg)
+ if err != nil {
+ return nil, nil, err
+ }
+ cliConnections = append(cliConnections, arg)
+ case strings.Contains(arg, "::"):
+ location, err = ValidateImagePortion(location, arg)
+ if err != nil {
+ return nil, nil, err
+ }
+ location.Remote = true
+ cliConnections = append(cliConnections, arg)
+ default:
+ location.Image = arg
+ }
+ return &location, cliConnections, nil
+}
+
+// ValidateImagePortion is a helper function to validate the image name in an SCP argument
+func ValidateImagePortion(location entities.ImageScpOptions, arg string) (entities.ImageScpOptions, error) {
+ if RemoteArgLength(arg, 1) > 0 {
+ err := ValidateImageName(strings.Split(arg, "::")[1])
+ if err != nil {
+ return location, err
+ }
+ location.Image = strings.Split(arg, "::")[1] // this will get checked/set again once we validate connections
+ }
+ return location, nil
+}
+
+// ValidateSCPArgs takes the array of source and destination options and checks for common errors
+func ValidateSCPArgs(locations []*entities.ImageScpOptions) error {
+ if len(locations) > 2 {
+ return fmt.Errorf("cannot specify more than two arguments: %w", define.ErrInvalidArg)
+ }
+ switch {
+ case len(locations[0].Image) > 0 && len(locations[1].Image) > 0:
+ locations[1].Tag = locations[1].Image
+ locations[1].Image = ""
+ case len(locations[0].Image) == 0 && len(locations[1].Image) == 0:
+ return fmt.Errorf("a source image must be specified: %w", define.ErrInvalidArg)
+ }
+ return nil
+}
+
+// ValidateImageName makes sure that the image given is valid and no injections are occurring.
+// We simply use this for error checking, not setting the image.
+func ValidateImageName(input string) error {
+ // ParseNormalizedNamed transforms a shortname image into its
+ // full name reference so busybox => docker.io/library/busybox
+ // we want to keep our shortnames, so only return an error if
+ // we cannot parse what the user has given us
+ _, err := reference.ParseNormalizedNamed(input)
+ return err
+}
+
+// RemoteArgLength is a helper function to simplify extracting host argument data.
+// Returns an int containing the length of the specified index in a host::image string.
+func RemoteArgLength(input string, side int) int {
+ if strings.Contains(input, "::") {
+ return len((strings.Split(input, "::"))[side])
+ }
+ return -1
+}
+
+// ExecRemoteCommand takes an SSH client connection and a command to run, and executes the
+// command on the specified client. The function returns the stdout from the client or an error wrapping the stderr.
+func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) {
+ sess, err := dial.NewSession() // new ssh client session
+ if err != nil {
+ return nil, err
+ }
+ defer sess.Close()
+
+ var buffer bytes.Buffer
+ var bufferErr bytes.Buffer
+ sess.Stdout = &buffer // output from client funneled into buffer
+ sess.Stderr = &bufferErr // err from client funneled into buffer
+ if err := sess.Run(run); err != nil { // run the command on the ssh client
+ return nil, fmt.Errorf("%v: %w", bufferErr.String(), err)
+ }
+ return buffer.Bytes(), nil
+}
+
+func GetUserInfo(uri *url.URL) (*url.Userinfo, error) {
+ var (
+ usr *user.User
+ err error
+ )
+ if u, found := os.LookupEnv("_CONTAINERS_ROOTLESS_UID"); found {
+ usr, err = user.LookupId(u)
+ if err != nil {
+ return nil, fmt.Errorf("failed to lookup rootless user: %w", err)
+ }
+ } else {
+ usr, err = user.Current()
+ if err != nil {
+ return nil, fmt.Errorf("failed to obtain current user: %w", err)
+ }
+ }
+
+ pw, set := uri.User.Password()
+ if set {
+ return url.UserPassword(usr.Username, pw), nil
+ }
+ return url.User(usr.Username), nil
+}
+
+// ValidateAndConfigure will take an SSH URL and an identity key (RSA and the like) and ensure the information given is valid.
+// iden can be blank to mean no identity key.
+// Once the function validates the information, it creates and returns an ssh.ClientConfig.
+func ValidateAndConfigure(uri *url.URL, iden string) (*ssh.ClientConfig, error) {
+ var signers []ssh.Signer
+ passwd, passwdSet := uri.User.Password()
+ if iden != "" { // iden might be blank if coming from image scp or if no validation is needed
+ value := iden
+ s, err := terminal.PublicKey(value, []byte(passwd))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read identity %q: %w", value, err)
+ }
+ signers = append(signers, s)
+ logrus.Debugf("SSH Ident Key %q %s %s", value, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
+ }
+ if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { // validate ssh information, specifically the unix file socket used by the ssh agent.
+ logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer enabled", sock)
+
+ c, err := net.Dial("unix", sock)
+ if err != nil {
+ return nil, err
+ }
+ agentSigners, err := agent.NewClient(c).Signers()
+ if err != nil {
+ return nil, err
+ }
+
+ signers = append(signers, agentSigners...)
+
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ for _, s := range agentSigners {
+ logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
+ }
+ }
+ }
+ var authMethods []ssh.AuthMethod // now we validate and collect the authentication methods, most notably public key authentication
+ if len(signers) > 0 {
+ var dedup = make(map[string]ssh.Signer)
+ for _, s := range signers {
+ fp := ssh.FingerprintSHA256(s.PublicKey())
+ if _, found := dedup[fp]; found {
+ logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
+ }
+ dedup[fp] = s
+ }
+
+ var uniq []ssh.Signer
+ for _, s := range dedup {
+ uniq = append(uniq, s)
+ }
+ authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) {
+ return uniq, nil
+ }))
+ }
+ if passwdSet { // if password authentication is given and valid, add to the list
+ authMethods = append(authMethods, ssh.Password(passwd))
+ }
+ if len(authMethods) == 0 {
+ authMethods = append(authMethods, ssh.PasswordCallback(func() (string, error) {
+ pass, err := terminal.ReadPassword(fmt.Sprintf("%s's login password:", uri.User.Username()))
+ return string(pass), err
+ }))
+ }
+ tick, err := time.ParseDuration("40s")
+ if err != nil {
+ return nil, err
+ }
+ cfg := &ssh.ClientConfig{
+ User: uri.User.Username(),
+ Auth: authMethods,
+ HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+ Timeout: tick,
+ }
+ return cfg, nil
+}
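
Taken together, the SSH helpers above compose roughly as follows. This is a hedged usage sketch, not code from the patch: the host, port, and command are placeholders, and the pkg/domain/utils import path is assumed.

    package main

    import (
        "fmt"
        "net/url"

        "github.com/containers/podman/v4/pkg/domain/utils"
        "golang.org/x/crypto/ssh"
    )

    func main() {
        uri, err := url.Parse("ssh://core@localhost:22")
        if err != nil {
            panic(err)
        }
        // Resolve the effective user (honoring _CONTAINERS_ROOTLESS_UID).
        userinfo, err := utils.GetUserInfo(uri)
        if err != nil {
            panic(err)
        }
        uri.User = userinfo

        // Empty identity: fall back to the ssh-agent or an interactive password.
        cfg, err := utils.ValidateAndConfigure(uri, "")
        if err != nil {
            panic(err)
        }
        client, err := ssh.Dial("tcp", uri.Host, cfg)
        if err != nil {
            panic(err)
        }
        defer client.Close()

        out, err := utils.ExecRemoteCommand(client, "podman version")
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }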
diff --git a/pkg/domain/utils/secrets_filters.go b/pkg/domain/utils/secrets_filters.go
index e76bc592f..ab9b681ec 100644
--- a/pkg/domain/utils/secrets_filters.go
+++ b/pkg/domain/utils/secrets_filters.go
@@ -1,11 +1,11 @@
package utils
import (
+ "fmt"
"strings"
"github.com/containers/common/pkg/secrets"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
func IfPassesSecretsFilter(s secrets.Secret, filters map[string][]string) (bool, error) {
@@ -17,7 +17,7 @@ func IfPassesSecretsFilter(s secrets.Secret, filters map[string][]string) (bool,
case "id":
result = util.StringMatchRegexSlice(s.ID, filterValues)
default:
- return false, errors.Errorf("invalid filter %q", key)
+ return false, fmt.Errorf("invalid filter %q", key)
}
}
return result, nil
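
This hunk follows the patch-wide migration off github.com/pkg/errors: errors.Errorf becomes fmt.Errorf, and errors.Wrapf(err, ...) becomes fmt.Errorf("...: %w", ..., err), so the cause stays reachable through the standard library. A minimal, illustrative comparison:

    package main

    import (
        "errors"
        "fmt"
    )

    var errInvalid = errors.New("invalid argument")

    func main() {
        // Before: errors.Wrapf(errInvalid, "invalid filter %q", "foo")
        // After, as in this patch:
        wrapped := fmt.Errorf("invalid filter %q: %w", "foo", errInvalid)

        fmt.Println(wrapped)                        // invalid filter "foo": invalid argument
        fmt.Println(errors.Is(wrapped, errInvalid)) // true
    }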
diff --git a/pkg/domain/utils/utils_test.go b/pkg/domain/utils/utils_test.go
index 952a4b5be..291567f6b 100644
--- a/pkg/domain/utils/utils_test.go
+++ b/pkg/domain/utils/utils_test.go
@@ -5,6 +5,7 @@ import (
"sort"
"testing"
+ "github.com/containers/podman/v4/pkg/domain/entities"
"github.com/stretchr/testify/assert"
)
@@ -74,3 +75,41 @@ func TestToURLValues(t *testing.T) {
})
}
}
+
+func TestParseSCPArgs(t *testing.T) {
+ args := []string{"alpine", "root@localhost::"}
+ var source *entities.ImageScpOptions
+ var dest *entities.ImageScpOptions
+ var err error
+ source, _, err = ParseImageSCPArg(args[0])
+ assert.Nil(t, err)
+ assert.Equal(t, source.Image, "alpine")
+
+ dest, _, err = ParseImageSCPArg(args[1])
+ assert.Nil(t, err)
+ assert.Equal(t, dest.Image, "")
+ assert.Equal(t, dest.User, "root")
+
+ args = []string{"root@localhost::alpine"}
+ source, _, err = ParseImageSCPArg(args[0])
+ assert.Nil(t, err)
+ assert.Equal(t, source.User, "root")
+ assert.Equal(t, source.Image, "alpine")
+
+ args = []string{"charliedoern@192.168.68.126::alpine", "foobar@192.168.68.126::"}
+ source, _, err = ParseImageSCPArg(args[0])
+ assert.Nil(t, err)
+ assert.True(t, source.Remote)
+ assert.Equal(t, source.Image, "alpine")
+
+ dest, _, err = ParseImageSCPArg(args[1])
+ assert.Nil(t, err)
+ assert.True(t, dest.Remote)
+ assert.Equal(t, dest.Image, "")
+
+ args = []string{"charliedoern@192.168.68.126::alpine"}
+ source, _, err = ParseImageSCPArg(args[0])
+ assert.Nil(t, err)
+ assert.True(t, source.Remote)
+ assert.Equal(t, source.Image, "alpine")
+}
diff --git a/pkg/env/env.go b/pkg/env/env.go
index 5989d0da5..8af9fd77c 100644
--- a/pkg/env/env.go
+++ b/pkg/env/env.go
@@ -8,8 +8,6 @@ import (
"fmt"
"os"
"strings"
-
- "github.com/pkg/errors"
)
const whiteSpaces = " \t"
@@ -56,7 +54,7 @@ func ParseFile(path string) (_ map[string]string, err error) {
env := make(map[string]string)
defer func() {
if err != nil {
- err = errors.Wrapf(err, "error parsing env file %q", path)
+ err = fmt.Errorf("error parsing env file %q: %w", path, err)
}
}()
@@ -85,12 +83,12 @@ func parseEnv(env map[string]string, line string) error {
// catch invalid variables such as "=" or "=A"
if data[0] == "" {
- return errors.Errorf("invalid environment variable: %q", line)
+ return fmt.Errorf("invalid environment variable: %q", line)
}
// trim the front of a variable, but nothing else
name := strings.TrimLeft(data[0], whiteSpaces)
if strings.ContainsAny(name, whiteSpaces) {
- return errors.Errorf("name %q has white spaces, poorly formatted name", name)
+ return fmt.Errorf("name %q has white spaces, poorly formatted name", name)
}
if len(data) > 1 {
diff --git a/pkg/errorhandling/errorhandling.go b/pkg/errorhandling/errorhandling.go
index 6ee1e7e86..9b456c9c0 100644
--- a/pkg/errorhandling/errorhandling.go
+++ b/pkg/errorhandling/errorhandling.go
@@ -1,11 +1,11 @@
package errorhandling
import (
+ "errors"
"os"
"strings"
"github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -86,7 +86,7 @@ func Contains(err error, sub error) bool {
// PodConflictErrorModel is used in remote connections with podman
type PodConflictErrorModel struct {
Errs []string
- Id string // nolint
+ Id string //nolint:revive,stylecheck
}
// ErrorModel is used in remote connections with podman
@@ -121,3 +121,22 @@ func (e PodConflictErrorModel) Error() string {
func (e PodConflictErrorModel) Code() int {
return 409
}
+
+// Cause returns the most underlying error for the provided one. There is a
+// maximum error depth of 100 to avoid endless loops. An additional error log
+// message will be created if this maximum has been reached.
+func Cause(err error) (cause error) {
+ cause = err
+
+ const maxDepth = 100
+ for i := 0; i <= maxDepth; i++ {
+ res := errors.Unwrap(cause)
+ if res == nil {
+ return cause
+ }
+ cause = res
+ }
+
+ logrus.Errorf("Max error depth of %d reached, cannot unwrap until root cause: %v", maxDepth, err)
+ return cause
+}
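
A quick sketch of the new Cause helper on a doubly wrapped error, mirroring the "wrapped error" case in the test added below (the import path is the package's existing pkg/errorhandling path):

    package main

    import (
        "errors"
        "fmt"

        "github.com/containers/podman/v4/pkg/errorhandling"
    )

    func main() {
        root := errors.New("foo")
        err := fmt.Errorf("baz: %w", fmt.Errorf("bar: %w", root))
        fmt.Println(errorhandling.Cause(err) == root) // true
    }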
diff --git a/pkg/errorhandling/errorhandling_test.go b/pkg/errorhandling/errorhandling_test.go
new file mode 100644
index 000000000..ec720c5e7
--- /dev/null
+++ b/pkg/errorhandling/errorhandling_test.go
@@ -0,0 +1,53 @@
+package errorhandling
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCause(t *testing.T) {
+ t.Parallel()
+
+ for _, tc := range []struct {
+ name string
+ err func() error
+ expectedErr error
+ }{
+ {
+ name: "nil error",
+ err: func() error { return nil },
+ expectedErr: nil,
+ },
+ {
+ name: "equal errors",
+ err: func() error { return errors.New("foo") },
+ expectedErr: errors.New("foo"),
+ },
+ {
+ name: "wrapped error",
+ err: func() error { return fmt.Errorf("baz: %w", fmt.Errorf("bar: %w", errors.New("foo"))) },
+ expectedErr: errors.New("foo"),
+ },
+ {
+ name: "max depth reached",
+ err: func() error {
+ err := errors.New("error")
+ for i := 0; i <= 101; i++ {
+ err = fmt.Errorf("%d: %w", i, err)
+ }
+ return err
+ },
+ expectedErr: fmt.Errorf("0: %w", errors.New("error")),
+ },
+ } {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ err := Cause(tc.err())
+ assert.Equal(t, tc.expectedErr, err)
+ })
+ }
+}
diff --git a/pkg/hooks/1.0.0/hook.go b/pkg/hooks/1.0.0/hook.go
index 244e8800f..71f940a64 100644
--- a/pkg/hooks/1.0.0/hook.go
+++ b/pkg/hooks/1.0.0/hook.go
@@ -3,12 +3,12 @@ package hook
import (
"encoding/json"
+ "errors"
"fmt"
"os"
"regexp"
rspec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// Version is the hook configuration version defined in this package.
@@ -50,16 +50,16 @@ func (hook *Hook) Validate(extensionStages []string) (err error) {
for key, value := range hook.When.Annotations {
if _, err = regexp.Compile(key); err != nil {
- return errors.Wrapf(err, "invalid annotation key %q", key)
+ return fmt.Errorf("invalid annotation key %q: %w", key, err)
}
if _, err = regexp.Compile(value); err != nil {
- return errors.Wrapf(err, "invalid annotation value %q", value)
+ return fmt.Errorf("invalid annotation value %q: %w", value, err)
}
}
for _, command := range hook.When.Commands {
if _, err = regexp.Compile(command); err != nil {
- return errors.Wrapf(err, "invalid command %q", command)
+ return fmt.Errorf("invalid command %q: %w", command, err)
}
}
diff --git a/pkg/hooks/1.0.0/when.go b/pkg/hooks/1.0.0/when.go
index a65af048e..a1351890f 100644
--- a/pkg/hooks/1.0.0/when.go
+++ b/pkg/hooks/1.0.0/when.go
@@ -1,10 +1,11 @@
package hook
import (
+ "errors"
+ "fmt"
"regexp"
rspec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// When holds hook-injection conditions.
@@ -52,12 +53,12 @@ func (when *When) Match(config *rspec.Spec, annotations map[string]string, hasBi
for key, value := range annotations {
match, err = regexp.MatchString(keyPattern, key)
if err != nil {
- return false, errors.Wrap(err, "annotation key")
+ return false, fmt.Errorf("annotation key: %w", err)
}
if match {
match, err = regexp.MatchString(valuePattern, value)
if err != nil {
- return false, errors.Wrap(err, "annotation value")
+ return false, fmt.Errorf("annotation value: %w", err)
}
if match {
break
@@ -82,7 +83,7 @@ func (when *When) Match(config *rspec.Spec, annotations map[string]string, hasBi
for _, cmdPattern := range when.Commands {
match, err := regexp.MatchString(cmdPattern, command)
if err != nil {
- return false, errors.Wrap(err, "command")
+ return false, fmt.Errorf("command: %w", err)
}
if match {
return true, nil
diff --git a/pkg/hooks/exec/exec.go b/pkg/hooks/exec/exec.go
index 2b7bc5f31..bc639245f 100644
--- a/pkg/hooks/exec/exec.go
+++ b/pkg/hooks/exec/exec.go
@@ -10,7 +10,6 @@ import (
"time"
rspec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -46,7 +45,7 @@ func Run(ctx context.Context, hook *rspec.Hook, state []byte, stdout io.Writer,
go func() {
err := cmd.Wait()
if err != nil {
- err = errors.Wrapf(err, "executing %v", cmd.Args)
+ err = fmt.Errorf("executing %v: %w", cmd.Args, err)
}
exit <- err
}()
diff --git a/pkg/hooks/exec/runtimeconfigfilter.go b/pkg/hooks/exec/runtimeconfigfilter.go
index 3ab3073b2..72d4b8979 100644
--- a/pkg/hooks/exec/runtimeconfigfilter.go
+++ b/pkg/hooks/exec/runtimeconfigfilter.go
@@ -4,12 +4,12 @@ import (
"bytes"
"context"
"encoding/json"
+ "fmt"
"reflect"
"time"
"github.com/davecgh/go-spew/spew"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/pmezard/go-difflib/difflib"
"github.com/sirupsen/logrus"
)
@@ -43,7 +43,7 @@ func RuntimeConfigFilter(ctx context.Context, hooks []spec.Hook, config *spec.Sp
err = json.Unmarshal(data, &newConfig)
if err != nil {
logrus.Debugf("invalid JSON from config-filter hook %d:\n%s", i, string(data))
- return nil, errors.Wrapf(err, "unmarshal output from config-filter hook %d", i)
+ return nil, fmt.Errorf("unmarshal output from config-filter hook %d: %w", i, err)
}
if !reflect.DeepEqual(config, &newConfig) {
diff --git a/pkg/hooks/exec/runtimeconfigfilter_test.go b/pkg/hooks/exec/runtimeconfigfilter_test.go
index f4b6cf86a..a4e9b1fdb 100644
--- a/pkg/hooks/exec/runtimeconfigfilter_test.go
+++ b/pkg/hooks/exec/runtimeconfigfilter_test.go
@@ -3,28 +3,29 @@ package exec
import (
"context"
"encoding/json"
+ "errors"
"os"
"testing"
"time"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func TestRuntimeConfigFilter(t *testing.T) {
- unexpectedEndOfJSONInput := json.Unmarshal([]byte("{\n"), nil) //nolint
+ unexpectedEndOfJSONInput := json.Unmarshal([]byte("{\n"), nil) //nolint:govet // this should force the error
fileMode := os.FileMode(0600)
rootUint32 := uint32(0)
binUser := int(1)
for _, tt := range []struct {
- name string
- contextTimeout time.Duration
- hooks []spec.Hook
- input *spec.Spec
- expected *spec.Spec
- expectedHookError string
- expectedRunError error
+ name string
+ contextTimeout time.Duration
+ hooks []spec.Hook
+ input *spec.Spec
+ expected *spec.Spec
+ expectedHookError string
+ expectedRunError error
+ expectedRunErrorString string
}{
{
name: "no-op",
@@ -231,7 +232,8 @@ func TestRuntimeConfigFilter(t *testing.T) {
Path: "rootfs",
},
},
- expectedRunError: unexpectedEndOfJSONInput,
+ expectedRunError: unexpectedEndOfJSONInput,
+ expectedRunErrorString: unexpectedEndOfJSONInput.Error(),
},
} {
test := tt
@@ -243,7 +245,13 @@ func TestRuntimeConfigFilter(t *testing.T) {
defer cancel()
}
hookErr, err := RuntimeConfigFilter(ctx, test.hooks, test.input, DefaultPostKillTimeout)
- assert.Equal(t, test.expectedRunError, errors.Cause(err))
+ if test.expectedRunError != nil {
+ if test.expectedRunErrorString != "" {
+ assert.Contains(t, err.Error(), test.expectedRunErrorString)
+ } else {
+ assert.True(t, errors.Is(err, test.expectedRunError))
+ }
+ }
if test.expectedHookError == "" {
if hookErr != nil {
t.Fatal(hookErr)
diff --git a/pkg/hooks/hooks.go b/pkg/hooks/hooks.go
index ecd5b7dfc..14f98b1de 100644
--- a/pkg/hooks/hooks.go
+++ b/pkg/hooks/hooks.go
@@ -11,7 +11,6 @@ import (
current "github.com/containers/podman/v4/pkg/hooks/1.0.0"
rspec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -105,7 +104,7 @@ func (m *Manager) Hooks(config *rspec.Spec, annotations map[string]string, hasBi
for _, namedHook := range hooks {
match, err := namedHook.hook.When.Match(config, annotations, hasBindMounts)
if err != nil {
- return extensionStageHooks, errors.Wrapf(err, "matching hook %q", namedHook.name)
+ return extensionStageHooks, fmt.Errorf("matching hook %q: %w", namedHook.name, err)
}
if match {
logrus.Debugf("hook %s matched; adding to stages %v", namedHook.name, namedHook.hook.Stages)
diff --git a/pkg/hooks/read.go b/pkg/hooks/read.go
index c48d51f7d..379ed67ef 100644
--- a/pkg/hooks/read.go
+++ b/pkg/hooks/read.go
@@ -3,6 +3,8 @@ package hooks
import (
"encoding/json"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -10,7 +12,6 @@ import (
old "github.com/containers/podman/v4/pkg/hooks/0.1.0"
current "github.com/containers/podman/v4/pkg/hooks/1.0.0"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -36,7 +37,7 @@ func Read(path string, extensionStages []string) (*current.Hook, error) {
}
hook, err := read(content)
if err != nil {
- return nil, errors.Wrapf(err, "parsing hook %q", path)
+ return nil, fmt.Errorf("parsing hook %q: %w", path, err)
}
err = hook.Validate(extensionStages)
return hook, err
@@ -45,16 +46,16 @@ func Read(path string, extensionStages []string) (*current.Hook, error) {
func read(content []byte) (hook *current.Hook, err error) {
var ver version
if err := json.Unmarshal(content, &ver); err != nil {
- return nil, errors.Wrap(err, "version check")
+ return nil, fmt.Errorf("version check: %w", err)
}
reader, ok := Readers[ver.Version]
if !ok {
- return nil, errors.Errorf("unrecognized hook version: %q", ver.Version)
+ return nil, fmt.Errorf("unrecognized hook version: %q", ver.Version)
}
hook, err = reader(content)
if err != nil {
- return hook, errors.Wrap(err, ver.Version)
+ return hook, fmt.Errorf("%v: %w", ver.Version, err)
}
return hook, err
}
@@ -83,7 +84,7 @@ func ReadDir(path string, extensionStages []string, hooks map[string]*current.Ho
if res == nil {
res = err
} else {
- res = errors.Wrapf(res, "%v", err)
+ res = fmt.Errorf("%v: %w", err, res)
}
continue
}
diff --git a/pkg/inspect/inspect.go b/pkg/inspect/inspect.go
index 767d86daf..15943858f 100644
--- a/pkg/inspect/inspect.go
+++ b/pkg/inspect/inspect.go
@@ -41,18 +41,3 @@ type RootFS struct {
Type string `json:"Type"`
Layers []digest.Digest `json:"Layers"`
}
-
-// ImageResult is used for podman images for collection and output.
-type ImageResult struct {
- Tag string
- Repository string
- RepoDigests []string
- RepoTags []string
- ID string
- Digest digest.Digest
- ConfigDigest digest.Digest
- Created time.Time
- Size *uint64
- Labels map[string]string
- Dangling bool
-}
diff --git a/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go b/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go
index d05984dac..69613321f 100644
--- a/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go
+++ b/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go
@@ -48,7 +48,7 @@ const (
var (
Zero = int64Amount{}
- // Used by quantity strings - treat as read only
+ // Used by quantity strings - treat as read-only
zeroBytes = []byte("0")
)
diff --git a/pkg/k8s.io/apimachinery/pkg/api/resource/math.go b/pkg/k8s.io/apimachinery/pkg/api/resource/math.go
index 9d03f5c05..59a4c14de 100644
--- a/pkg/k8s.io/apimachinery/pkg/api/resource/math.go
+++ b/pkg/k8s.io/apimachinery/pkg/api/resource/math.go
@@ -29,13 +29,13 @@ const (
)
var (
- // Commonly needed big.Int values-- treat as read only!
+ // Commonly needed big.Int values-- treat as read-only!
bigTen = big.NewInt(10)
bigZero = big.NewInt(0)
bigOne = big.NewInt(1)
big1024 = big.NewInt(1024)
- // Commonly needed inf.Dec values-- treat as read only!
+ // Commonly needed inf.Dec values-- treat as read-only!
decZero = inf.NewDec(0, 0)
decOne = inf.NewDec(1, 0)
diff --git a/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go b/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go
index dcc5df219..588a189bf 100644
--- a/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -138,7 +138,6 @@ const (
var (
// Errors that could happen while parsing a string.
- //nolint:revive
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
ErrSuffix = errors.New("unable to parse quantity's suffix")
@@ -258,7 +257,7 @@ Suffix:
// we encountered a non decimal in the Suffix loop, but the last character
// was not a valid exponent
err = ErrFormatWrong
- // nolint:nakedret
+ //nolint:nakedret
return
}
@@ -579,9 +578,9 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
// append
result = result[:1]
- result = append(result, number...) // nolint: makezero
- result = append(result, suffix...) // nolint: makezero
- result = append(result, '"') // nolint: makezero
+ result = append(result, number...) //nolint: makezero
+ result = append(result, suffix...) //nolint: makezero
+ result = append(result, '"') //nolint: makezero
return result, nil
}
diff --git a/pkg/kubeutils/LICENSE b/pkg/kubeutils/LICENSE
deleted file mode 100644
index 9b259bdfc..000000000
--- a/pkg/kubeutils/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/pkg/machine/config.go b/pkg/machine/config.go
index d34776714..29cd7bc00 100644
--- a/pkg/machine/config.go
+++ b/pkg/machine/config.go
@@ -4,7 +4,7 @@
package machine
import (
- errors2 "errors"
+ "errors"
"io/ioutil"
"net"
"net/url"
@@ -13,7 +13,6 @@ import (
"time"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -42,7 +41,9 @@ const (
// Running indicates the qemu vm is running.
Running Status = "running"
// Stopped indicates the vm has stopped.
- Stopped Status = "stopped"
+ Stopped Status = "stopped"
+ // Starting indicates the vm is in the process of starting
+ Starting Status = "starting"
DefaultMachineName string = "podman-machine-default"
)
@@ -53,6 +54,7 @@ type Provider interface {
IsValidVMName(name string) (bool, error)
CheckExclusiveActiveVM() (bool, string, error)
RemoveAndCleanMachines() error
+ VMType() string
}
type RemoteConnectionType string
@@ -62,7 +64,7 @@ var (
DefaultIgnitionUserName = "core"
ErrNoSuchVM = errors.New("VM does not exist")
ErrVMAlreadyExists = errors.New("VM already exists")
- ErrVMAlreadyRunning = errors.New("VM already running")
+ ErrVMAlreadyRunning = errors.New("VM already running or starting")
ErrMultipleActiveVM = errors.New("only one VM can be active at a time")
ForwarderBinaryName = "gvproxy"
)
@@ -88,6 +90,7 @@ type ListResponse struct {
CreatedAt time.Time
LastUp time.Time
Running bool
+ Starting bool
Stream string
VMType string
CPUs uint64
@@ -138,14 +141,15 @@ type DistributionDownload interface {
Get() *Download
}
type InspectInfo struct {
- ConfigPath VMFile
- Created time.Time
- Image ImageConfig
- LastUp time.Time
- Name string
- Resources ResourceConfig
- SSHConfig SSHConfig
- State Status
+ ConfigPath VMFile
+ ConnectionInfo ConnectionConfig
+ Created time.Time
+ Image ImageConfig
+ LastUp time.Time
+ Name string
+ Resources ResourceConfig
+ SSHConfig SSHConfig
+ State Status
}
func (rc RemoteConnectionType) MakeSSHURL(host, path, port, userName string) url.URL {
@@ -251,11 +255,11 @@ func (m *VMFile) GetPath() string {
// the actual path
func (m *VMFile) Delete() error {
if m.Symlink != nil {
- if err := os.Remove(*m.Symlink); err != nil && !errors2.Is(err, os.ErrNotExist) {
+ if err := os.Remove(*m.Symlink); err != nil && !errors.Is(err, os.ErrNotExist) {
logrus.Errorf("unable to remove symlink %q", *m.Symlink)
}
}
- if err := os.Remove(m.Path); err != nil && !errors2.Is(err, os.ErrNotExist) {
+ if err := os.Remove(m.Path); err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
return nil
@@ -269,14 +273,14 @@ func (m *VMFile) Read() ([]byte, error) {
// NewMachineFile is a constructor for VMFile
func NewMachineFile(path string, symlink *string) (*VMFile, error) {
if len(path) < 1 {
- return nil, errors2.New("invalid machine file path")
+ return nil, errors.New("invalid machine file path")
}
if symlink != nil && len(*symlink) < 1 {
- return nil, errors2.New("invalid symlink path")
+ return nil, errors.New("invalid symlink path")
}
mf := VMFile{Path: path}
if symlink != nil && len(path) > maxSocketPathLength {
- if err := mf.makeSymlink(symlink); err != nil && !errors2.Is(err, os.ErrExist) {
+ if err := mf.makeSymlink(symlink); err != nil && !errors.Is(err, os.ErrExist) {
return nil, err
}
}
@@ -286,13 +290,13 @@ func NewMachineFile(path string, symlink *string) (*VMFile, error) {
// makeSymlink for macOS creates a symlink in $HOME/.podman/
// for a machinefile like a socket
func (m *VMFile) makeSymlink(symlink *string) error {
- homedir, err := os.UserHomeDir()
+ homeDir, err := os.UserHomeDir()
if err != nil {
return err
}
- sl := filepath.Join(homedir, ".podman", *symlink)
+ sl := filepath.Join(homeDir, ".podman", *symlink)
// make the symlink dir and throw away if it already exists
- if err := os.MkdirAll(filepath.Dir(sl), 0700); err != nil && !errors2.Is(err, os.ErrNotExist) {
+ if err := os.MkdirAll(filepath.Dir(sl), 0700); err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
m.Symlink = &sl
@@ -335,3 +339,9 @@ type SSHConfig struct {
// RemoteUsername of the vm user
RemoteUsername string
}
+
+// ConnectionConfig contains connection information, such as sockets
+type ConnectionConfig struct {
+ // PodmanSocket is the exported podman service socket
+ PodmanSocket *VMFile `json:"PodmanSocket"`
+}
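
As an aside, a rough sketch of how the new ConnectionInfo field can be read back from podman machine inspect output; this mirrors the e2e inspect test further below, and the exec call and import path are assumptions rather than part of the patch.

    package main

    import (
        "encoding/json"
        "fmt"
        "os/exec"

        "github.com/containers/podman/v4/pkg/machine"
    )

    func main() {
        out, err := exec.Command("podman", "machine", "inspect").Output()
        if err != nil {
            panic(err)
        }
        var infos []machine.InspectInfo
        if err := json.Unmarshal(out, &infos); err != nil {
            panic(err)
        }
        if len(infos) > 0 && infos[0].ConnectionInfo.PodmanSocket != nil {
            fmt.Println(infos[0].ConnectionInfo.PodmanSocket.GetPath())
        }
    }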
diff --git a/pkg/machine/config_test.go b/pkg/machine/config_test.go
index d9fc5425e..ca08660b9 100644
--- a/pkg/machine/config_test.go
+++ b/pkg/machine/config_test.go
@@ -1,3 +1,6 @@
+//go:build amd64 || arm64
+// +build amd64 arm64
+
package machine
import (
diff --git a/pkg/machine/connection.go b/pkg/machine/connection.go
index 841b2afa6..6ff761a92 100644
--- a/pkg/machine/connection.go
+++ b/pkg/machine/connection.go
@@ -4,10 +4,10 @@
package machine
import (
+ "errors"
"fmt"
"github.com/containers/common/pkg/config"
- "github.com/pkg/errors"
)
func AddConnection(uri fmt.Stringer, name, identity string, isDefault bool) error {
@@ -72,7 +72,7 @@ func RemoveConnection(name string) error {
if _, ok := cfg.Engine.ServiceDestinations[name]; ok {
delete(cfg.Engine.ServiceDestinations, name)
} else {
- return errors.Errorf("unable to find connection named %q", name)
+ return fmt.Errorf("unable to find connection named %q", name)
}
return cfg.Write()
}
diff --git a/pkg/machine/e2e/config.go b/pkg/machine/e2e/config.go
index c17b840d3..b3fe74b0c 100644
--- a/pkg/machine/e2e/config.go
+++ b/pkg/machine/e2e/config.go
@@ -3,7 +3,6 @@ package e2e
import (
"encoding/json"
"fmt"
- "math/rand"
"os"
"os/exec"
"path/filepath"
@@ -13,6 +12,7 @@ import (
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/qemu"
"github.com/containers/podman/v4/pkg/util"
+ "github.com/containers/storage/pkg/stringid"
. "github.com/onsi/ginkgo" //nolint:golint,stylecheck
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
@@ -85,6 +85,14 @@ func (ms *machineSession) outputToString() string {
return strings.Join(fields, " ")
}
+// errorToString returns the error output from a session in string form
+func (ms *machineSession) errorToString() string {
+ if ms == nil || ms.Err == nil || ms.Err.Contents() == nil {
+ return ""
+ }
+ return string(ms.Err.Contents())
+}
+
// newMB constructor for machine test builders
func newMB() (*machineTestBuilder, error) {
mb := machineTestBuilder{
@@ -128,14 +136,14 @@ func (m *machineTestBuilder) setTimeout(timeout time.Duration) *machineTestBuild
// toQemuInspectInfo is only for inspecting qemu machines. Other providers will need
// to make their own.
-func (mb *machineTestBuilder) toQemuInspectInfo() ([]qemuMachineInspectInfo, int, error) {
+func (mb *machineTestBuilder) toQemuInspectInfo() ([]machine.InspectInfo, int, error) {
args := []string{"machine", "inspect"}
args = append(args, mb.names...)
session, err := runWrapper(mb.podmanBinary, args, defaultTimeout, true)
if err != nil {
return nil, -1, err
}
- mii := []qemuMachineInspectInfo{}
+ mii := []machine.InspectInfo{}
err = json.Unmarshal(session.Bytes(), &mii)
return mii, session.ExitCode(), err
}
@@ -171,10 +179,5 @@ func (m *machineTestBuilder) init() {}
// randomString returns a string of given length composed of random characters
func randomString(n int) string {
- var randomLetters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
- b := make([]rune, n)
- for i := range b {
- b[i] = randomLetters[rand.Intn(len(randomLetters))]
- }
- return string(b)
+ return stringid.GenerateRandomID()[0:12]
}
diff --git a/pkg/machine/e2e/config_info.go b/pkg/machine/e2e/config_info.go
new file mode 100644
index 000000000..410c7e518
--- /dev/null
+++ b/pkg/machine/e2e/config_info.go
@@ -0,0 +1,20 @@
+package e2e
+
+type infoMachine struct {
+ format string
+ cmd []string
+}
+
+func (i *infoMachine) buildCmd(m *machineTestBuilder) []string {
+ cmd := []string{"machine", "info"}
+ if len(i.format) > 0 {
+ cmd = append(cmd, "--format", i.format)
+ }
+ i.cmd = cmd
+ return cmd
+}
+
+func (i *infoMachine) withFormat(format string) *infoMachine {
+ i.format = format
+ return i
+}
diff --git a/pkg/machine/e2e/info_test.go b/pkg/machine/e2e/info_test.go
new file mode 100644
index 000000000..eeabb78af
--- /dev/null
+++ b/pkg/machine/e2e/info_test.go
@@ -0,0 +1,58 @@
+package e2e
+
+import (
+ "github.com/containers/podman/v4/cmd/podman/machine"
+ jsoniter "github.com/json-iterator/go"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gexec"
+)
+
+var _ = Describe("podman machine info", func() {
+ var (
+ mb *machineTestBuilder
+ testDir string
+ )
+
+ BeforeEach(func() {
+ testDir, mb = setup()
+ })
+ AfterEach(func() {
+ teardown(originalHomeDir, testDir, mb)
+ })
+
+ It("machine info", func() {
+ info := new(infoMachine)
+ infoSession, err := mb.setCmd(info).run()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(infoSession).Should(Exit(0))
+
+ // Verify go template works and check for no running machines
+ info = new(infoMachine)
+ infoSession, err = mb.setCmd(info.withFormat("{{.Host.NumberOfMachines}}")).run()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(infoSession).Should(Exit(0))
+ Expect(infoSession.outputToString()).To(Equal("0"))
+
+ // Create a machine and check if info has been updated
+ i := new(initMachine)
+ initSession, err := mb.setCmd(i.withImagePath(mb.imagePath)).run()
+ Expect(err).To(BeNil())
+ Expect(initSession).To(Exit(0))
+
+ info = new(infoMachine)
+ infoSession, err = mb.setCmd(info.withFormat("{{.Host.NumberOfMachines}}")).run()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(infoSession).Should(Exit(0))
+ Expect(infoSession.outputToString()).To(Equal("1"))
+
+ // Check if json is in correct format
+ infoSession, err = mb.setCmd(info.withFormat("json")).run()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(infoSession).Should(Exit(0))
+
+ infoReport := &machine.Info{}
+ err = jsoniter.Unmarshal(infoSession.Bytes(), infoReport)
+ Expect(err).To(BeNil())
+ })
+})
diff --git a/pkg/machine/e2e/init_test.go b/pkg/machine/e2e/init_test.go
index 6949eb0af..40f140cae 100644
--- a/pkg/machine/e2e/init_test.go
+++ b/pkg/machine/e2e/init_test.go
@@ -3,6 +3,7 @@ package e2e
import (
"io/ioutil"
"os"
+ "runtime"
"time"
"github.com/containers/podman/v4/pkg/machine"
@@ -44,9 +45,9 @@ var _ = Describe("podman machine init", func() {
Expect(len(inspectBefore)).To(BeNumerically(">", 0))
testMachine := inspectBefore[0]
- Expect(testMachine.VM.Name).To(Equal(mb.names[0]))
- Expect(testMachine.VM.CPUs).To(Equal(uint64(1)))
- Expect(testMachine.VM.Memory).To(Equal(uint64(2048)))
+ Expect(testMachine.Name).To(Equal(mb.names[0]))
+ Expect(testMachine.Resources.CPUs).To(Equal(uint64(1)))
+ Expect(testMachine.Resources.Memory).To(Equal(uint64(2048)))
})
@@ -61,7 +62,7 @@ var _ = Describe("podman machine init", func() {
Expect(len(inspectBefore)).To(BeNumerically(">", 0))
Expect(err).To(BeNil())
Expect(len(inspectBefore)).To(BeNumerically(">", 0))
- Expect(inspectBefore[0].VM.Name).To(Equal(mb.names[0]))
+ Expect(inspectBefore[0].Name).To(Equal(mb.names[0]))
s := startMachine{}
ssession, err := mb.setCmd(s).setTimeout(time.Minute * 10).run()
@@ -104,7 +105,15 @@ var _ = Describe("podman machine init", func() {
memorySession, err := mb.setName(name).setCmd(sshMemory.withSSHComand([]string{"cat", "/proc/meminfo", "|", "numfmt", "--field", "2", "--from-unit=Ki", "--to-unit=Mi", "|", "sed", "'s/ kB/M/g'", "|", "grep", "MemTotal"})).run()
Expect(err).To(BeNil())
Expect(memorySession).To(Exit(0))
- Expect(memorySession.outputToString()).To(ContainSubstring("3824"))
+ switch runtime.GOOS {
+ // OSes handle memory differently
+ case "linux":
+ Expect(memorySession.outputToString()).To(ContainSubstring("3821"))
+ case "darwin":
+ Expect(memorySession.outputToString()).To(ContainSubstring("3824"))
+ default:
+ // add windows when testing on that platform
+ }
sshTimezone := sshMachine{}
timezoneSession, err := mb.setName(name).setCmd(sshTimezone.withSSHComand([]string{"date"})).run()
diff --git a/pkg/machine/e2e/inspect_test.go b/pkg/machine/e2e/inspect_test.go
index 2c9de5664..93fb8cc2b 100644
--- a/pkg/machine/e2e/inspect_test.go
+++ b/pkg/machine/e2e/inspect_test.go
@@ -1,10 +1,9 @@
package e2e
import (
- "encoding/json"
+ "strings"
"github.com/containers/podman/v4/pkg/machine"
- "github.com/containers/podman/v4/pkg/machine/qemu"
jsoniter "github.com/json-iterator/go"
. "github.com/onsi/ginkgo"
@@ -50,24 +49,6 @@ var _ = Describe("podman machine stop", func() {
Expect(err).To(BeNil())
Expect(inspectSession).To(Exit(0))
Expect(inspectSession.Bytes()).To(ContainSubstring("foo1"))
-
- type fakeInfos struct {
- Status string
- VM qemu.MachineVM
- }
- infos := make([]fakeInfos, 0, 2)
- err = json.Unmarshal(inspectSession.Bytes(), &infos)
- Expect(err).ToNot(HaveOccurred())
- Expect(len(infos)).To(Equal(2))
-
- // rm := new(rmMachine)
- // // Must manually clean up due to multiple names
- // for _, name := range []string{"foo1", "foo2"} {
- // mb.setName(name).setCmd(rm.withForce()).run()
- // mb.names = []string{}
- // }
- // mb.names = []string{}
-
})
It("inspect with go format", func() {
@@ -86,6 +67,7 @@ var _ = Describe("podman machine stop", func() {
var inspectInfo []machine.InspectInfo
err = jsoniter.Unmarshal(inspectSession.Bytes(), &inspectInfo)
Expect(err).To(BeNil())
+ Expect(strings.HasSuffix(inspectInfo[0].ConnectionInfo.PodmanSocket.GetPath(), "podman.sock"))
inspect := new(inspectMachine)
inspect = inspect.withFormat("{{.Name}}")
diff --git a/pkg/machine/e2e/list_test.go b/pkg/machine/e2e/list_test.go
index 0bc867047..fb855c61e 100644
--- a/pkg/machine/e2e/list_test.go
+++ b/pkg/machine/e2e/list_test.go
@@ -2,9 +2,10 @@ package e2e
import (
"strings"
+ "time"
- "github.com/containers/buildah/util"
- "github.com/containers/podman/v4/cmd/podman/machine"
+ "github.com/containers/common/pkg/util"
+ "github.com/containers/podman/v4/pkg/domain/entities"
jsoniter "github.com/json-iterator/go"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -29,7 +30,7 @@ var _ = Describe("podman machine list", func() {
firstList, err := mb.setCmd(list).run()
Expect(err).NotTo(HaveOccurred())
Expect(firstList).Should(Exit(0))
- Expect(len(firstList.outputToStringSlice())).To(Equal(1)) // just the header
+ Expect(firstList.outputToStringSlice()).To(HaveLen(1)) // just the header
i := new(initMachine)
session, err := mb.setCmd(i.withImagePath(mb.imagePath)).run()
@@ -39,7 +40,7 @@ var _ = Describe("podman machine list", func() {
secondList, err := mb.setCmd(list).run()
Expect(err).NotTo(HaveOccurred())
Expect(secondList).To(Exit(0))
- Expect(len(secondList.outputToStringSlice())).To(Equal(2)) // one machine and the header
+ Expect(secondList.outputToStringSlice()).To(HaveLen(2)) // one machine and the header
})
It("list machines with quiet or noheading", func() {
@@ -51,12 +52,12 @@ var _ = Describe("podman machine list", func() {
firstList, err := mb.setCmd(list.withQuiet()).run()
Expect(err).NotTo(HaveOccurred())
Expect(firstList).Should(Exit(0))
- Expect(len(firstList.outputToStringSlice())).To(Equal(0)) // No header with quiet
+ Expect(firstList.outputToStringSlice()).To(HaveLen(0)) // No header with quiet
noheaderSession, err := mb.setCmd(list.withNoHeading()).run() // noheader
Expect(err).NotTo(HaveOccurred())
Expect(noheaderSession).Should(Exit(0))
- Expect(len(noheaderSession.outputToStringSlice())).To(Equal(0))
+ Expect(noheaderSession.outputToStringSlice()).To(HaveLen(0))
i := new(initMachine)
session, err := mb.setName(name1).setCmd(i.withImagePath(mb.imagePath)).run()
@@ -70,7 +71,7 @@ var _ = Describe("podman machine list", func() {
secondList, err := mb.setCmd(list.withQuiet()).run()
Expect(err).NotTo(HaveOccurred())
Expect(secondList).To(Exit(0))
- Expect(len(secondList.outputToStringSlice())).To(Equal(2)) // two machines, no header
+ Expect(secondList.outputToStringSlice()).To(HaveLen(2)) // two machines, no header
listNames := secondList.outputToStringSlice()
stripAsterisk(listNames)
@@ -87,7 +88,7 @@ var _ = Describe("podman machine list", func() {
startSession, err := mb.setCmd(s).runWithoutWait()
Expect(err).To(BeNil())
l := new(listMachine)
- for { // needs to be infinite because we need to check if running when inspect returns to avoid race conditions.
+ for i := 0; i < 30; i++ {
listSession, err := mb.setCmd(l).run()
Expect(listSession).To(Exit(0))
Expect(err).To(BeNil())
@@ -96,6 +97,7 @@ var _ = Describe("podman machine list", func() {
} else {
break
}
+ time.Sleep(3 * time.Second)
}
Expect(startSession).To(Exit(0))
listSession, err := mb.setCmd(l).run()
@@ -116,10 +118,10 @@ var _ = Describe("podman machine list", func() {
// go format
list := new(listMachine)
- listSession, err := mb.setCmd(list.withFormat("{{.Name}}").withNoHeading()).run()
+ listSession, err := mb.setCmd(list.withFormat("{{.Name}}")).run()
Expect(err).NotTo(HaveOccurred())
Expect(listSession).To(Exit(0))
- Expect(len(listSession.outputToStringSlice())).To(Equal(1))
+ Expect(listSession.outputToStringSlice()).To(HaveLen(1))
listNames := listSession.outputToStringSlice()
stripAsterisk(listNames)
@@ -128,13 +130,21 @@ var _ = Describe("podman machine list", func() {
// --format json
list2 := new(listMachine)
list2 = list2.withFormat("json")
- listSession2, err := mb.setName("foo1").setCmd(list2).run()
+ listSession2, err := mb.setCmd(list2).run()
Expect(err).To(BeNil())
Expect(listSession2).To(Exit(0))
- var listResponse []*machine.ListReporter
+ var listResponse []*entities.ListReporter
err = jsoniter.Unmarshal(listSession.Bytes(), &listResponse)
Expect(err).To(BeNil())
+
+ // table format includes the header
+ list = new(listMachine)
+ listSession3, err3 := mb.setCmd(list.withFormat("table {{.Name}}")).run()
+ Expect(err3).NotTo(HaveOccurred())
+ Expect(listSession3).To(Exit(0))
+ listNames3 := listSession3.outputToStringSlice()
+ Expect(listNames3).To(HaveLen(2))
})
})
diff --git a/pkg/machine/e2e/machine_test.go b/pkg/machine/e2e/machine_test.go
index 657014b05..7b063937d 100644
--- a/pkg/machine/e2e/machine_test.go
+++ b/pkg/machine/e2e/machine_test.go
@@ -22,7 +22,7 @@ func TestMain(m *testing.M) {
}
const (
- defaultStream string = "podman-testing"
+ defaultStream string = "testing"
)
var (
@@ -97,6 +97,9 @@ func setup() (string, *machineTestBuilder) {
if err := os.Setenv("HOME", homeDir); err != nil {
Fail("failed to set home dir")
}
+ if err := os.Setenv("XDG_RUNTIME_DIR", homeDir); err != nil {
+ Fail("failed to set xdg_runtime dir")
+ }
if err := os.Unsetenv("SSH_AUTH_SOCK"); err != nil {
Fail("unable to unset SSH_AUTH_SOCK")
}
@@ -120,9 +123,9 @@ func setup() (string, *machineTestBuilder) {
}
func teardown(origHomeDir string, testDir string, mb *machineTestBuilder) {
- s := new(stopMachine)
+ r := new(rmMachine)
for _, name := range mb.names {
- if _, err := mb.setName(name).setCmd(s).run(); err != nil {
+ if _, err := mb.setName(name).setCmd(r.withForce()).run(); err != nil {
fmt.Printf("error occurred rm'ing machine: %q\n", err)
}
}
diff --git a/pkg/machine/e2e/set_test.go b/pkg/machine/e2e/set_test.go
index 15215a44d..80cb89488 100644
--- a/pkg/machine/e2e/set_test.go
+++ b/pkg/machine/e2e/set_test.go
@@ -1,6 +1,8 @@
package e2e
import (
+ "runtime"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
@@ -57,8 +59,15 @@ var _ = Describe("podman machine set", func() {
memorySession, err := mb.setName(name).setCmd(sshMemory.withSSHComand([]string{"cat", "/proc/meminfo", "|", "numfmt", "--field", "2", "--from-unit=Ki", "--to-unit=Mi", "|", "sed", "'s/ kB/M/g'", "|", "grep", "MemTotal"})).run()
Expect(err).To(BeNil())
Expect(memorySession).To(Exit(0))
- Expect(memorySession.outputToString()).To(ContainSubstring("3824"))
-
+ switch runtime.GOOS {
+ // it seems macOS and Linux handle memory differently
+ case "linux":
+ Expect(memorySession.outputToString()).To(ContainSubstring("3821"))
+ case "darwin":
+ Expect(memorySession.outputToString()).To(ContainSubstring("3824"))
+ default:
+ // windows can go here if we ever run tests there
+ }
// Setting a running machine results in 125
runner, err := mb.setName(name).setCmd(set.withCPUs(4)).run()
Expect(err).To(BeNil())
diff --git a/pkg/machine/e2e/ssh_test.go b/pkg/machine/e2e/ssh_test.go
index 155d39a64..9ee31ac26 100644
--- a/pkg/machine/e2e/ssh_test.go
+++ b/pkg/machine/e2e/ssh_test.go
@@ -56,5 +56,12 @@ var _ = Describe("podman machine ssh", func() {
Expect(err).To(BeNil())
Expect(sshSession).To(Exit(0))
Expect(sshSession.outputToString()).To(ContainSubstring("Fedora CoreOS"))
+
+ // keep exit code
+ sshSession, err = mb.setName(name).setCmd(ssh.withSSHComand([]string{"false"})).run()
+ Expect(err).To(BeNil())
+ Expect(sshSession).To(Exit(1))
+ Expect(sshSession.outputToString()).To(Equal(""))
+ Expect(sshSession.errorToString()).To(Equal(""))
})
})
diff --git a/pkg/machine/fcos.go b/pkg/machine/fcos.go
index df58b8a1e..4ccb99e96 100644
--- a/pkg/machine/fcos.go
+++ b/pkg/machine/fcos.go
@@ -17,7 +17,6 @@ import (
"github.com/coreos/stream-metadata-go/fedoracoreos"
"github.com/coreos/stream-metadata-go/release"
"github.com/coreos/stream-metadata-go/stream"
- "github.com/pkg/errors"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@@ -139,20 +138,13 @@ func getStreamURL(streamType string) url2.URL {
// This should get Exported and stay put as it will apply to all fcos downloads
// getFCOS parses fedoraCoreOS's stream and returns the image download URL and the release version
-func GetFCOSDownload(imageStream string) (*FcosDownloadInfo, error) { //nolint:staticcheck
+func GetFCOSDownload(imageStream string) (*FcosDownloadInfo, error) {
var (
fcosstable stream.Stream
altMeta release.Release
streamType string
)
- // This is being hard set to testing. Once podman4 is in the
- // fcos trees, we should remove it and re-release at least on
- // macs.
- // TODO: remove when podman4.0 is in coreos
-
- imageStream = "podman-testing" //nolint:staticcheck
-
switch imageStream {
case "podman-testing":
streamType = "podman-testing"
@@ -163,7 +155,7 @@ func GetFCOSDownload(imageStream string) (*FcosDownloadInfo, error) { //nolint:s
case "stable":
streamType = fedoracoreos.StreamStable
default:
- return nil, errors.Errorf("invalid stream %s: valid streams are `testing` and `stable`", imageStream)
+ return nil, fmt.Errorf("invalid stream %s: valid streams are `testing` and `stable`", imageStream)
}
streamurl := getStreamURL(streamType)
resp, err := http.Get(streamurl.String())
diff --git a/pkg/machine/fedora.go b/pkg/machine/fedora.go
index 14a173da6..46e450418 100644
--- a/pkg/machine/fedora.go
+++ b/pkg/machine/fedora.go
@@ -4,6 +4,7 @@
package machine
import (
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -13,7 +14,6 @@ import (
"path/filepath"
"regexp"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -107,12 +107,12 @@ func getFedoraDownload(releaseStream string) (string, *url.URL, int64, error) {
newLocation := rawURL + file
downloadURL, err := url.Parse(newLocation)
if err != nil {
- return "", nil, -1, errors.Wrapf(err, "invalid URL generated from discovered Fedora file: %s", newLocation)
+ return "", nil, -1, fmt.Errorf("invalid URL generated from discovered Fedora file: %s: %w", newLocation, err)
}
resp, err := http.Head(newLocation)
if err != nil {
- return "", nil, -1, errors.Wrapf(err, "head request failed: %s", newLocation)
+ return "", nil, -1, fmt.Errorf("head request failed: %s: %w", newLocation, err)
}
_ = resp.Body.Close()
diff --git a/pkg/machine/ignition.go b/pkg/machine/ignition.go
index 35a9a30cb..f4602cc95 100644
--- a/pkg/machine/ignition.go
+++ b/pkg/machine/ignition.go
@@ -93,7 +93,7 @@ func NewIgnitionFile(ign DynamicIgnition) error {
tz string
)
// local means the same as the host
- // lookup where it is pointing to on the host
+ // look up where it is pointing to on the host
if ign.TimeZone == "local" {
tz, err = getLocalTimeZone()
if err != nil {
@@ -348,7 +348,7 @@ Delegate=memory pids cpu io
},
})
- // Setup /etc/subuid and /etc/subgid
+ // Set up /etc/subuid and /etc/subgid
for _, sub := range []string{"/etc/subuid", "/etc/subgid"} {
files = append(files, File{
Node: Node{
diff --git a/pkg/machine/keys.go b/pkg/machine/keys.go
index 15c1f73d8..0c7d7114e 100644
--- a/pkg/machine/keys.go
+++ b/pkg/machine/keys.go
@@ -6,6 +6,7 @@ package machine
import (
"errors"
"fmt"
+ "io"
"io/ioutil"
"os"
"os/exec"
@@ -51,7 +52,23 @@ func CreateSSHKeysPrefix(dir string, file string, passThru bool, skipExisting bo
// generatekeys creates an ed25519 set of keys
func generatekeys(writeLocation string) error {
args := append(append([]string{}, sshCommand[1:]...), writeLocation)
- return exec.Command(sshCommand[0], args...).Run()
+ cmd := exec.Command(sshCommand[0], args...)
+ stdErr, err := cmd.StderrPipe()
+ if err != nil {
+ return err
+ }
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+ waitErr := cmd.Wait()
+ if waitErr == nil {
+ return nil
+ }
+ errMsg, err := io.ReadAll(stdErr)
+ if err != nil {
+ return fmt.Errorf("key generation failed, unable to read from stderr: %w", waitErr)
+ }
+ return fmt.Errorf("failed to generate keys: %s: %w", string(errMsg), waitErr)
}
// generatekeys creates an ed25519 set of keys
@@ -59,7 +76,16 @@ func generatekeysPrefix(dir string, file string, passThru bool, prefix ...string
args := append([]string{}, prefix[1:]...)
args = append(args, sshCommand...)
args = append(args, file)
- cmd := exec.Command(prefix[0], args...)
+
+ binary, err := exec.LookPath(prefix[0])
+ if err != nil {
+ return err
+ }
+ binary, err = filepath.Abs(binary)
+ if err != nil {
+ return err
+ }
+ cmd := exec.Command(binary, args...)
cmd.Dir = dir
if passThru {
cmd.Stdin = os.Stdin
diff --git a/pkg/machine/qemu/config.go b/pkg/machine/qemu/config.go
index 56c95e3b3..bada1af9b 100644
--- a/pkg/machine/qemu/config.go
+++ b/pkg/machine/qemu/config.go
@@ -72,8 +72,10 @@ type MachineVM struct {
Mounts []machine.Mount
// Name of VM
Name string
- // PidFilePath is the where the PID file lives
+ // PidFilePath is where the Proxy PID file lives
PidFilePath machine.VMFile
+ // VMPidFilePath is where the VM PID file lives
+ VMPidFilePath machine.VMFile
// QMPMonitor is the qemu monitor object for sending commands
QMPMonitor Monitor
// ReadySocket tells host when vm is booted
diff --git a/pkg/machine/qemu/config_test.go b/pkg/machine/qemu/config_test.go
index 4d96ec6e7..72cb3ed90 100644
--- a/pkg/machine/qemu/config_test.go
+++ b/pkg/machine/qemu/config_test.go
@@ -1,3 +1,6 @@
+//go:build (amd64 && !windows) || (arm64 && !windows)
+// +build amd64,!windows arm64,!windows
+
package qemu
import (
diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index 6e36b0886..6134e69e1 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -5,9 +5,11 @@ package qemu
import (
"bufio"
+ "bytes"
"context"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"io/fs"
"io/ioutil"
@@ -16,9 +18,11 @@ import (
"net/url"
"os"
"os/exec"
+ "os/signal"
"path/filepath"
"strconv"
"strings"
+ "syscall"
"time"
"github.com/containers/common/pkg/config"
@@ -28,8 +32,8 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/digitalocean/go-qemu/qmp"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
)
var (
@@ -105,6 +109,9 @@ func (p *Provider) NewMachine(opts machine.InitOptions) (machine.VM, error) {
if err != nil {
return nil, err
}
+ if err := vm.setPIDSocket(); err != nil {
+ return nil, err
+ }
cmd := []string{execPath}
// Add memory
cmd = append(cmd, []string{"-m", strconv.Itoa(int(vm.Memory))}...)
@@ -132,12 +139,12 @@ func (p *Provider) NewMachine(opts machine.InitOptions) (machine.VM, error) {
cmd = append(cmd, []string{
"-device", "virtio-serial",
// qemu needs to establish the long name; other connections can use the symlink'd
- "-chardev", "socket,path=" + vm.ReadySocket.Path + ",server=on,wait=off,id=" + vm.Name + "_ready",
- "-device", "virtserialport,chardev=" + vm.Name + "_ready" + ",name=org.fedoraproject.port.0"}...)
+		// Note: both the id and the chardev start with an extra "a" because qemu requires that they
+		// start with a letter, while machine names may also begin with a number
+ "-chardev", "socket,path=" + vm.ReadySocket.Path + ",server=on,wait=off,id=a" + vm.Name + "_ready",
+ "-device", "virtserialport,chardev=a" + vm.Name + "_ready" + ",name=org.fedoraproject.port.0",
+ "-pidfile", vm.VMPidFilePath.GetPath()}...)
vm.CmdLine = cmd
- if err := vm.setPIDSocket(); err != nil {
- return nil, err
- }
return vm, nil
}
@@ -207,7 +214,7 @@ func migrateVM(configPath string, config []byte, vm *MachineVM) error {
vm.Rootful = old.Rootful
vm.UID = old.UID
- // Backup the original config file
+ // Back up the original config file
if err := os.Rename(configPath, configPath+".orig"); err != nil {
return err
}
@@ -314,6 +321,7 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) {
source := paths[0]
target := source
readonly := false
+ securityModel := "mapped-xattr"
if len(paths) > 1 {
target = paths[1]
}
@@ -321,18 +329,20 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) {
options := paths[2]
volopts := strings.Split(options, ",")
for _, o := range volopts {
- switch o {
- case "rw":
+ switch {
+ case o == "rw":
readonly = false
- case "ro":
+ case o == "ro":
readonly = true
+ case strings.HasPrefix(o, "security_model="):
+ securityModel = strings.Split(o, "=")[1]
default:
fmt.Printf("Unknown option: %s\n", o)
}
}
}
if volumeType == VolumeTypeVirtfs {
- virtfsOptions := fmt.Sprintf("local,path=%s,mount_tag=%s,security_model=mapped-xattr", source, tag)
+ virtfsOptions := fmt.Sprintf("local,path=%s,mount_tag=%s,security_model=%s", source, tag, securityModel)
if readonly {
virtfsOptions += ",readonly"
}
@@ -430,12 +440,12 @@ func (v *MachineVM) Set(_ string, opts machine.SetOptions) ([]error, error) {
if v.Name != machine.DefaultMachineName {
suffix = " " + v.Name
}
- return setErrors, errors.Errorf("cannot change settings while the vm is running, run 'podman machine stop%s' first", suffix)
+ return setErrors, fmt.Errorf("cannot change settings while the vm is running, run 'podman machine stop%s' first", suffix)
}
if opts.Rootful != nil && v.Rootful != *opts.Rootful {
if err := v.setRootful(*opts.Rootful); err != nil {
- setErrors = append(setErrors, errors.Wrapf(err, "failed to set rootful option"))
+ setErrors = append(setErrors, fmt.Errorf("failed to set rootful option: %w", err))
} else {
v.Rootful = *opts.Rootful
}
@@ -453,7 +463,7 @@ func (v *MachineVM) Set(_ string, opts machine.SetOptions) ([]error, error) {
if opts.DiskSize != nil && v.DiskSize != *opts.DiskSize {
if err := v.resizeDisk(*opts.DiskSize, v.DiskSize); err != nil {
- setErrors = append(setErrors, errors.Wrapf(err, "failed to resize disk"))
+ setErrors = append(setErrors, fmt.Errorf("failed to resize disk: %w", err))
} else {
v.DiskSize = *opts.DiskSize
}
@@ -484,19 +494,33 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
if err := v.writeConfig(); err != nil {
return fmt.Errorf("writing JSON file: %w", err)
}
- defer func() {
+ doneStarting := func() {
v.Starting = false
if err := v.writeConfig(); err != nil {
logrus.Errorf("Writing JSON file: %v", err)
}
+ }
+ defer doneStarting()
+
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+ go func() {
+ _, ok := <-c
+ if !ok {
+ return
+ }
+ doneStarting()
+ os.Exit(1)
}()
+ defer close(c)
+
if v.isIncompatible() {
logrus.Errorf("machine %q is incompatible with this release of podman and needs to be recreated, starting for recovery only", v.Name)
}
forwardSock, forwardState, err := v.startHostNetworking()
if err != nil {
- return errors.Errorf("unable to start host networking: %q", err)
+ return fmt.Errorf("unable to start host networking: %q", err)
}
rtPath, err := getRuntimeDir()
@@ -550,34 +574,46 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
files := []*os.File{dnr, dnw, dnw, fd}
attr.Files = files
logrus.Debug(v.CmdLine)
- cmd := v.CmdLine
+ cmdLine := v.CmdLine
// Disable graphic window when not in debug mode
	// Done in start, so we're not stuck with the debug level we used on init
if !logrus.IsLevelEnabled(logrus.DebugLevel) {
- cmd = append(cmd, "-display", "none")
+ cmdLine = append(cmdLine, "-display", "none")
}
- _, err = os.StartProcess(v.CmdLine[0], cmd, attr)
+ stderrBuf := &bytes.Buffer{}
+
+ cmd := &exec.Cmd{
+ Args: cmdLine,
+ Path: cmdLine[0],
+ Stdin: dnr,
+ Stdout: dnw,
+ Stderr: stderrBuf,
+ ExtraFiles: []*os.File{fd},
+ }
+ err = cmd.Start()
if err != nil {
// check if qemu was not found
if !errors.Is(err, os.ErrNotExist) {
return err
}
- // lookup qemu again maybe the path was changed, https://github.com/containers/podman/issues/13394
+		// look up qemu again; maybe the path was changed, https://github.com/containers/podman/issues/13394
cfg, err := config.Default()
if err != nil {
return err
}
- cmd[0], err = cfg.FindHelperBinary(QemuCommand, true)
+ cmdLine[0], err = cfg.FindHelperBinary(QemuCommand, true)
if err != nil {
return err
}
- _, err = os.StartProcess(cmd[0], cmd, attr)
+ cmd.Path = cmdLine[0]
+ err = cmd.Start()
if err != nil {
- return errors.Wrapf(err, "unable to execute %q", cmd)
+ return fmt.Errorf("unable to execute %q: %w", cmd, err)
}
}
+ defer cmd.Process.Release() //nolint:errcheck
fmt.Println("Waiting for VM ...")
socketPath, err := getRuntimeDir()
if err != nil {
@@ -592,6 +628,16 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
if err == nil {
break
}
+ // check if qemu is still alive
+ var status syscall.WaitStatus
+ pid, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
+ if err != nil {
+ return fmt.Errorf("failed to read qemu process status: %w", err)
+ }
+ if pid > 0 {
+ // child exited
+ return fmt.Errorf("qemu exited unexpectedly with exit code %d, stderr: %s", status.ExitStatus(), stderrBuf.String())
+ }
time.Sleep(wait)
wait++
}
@@ -682,7 +728,7 @@ func (v *MachineVM) checkStatus(monitor *qmp.SocketMonitor) (machine.Status, err
}
b, err := monitor.Run(input)
if err != nil {
- if errors.Cause(err) == os.ErrNotExist {
+ if errors.Is(err, os.ErrNotExist) {
return machine.Stopped, nil
}
return "", err
@@ -737,17 +783,17 @@ func (v *MachineVM) Stop(_ string, _ machine.StopOptions) error {
if _, err := os.Stat(v.PidFilePath.GetPath()); os.IsNotExist(err) {
return nil
}
- pidString, err := v.PidFilePath.Read()
+ proxyPidString, err := v.PidFilePath.Read()
if err != nil {
return err
}
- pidNum, err := strconv.Atoi(string(pidString))
+ proxyPid, err := strconv.Atoi(string(proxyPidString))
if err != nil {
return err
}
- p, err := os.FindProcess(pidNum)
- if p == nil && err != nil {
+ proxyProc, err := os.FindProcess(proxyPid)
+ if proxyProc == nil && err != nil {
return err
}
@@ -756,7 +802,7 @@ func (v *MachineVM) Stop(_ string, _ machine.StopOptions) error {
return err
}
// Kill the process
- if err := p.Kill(); err != nil {
+ if err := proxyProc.Kill(); err != nil {
return err
}
// Remove the pidfile
@@ -770,24 +816,52 @@ func (v *MachineVM) Stop(_ string, _ machine.StopOptions) error {
if err := qmpMonitor.Disconnect(); err != nil {
// FIXME: this error should probably be returned
- return nil // nolint: nilerr
+ return nil //nolint: nilerr
}
-
disconnected = true
- waitInternal := 250 * time.Millisecond
- for i := 0; i < 5; i++ {
- state, err := v.State(false)
- if err != nil {
- return err
- }
- if state != machine.Running {
- break
+
+ if err := v.ReadySocket.Delete(); err != nil {
+ return err
+ }
+
+ if v.VMPidFilePath.GetPath() == "" {
+ // no vm pid file path means it's probably a machine created before we
+ // started using it, so we revert to the old way of waiting for the
+ // machine to stop
+ fmt.Println("Waiting for VM to stop running...")
+ waitInternal := 250 * time.Millisecond
+ for i := 0; i < 5; i++ {
+ state, err := v.State(false)
+ if err != nil {
+ return err
+ }
+ if state != machine.Running {
+ break
+ }
+ time.Sleep(waitInternal)
+ waitInternal *= 2
}
- time.Sleep(waitInternal)
- waitInternal *= 2
+		// after the machine stops running, it normally takes about 1 second for the
+		// qemu VM to exit, so we wait a bit to try to avoid issues
+ time.Sleep(2 * time.Second)
+ return nil
+ }
+
+ vmPidString, err := v.VMPidFilePath.Read()
+ if err != nil {
+ return err
+ }
+ vmPid, err := strconv.Atoi(strings.TrimSpace(string(vmPidString)))
+ if err != nil {
+ return err
}
- return v.ReadySocket.Delete()
+ fmt.Println("Waiting for VM to exit...")
+ for isProcessAlive(vmPid) {
+ time.Sleep(500 * time.Millisecond)
+ }
+
+ return nil
}
// NewQMPMonitor creates the monitor subsection of our vm
@@ -831,8 +905,14 @@ func (v *MachineVM) Remove(_ string, opts machine.RemoveOptions) (string, func()
if err != nil {
return "", nil, err
}
- if state == machine.Running && !opts.Force {
- return "", nil, errors.Errorf("running vm %q cannot be destroyed", v.Name)
+ if state == machine.Running {
+ if !opts.Force {
+ return "", nil, fmt.Errorf("running vm %q cannot be destroyed", v.Name)
+ }
+ err := v.Stop(v.Name, machine.StopOptions{})
+ if err != nil {
+ return "", nil, err
+ }
}
// Collect all the files that need to be destroyed
@@ -874,8 +954,11 @@ func (v *MachineVM) Remove(_ string, opts machine.RemoveOptions) (string, func()
// remove socket and pid file if any: warn at low priority if things fail
// Remove the pidfile
+ if err := v.VMPidFilePath.Delete(); err != nil {
+ logrus.Debugf("Error while removing VM pidfile: %v", err)
+ }
if err := v.PidFilePath.Delete(); err != nil {
- logrus.Debugf("Error while removing pidfile: %v", err)
+ logrus.Debugf("Error while removing proxy pidfile: %v", err)
}
// Remove socket
if err := v.QMPMonitor.Address.Delete(); err != nil {
@@ -904,11 +987,16 @@ func (v *MachineVM) State(bypass bool) (machine.Status, error) {
}
// Check if we can dial it
if v.Starting && !bypass {
- return "", nil
+ return machine.Starting, nil
}
monitor, err := qmp.NewSocketMonitor(v.QMPMonitor.Network, v.QMPMonitor.Address.GetPath(), v.QMPMonitor.Timeout)
if err != nil {
- // FIXME: this error should probably be returned
+ // If an improper cleanup was done and the socketmonitor was not deleted,
+		// it can appear as though the machine state is not stopped. Checking for ECONNREFUSED
+		// almost assures us that the vm is stopped.
+ if errors.Is(err, syscall.ECONNREFUSED) {
+ return machine.Stopped, nil
+ }
return "", err
}
if err := monitor.Connect(); err != nil {
@@ -941,7 +1029,7 @@ func (v *MachineVM) SSH(_ string, opts machine.SSHOptions) error {
return err
}
if state != machine.Running {
- return errors.Errorf("vm %q is not running", v.Name)
+ return fmt.Errorf("vm %q is not running", v.Name)
}
username := opts.Username
@@ -952,7 +1040,8 @@ func (v *MachineVM) SSH(_ string, opts machine.SSHOptions) error {
sshDestination := username + "@localhost"
port := strconv.Itoa(v.Port)
- args := []string{"-i", v.IdentityPath, "-p", port, sshDestination, "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no"}
+ args := []string{"-i", v.IdentityPath, "-p", port, sshDestination, "-o", "UserKnownHostsFile=/dev/null",
+ "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", "-o", "SetEnv=LC_ALL="}
if len(opts.Args) > 0 {
args = append(args, opts.Args...)
} else {
@@ -1051,6 +1140,7 @@ func getVMInfos() ([]*machine.ListResponse, error) {
listEntry.RemoteUsername = vm.RemoteUsername
listEntry.IdentityPath = vm.IdentityPath
listEntry.CreatedAt = vm.Created
+ listEntry.Starting = vm.Starting
if listEntry.CreatedAt.IsZero() {
listEntry.CreatedAt = time.Now()
@@ -1064,6 +1154,7 @@ func getVMInfos() ([]*machine.ListResponse, error) {
if err != nil {
return err
}
+ listEntry.Running = state == machine.Running
if !vm.LastUp.IsZero() { // this means we have already written a time to the config
listEntry.LastUp = vm.LastUp
@@ -1074,9 +1165,6 @@ func getVMInfos() ([]*machine.ListResponse, error) {
return err
}
}
- if state == machine.Running {
- listEntry.Running = true
- }
listed = append(listed, listEntry)
}
@@ -1105,10 +1193,10 @@ func (p *Provider) IsValidVMName(name string) (bool, error) {
func (p *Provider) CheckExclusiveActiveVM() (bool, string, error) {
vms, err := getVMInfos()
if err != nil {
- return false, "", errors.Wrap(err, "error checking VM active")
+ return false, "", fmt.Errorf("error checking VM active: %w", err)
}
for _, vm := range vms {
- if vm.Running {
+ if vm.Running || vm.Starting {
return true, vm.Name, nil
}
}
@@ -1116,7 +1204,7 @@ func (p *Provider) CheckExclusiveActiveVM() (bool, string, error) {
}
// startHostNetworking runs a binary on the host system that allows users
-// to setup port forwarding to the podman virtual machine
+// to set up port forwarding to the podman virtual machine
func (v *MachineVM) startHostNetworking() (string, apiForwardingState, error) {
cfg, err := config.Default()
if err != nil {
@@ -1157,7 +1245,10 @@ func (v *MachineVM) startHostNetworking() (string, apiForwardingState, error) {
fmt.Println(cmd)
}
_, err = os.StartProcess(cmd[0], cmd, attr)
- return forwardSock, state, errors.Wrapf(err, "unable to execute: %q", cmd)
+ if err != nil {
+ return "", 0, fmt.Errorf("unable to execute: %q: %w", cmd, err)
+ }
+ return forwardSock, state, nil
}
func (v *MachineVM) setupAPIForwarding(cmd []string) ([]string, string, apiForwardingState) {
@@ -1283,13 +1374,19 @@ func (v *MachineVM) setPIDSocket() error {
if !rootless.IsRootless() {
rtPath = "/run"
}
- pidFileName := fmt.Sprintf("%s.pid", v.Name)
socketDir := filepath.Join(rtPath, "podman")
- pidFilePath, err := machine.NewMachineFile(filepath.Join(socketDir, pidFileName), &pidFileName)
+ vmPidFileName := fmt.Sprintf("%s_vm.pid", v.Name)
+ proxyPidFileName := fmt.Sprintf("%s_proxy.pid", v.Name)
+ vmPidFilePath, err := machine.NewMachineFile(filepath.Join(socketDir, vmPidFileName), &vmPidFileName)
+ if err != nil {
+ return err
+ }
+ proxyPidFilePath, err := machine.NewMachineFile(filepath.Join(socketDir, proxyPidFileName), &proxyPidFileName)
if err != nil {
return err
}
- v.PidFilePath = *pidFilePath
+ v.VMPidFilePath = *vmPidFilePath
+ v.PidFilePath = *proxyPidFilePath
return nil
}
@@ -1420,7 +1517,7 @@ func (v *MachineVM) update() error {
b, err := v.ConfigPath.Read()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
- return errors.Wrap(machine.ErrNoSuchVM, v.Name)
+ return fmt.Errorf("%v: %w", v.Name, machine.ErrNoSuchVM)
}
return err
}
@@ -1471,16 +1568,22 @@ func (v *MachineVM) Inspect() (*machine.InspectInfo, error) {
if err != nil {
return nil, err
}
-
+ connInfo := new(machine.ConnectionConfig)
+ podmanSocket, err := v.forwardSocketPath()
+ if err != nil {
+ return nil, err
+ }
+ connInfo.PodmanSocket = podmanSocket
return &machine.InspectInfo{
- ConfigPath: v.ConfigPath,
- Created: v.Created,
- Image: v.ImageConfig,
- LastUp: v.LastUp,
- Name: v.Name,
- Resources: v.ResourceConfig,
- SSHConfig: v.SSHConfig,
- State: state,
+ ConfigPath: v.ConfigPath,
+ ConnectionInfo: *connInfo,
+ Created: v.Created,
+ Image: v.ImageConfig,
+ LastUp: v.LastUp,
+ Name: v.Name,
+ Resources: v.ResourceConfig,
+ SSHConfig: v.SSHConfig,
+ State: state,
}, nil
}
@@ -1490,7 +1593,7 @@ func (v *MachineVM) resizeDisk(diskSize uint64, oldSize uint64) error {
// only if the virtualdisk size is less than
// the given disk size
if diskSize < oldSize {
- return errors.Errorf("new disk size must be larger than current disk size: %vGB", oldSize)
+ return fmt.Errorf("new disk size must be larger than current disk size: %vGB", oldSize)
}
// Find the qemu executable
@@ -1506,7 +1609,7 @@ func (v *MachineVM) resizeDisk(diskSize uint64, oldSize uint64) error {
resize.Stdout = os.Stdout
resize.Stderr = os.Stderr
if err := resize.Run(); err != nil {
- return errors.Errorf("resizing image: %q", err)
+ return fmt.Errorf("resizing image: %q", err)
}
return nil
@@ -1545,7 +1648,7 @@ func (v *MachineVM) editCmdLine(flag string, value string) {
}
}
-// RemoveAndCleanMachines removes all machine and cleans up any other files associatied with podman machine
+// RemoveAndCleanMachines removes all machines and cleans up any other files associated with podman machine
func (p *Provider) RemoveAndCleanMachines() error {
var (
vm machine.VM
@@ -1620,3 +1723,15 @@ func (p *Provider) RemoveAndCleanMachines() error {
}
return prevErr
}
+
+func isProcessAlive(pid int) bool {
+ err := unix.Kill(pid, syscall.Signal(0))
+ if err == nil || err == unix.EPERM {
+ return true
+ }
+ return false
+}
+
+func (p *Provider) VMType() string {
+ return vmtype
+}
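
Throughout this file the github.com/pkg/errors helpers give way to standard-library wrapping with %w. As a rough sketch of why errors.Is can replace the old errors.Cause comparisons (the path and messages below are made up for illustration, not part of the patch):

    package sketch

    import (
        "errors"
        "fmt"
        "os"
    )

    // statConfig wraps the underlying error with %w, which keeps the
    // original sentinel (os.ErrNotExist) reachable through the chain.
    func statConfig(path string) error {
        if _, err := os.Stat(path); err != nil {
            return fmt.Errorf("reading machine config %q: %w", path, err)
        }
        return nil
    }

    // missing reports whether the wrapped error stems from a missing file;
    // errors.Is walks the %w chain, so the added context does not hide it.
    func missing(err error) bool {
        return errors.Is(err, os.ErrNotExist)
    }
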
diff --git a/pkg/machine/qemu/machine_test.go b/pkg/machine/qemu/machine_test.go
index 62ca6068a..4c393d0f4 100644
--- a/pkg/machine/qemu/machine_test.go
+++ b/pkg/machine/qemu/machine_test.go
@@ -1,3 +1,6 @@
+//go:build (amd64 && !windows) || (arm64 && !windows)
+// +build amd64,!windows arm64,!windows
+
package qemu
import (
diff --git a/pkg/machine/qemu/options_darwin_arm64.go b/pkg/machine/qemu/options_darwin_arm64.go
index 4c954af00..d75237938 100644
--- a/pkg/machine/qemu/options_darwin_arm64.go
+++ b/pkg/machine/qemu/options_darwin_arm64.go
@@ -4,6 +4,8 @@ import (
"os"
"os/exec"
"path/filepath"
+
+ "github.com/containers/common/pkg/config"
)
var (
@@ -15,8 +17,8 @@ func (v *MachineVM) addArchOptions() []string {
opts := []string{
"-accel", "hvf",
"-accel", "tcg",
- "-cpu", "cortex-a57",
- "-M", "virt,highmem=off",
+ "-cpu", "host",
+ "-M", "virt,highmem=on",
"-drive", "file=" + getEdk2CodeFd("edk2-aarch64-code.fd") + ",if=pflash,format=raw,readonly=on",
"-drive", "file=" + ovmfDir + ",if=pflash,format=raw"}
return opts
@@ -38,6 +40,22 @@ func getOvmfDir(imagePath, vmName string) string {
}
/*
+ * When QEmu is installed in a non-default location on the system,
+ * we can use the qemu-system-* binary path to figure out the install
+ * location for Qemu and use it to look for edk2-code-fd
+ */
+func getEdk2CodeFdPathFromQemuBinaryPath() string {
+ cfg, err := config.Default()
+ if err == nil {
+ execPath, err := cfg.FindHelperBinary(QemuCommand, true)
+ if err == nil {
+ return filepath.Clean(filepath.Join(filepath.Dir(execPath), "..", "share", "qemu"))
+ }
+ }
+ return ""
+}
+
+/*
* QEmu can be installed in multiple locations on MacOS, especially on
* Apple Silicon systems. A build from source will likely install it in
* /usr/local/bin, whereas Homebrew package management standard is to
@@ -45,6 +63,7 @@ func getOvmfDir(imagePath, vmName string) string {
*/
func getEdk2CodeFd(name string) string {
dirs := []string{
+ getEdk2CodeFdPathFromQemuBinaryPath(),
"/opt/homebrew/opt/podman/libexec/share/qemu",
"/usr/local/share/qemu",
"/opt/homebrew/share/qemu",
diff --git a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go
index 0b2874baf..450d8b877 100644
--- a/pkg/machine/wsl/machine.go
+++ b/pkg/machine/wsl/machine.go
@@ -6,6 +6,7 @@ package wsl
import (
"bufio"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/fs"
@@ -18,10 +19,10 @@ import (
"strings"
"time"
+ "github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/utils"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
@@ -116,6 +117,43 @@ ln -fs /home/[USER]/.config/systemd/[USER]/linger-example.service \
/home/[USER]/.config/systemd/[USER]/default.target.wants/linger-example.service
`
+const proxyConfigSetup = `#!/bin/bash
+
+SYSTEMD_CONF=/etc/systemd/system.conf.d/default-env.conf
+ENVD_CONF=/etc/environment.d/default-env.conf
+PROFILE_CONF=/etc/profile.d/default-env.sh
+
+IFS="|"
+read proxies
+
+mkdir -p /etc/profile.d /etc/environment.d /etc/systemd/system.conf.d/
+rm -f $SYSTEMD_CONF
+for proxy in $proxies; do
+ output+="$proxy "
+done
+echo "[Manager]" >> $SYSTEMD_CONF
+echo -ne "DefaultEnvironment=" >> $SYSTEMD_CONF
+
+echo $output >> $SYSTEMD_CONF
+rm -f $ENVD_CONF
+for proxy in $proxies; do
+ echo "$proxy" >> $ENVD_CONF
+done
+rm -f $PROFILE_CONF
+for proxy in $proxies; do
+ echo "export $proxy" >> $PROFILE_CONF
+done
+`
+
+const proxyConfigAttempt = `if [ -f /usr/local/bin/proxyinit ]; \
+then /usr/local/bin/proxyinit; \
+else exit 42; \
+fi`
+
+const clearProxySettings = `rm -f /etc/systemd/system.conf.d/default-env.conf \
+ /etc/environment.d/default-env.conf \
+ /etc/profile.d/default-env.sh`
+
const wslInstallError = `Could not %s. See previous output for any potential failure details.
If you can not resolve the issue, and rerunning fails, try the "wsl --install" process
outlined in the following article:
@@ -239,7 +277,7 @@ func readAndMigrate(configPath string, name string) (*MachineVM, error) {
b, err := os.ReadFile(configPath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
- return nil, errors.Wrap(machine.ErrNoSuchVM, name)
+ return nil, fmt.Errorf("%v: %w", name, machine.ErrNoSuchVM)
}
return vm, err
}
@@ -300,6 +338,7 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) {
return cont, err
}
+ _ = setupWslProxyEnv()
homeDir := homedir.Get()
sshDir := filepath.Join(homeDir, ".ssh")
v.IdentityPath = filepath.Join(sshDir, v.Name)
@@ -368,7 +407,7 @@ func (v *MachineVM) writeConfig() error {
return err
}
if err := ioutil.WriteFile(jsonFile, b, 0644); err != nil {
- return errors.Wrap(err, "could not write machine json config")
+ return fmt.Errorf("could not write machine json config: %w", err)
}
return nil
@@ -406,38 +445,38 @@ func provisionWSLDist(v *MachineVM) (string, error) {
distDir := filepath.Join(vmDataDir, "wsldist")
distTarget := filepath.Join(distDir, v.Name)
if err := os.MkdirAll(distDir, 0755); err != nil {
- return "", errors.Wrap(err, "could not create wsldist directory")
+ return "", fmt.Errorf("could not create wsldist directory: %w", err)
}
dist := toDist(v.Name)
fmt.Println("Importing operating system into WSL (this may take 5+ minutes on a new WSL install)...")
if err = runCmdPassThrough("wsl", "--import", dist, distTarget, v.ImagePath); err != nil {
- return "", errors.Wrap(err, "WSL import of guest OS failed")
+ return "", fmt.Errorf("the WSL import of guest OS failed: %w", err)
}
fmt.Println("Installing packages (this will take awhile)...")
if err = runCmdPassThrough("wsl", "-d", dist, "dnf", "upgrade", "-y"); err != nil {
- return "", errors.Wrap(err, "package upgrade on guest OS failed")
+ return "", fmt.Errorf("package upgrade on guest OS failed: %w", err)
}
fmt.Println("Enabling Copr")
if err = runCmdPassThrough("wsl", "-d", dist, "dnf", "install", "-y", "'dnf-command(copr)'"); err != nil {
- return "", errors.Wrap(err, "enabling copr failed")
+ return "", fmt.Errorf("enabling copr failed: %w", err)
}
fmt.Println("Enabling podman4 repo")
if err = runCmdPassThrough("wsl", "-d", dist, "dnf", "-y", "copr", "enable", "rhcontainerbot/podman4"); err != nil {
- return "", errors.Wrap(err, "enabling copr failed")
+ return "", fmt.Errorf("enabling copr failed: %w", err)
}
if err = runCmdPassThrough("wsl", "-d", dist, "dnf", "install",
"podman", "podman-docker", "openssh-server", "procps-ng", "-y"); err != nil {
- return "", errors.Wrap(err, "package installation on guest OS failed")
+ return "", fmt.Errorf("package installation on guest OS failed: %w", err)
}
// Fixes newuidmap
if err = runCmdPassThrough("wsl", "-d", dist, "dnf", "reinstall", "shadow-utils", "-y"); err != nil {
- return "", errors.Wrap(err, "package reinstallation of shadow-utils on guest OS failed")
+ return "", fmt.Errorf("package reinstallation of shadow-utils on guest OS failed: %w", err)
}
// Windows 11 (NT Version = 10, Build 22000) generates harmless but scary messages on every
@@ -456,28 +495,28 @@ func createKeys(v *MachineVM, dist string, sshDir string) error {
user := v.RemoteUsername
if err := os.MkdirAll(sshDir, 0700); err != nil {
- return errors.Wrap(err, "could not create ssh directory")
+ return fmt.Errorf("could not create ssh directory: %w", err)
}
if err := runCmdPassThrough("wsl", "--terminate", dist); err != nil {
- return errors.Wrap(err, "could not cycle WSL dist")
+ return fmt.Errorf("could not cycle WSL dist: %w", err)
}
key, err := machine.CreateSSHKeysPrefix(sshDir, v.Name, true, true, "wsl", "-d", dist)
if err != nil {
- return errors.Wrap(err, "could not create ssh keys")
+ return fmt.Errorf("could not create ssh keys: %w", err)
}
if err := pipeCmdPassThrough("wsl", key+"\n", "-d", dist, "sh", "-c", "mkdir -p /root/.ssh;"+
"cat >> /root/.ssh/authorized_keys; chmod 600 /root/.ssh/authorized_keys"); err != nil {
- return errors.Wrap(err, "could not create root authorized keys on guest OS")
+ return fmt.Errorf("could not create root authorized keys on guest OS: %w", err)
}
userAuthCmd := withUser("mkdir -p /home/[USER]/.ssh;"+
"cat >> /home/[USER]/.ssh/authorized_keys; chown -R [USER]:[USER] /home/[USER]/.ssh;"+
"chmod 600 /home/[USER]/.ssh/authorized_keys", user)
if err := pipeCmdPassThrough("wsl", key+"\n", "-d", dist, "sh", "-c", userAuthCmd); err != nil {
- return errors.Wrapf(err, "could not create '%s' authorized keys on guest OS", v.RemoteUsername)
+ return fmt.Errorf("could not create '%s' authorized keys on guest OS: %w", v.RemoteUsername, err)
}
return nil
@@ -486,25 +525,25 @@ func createKeys(v *MachineVM, dist string, sshDir string) error {
func configureSystem(v *MachineVM, dist string) error {
user := v.RemoteUsername
if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", fmt.Sprintf(appendPort, v.Port, v.Port)); err != nil {
- return errors.Wrap(err, "could not configure SSH port for guest OS")
+ return fmt.Errorf("could not configure SSH port for guest OS: %w", err)
}
if err := pipeCmdPassThrough("wsl", withUser(configServices, user), "-d", dist, "sh"); err != nil {
- return errors.Wrap(err, "could not configure systemd settings for guest OS")
+ return fmt.Errorf("could not configure systemd settings for guest OS: %w", err)
}
if err := pipeCmdPassThrough("wsl", sudoers, "-d", dist, "sh", "-c", "cat >> /etc/sudoers"); err != nil {
- return errors.Wrap(err, "could not add wheel to sudoers")
+ return fmt.Errorf("could not add wheel to sudoers: %w", err)
}
if err := pipeCmdPassThrough("wsl", overrideSysusers, "-d", dist, "sh", "-c",
"cat > /etc/systemd/system/systemd-sysusers.service.d/override.conf"); err != nil {
- return errors.Wrap(err, "could not generate systemd-sysusers override for guest OS")
+ return fmt.Errorf("could not generate systemd-sysusers override for guest OS: %w", err)
}
lingerCmd := withUser("cat > /home/[USER]/.config/systemd/[USER]/linger-example.service", user)
if err := pipeCmdPassThrough("wsl", lingerService, "-d", dist, "sh", "-c", lingerCmd); err != nil {
- return errors.Wrap(err, "could not generate linger service for guest OS")
+ return fmt.Errorf("could not generate linger service for guest OS: %w", err)
}
if err := enableUserLinger(v, dist); err != nil {
@@ -512,15 +551,49 @@ func configureSystem(v *MachineVM, dist string) error {
}
if err := pipeCmdPassThrough("wsl", withUser(lingerSetup, user), "-d", dist, "sh"); err != nil {
- return errors.Wrap(err, "could not configure systemd settomgs for guest OS")
+ return fmt.Errorf("could not configure systemd settomgs for guest OS: %w", err)
}
if err := pipeCmdPassThrough("wsl", containersConf, "-d", dist, "sh", "-c", "cat > /etc/containers/containers.conf"); err != nil {
- return errors.Wrap(err, "could not create containers.conf for guest OS")
+ return fmt.Errorf("could not create containers.conf for guest OS: %w", err)
}
if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", "echo wsl > /etc/containers/podman-machine"); err != nil {
- return errors.Wrap(err, "could not create podman-machine file for guest OS")
+ return fmt.Errorf("could not create podman-machine file for guest OS: %w", err)
+ }
+
+ return nil
+}
+
+func configureProxy(dist string, useProxy bool) error {
+ if !useProxy {
+ _ = runCmdPassThrough("wsl", "-d", dist, "sh", "-c", clearProxySettings)
+ return nil
+ }
+ var content string
+ for i, key := range config.ProxyEnv {
+ if value, _ := os.LookupEnv(key); len(value) > 0 {
+ var suffix string
+ if i < (len(config.ProxyEnv) - 1) {
+ suffix = "|"
+ }
+ content = fmt.Sprintf("%s%s=\"%s\"%s", content, key, value, suffix)
+ }
+ }
+
+ if err := pipeCmdPassThrough("wsl", content, "-d", dist, "sh", "-c", proxyConfigAttempt); err != nil {
+ const failMessage = "Failure creating proxy configuration"
+ if exitErr, isExit := err.(*exec.ExitError); isExit && exitErr.ExitCode() != 42 {
+ return fmt.Errorf("%v: %w", failMessage, err)
+ }
+
+ fmt.Println("Installing proxy support")
+ _ = pipeCmdPassThrough("wsl", proxyConfigSetup, "-d", dist, "sh", "-c",
+ "cat > /usr/local/bin/proxyinit; chmod 755 /usr/local/bin/proxyinit")
+
+ if err = pipeCmdPassThrough("wsl", content, "-d", dist, "/usr/local/bin/proxyinit"); err != nil {
+ return fmt.Errorf("%v: %w", failMessage, err)
+ }
}
return nil
@@ -529,7 +602,7 @@ func configureSystem(v *MachineVM, dist string) error {
func enableUserLinger(v *MachineVM, dist string) error {
lingerCmd := "mkdir -p /var/lib/systemd/linger; touch /var/lib/systemd/linger/" + v.RemoteUsername
if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", lingerCmd); err != nil {
- return errors.Wrap(err, "could not enable linger for remote user on guest OS")
+ return fmt.Errorf("could not enable linger for remote user on guest OS: %w", err)
}
return nil
@@ -538,21 +611,26 @@ func enableUserLinger(v *MachineVM, dist string) error {
func installScripts(dist string) error {
if err := pipeCmdPassThrough("wsl", enterns, "-d", dist, "sh", "-c",
"cat > /usr/local/bin/enterns; chmod 755 /usr/local/bin/enterns"); err != nil {
- return errors.Wrap(err, "could not create enterns script for guest OS")
+ return fmt.Errorf("could not create enterns script for guest OS: %w", err)
}
if err := pipeCmdPassThrough("wsl", profile, "-d", dist, "sh", "-c",
"cat > /etc/profile.d/enterns.sh"); err != nil {
- return errors.Wrap(err, "could not create motd profile script for guest OS")
+ return fmt.Errorf("could not create motd profile script for guest OS: %w", err)
}
if err := pipeCmdPassThrough("wsl", wslmotd, "-d", dist, "sh", "-c", "cat > /etc/wslmotd"); err != nil {
- return errors.Wrap(err, "could not create a WSL MOTD for guest OS")
+ return fmt.Errorf("could not create a WSL MOTD for guest OS: %w", err)
}
if err := pipeCmdPassThrough("wsl", bootstrap, "-d", dist, "sh", "-c",
"cat > /root/bootstrap; chmod 755 /root/bootstrap"); err != nil {
- return errors.Wrap(err, "could not create bootstrap script for guest OS")
+ return fmt.Errorf("could not create bootstrap script for guest OS: %w", err)
+ }
+
+ if err := pipeCmdPassThrough("wsl", proxyConfigSetup, "-d", dist, "sh", "-c",
+ "cat > /usr/local/bin/proxyinit; chmod 755 /usr/local/bin/proxyinit"); err != nil {
+ return fmt.Errorf("could not create proxyinit script for guest OS: %w", err)
}
return nil
@@ -595,10 +673,10 @@ func checkAndInstallWSL(opts machine.InitOptions) (bool, error) {
func attemptFeatureInstall(opts machine.InitOptions, admin bool) error {
if !winVersionAtLeast(10, 0, 18362) {
- return errors.Errorf("Your version of Windows does not support WSL. Update to Windows 10 Build 19041 or later")
+ return errors.New("your version of Windows does not support WSL. Update to Windows 10 Build 19041 or later")
} else if !winVersionAtLeast(10, 0, 19041) {
fmt.Fprint(os.Stderr, wslOldVersion)
- return errors.Errorf("WSL can not be automatically installed")
+ return errors.New("the WSL can not be automatically installed")
}
message := "WSL is not installed on this system, installing it.\n\n"
@@ -612,7 +690,7 @@ func attemptFeatureInstall(opts machine.InitOptions, admin bool) error {
"If you prefer, you may abort now, and perform a manual installation using the \"wsl --install\" command."
if !opts.ReExec && MessageBox(message, "Podman Machine", false) != 1 {
- return errors.Errorf("WSL installation aborted")
+ return errors.New("the WSL installation aborted")
}
if !opts.ReExec && !admin {
@@ -648,12 +726,12 @@ func installWsl() error {
defer log.Close()
if err := runCmdPassThroughTee(log, "dism", "/online", "/enable-feature",
"/featurename:Microsoft-Windows-Subsystem-Linux", "/all", "/norestart"); isMsiError(err) {
- return errors.Wrap(err, "could not enable WSL Feature")
+ return fmt.Errorf("could not enable WSL Feature: %w", err)
}
if err = runCmdPassThroughTee(log, "dism", "/online", "/enable-feature",
"/featurename:VirtualMachinePlatform", "/all", "/norestart"); isMsiError(err) {
- return errors.Wrap(err, "could not enable Virtual Machine Feature")
+ return fmt.Errorf("could not enable Virtual Machine Feature: %w", err)
}
log.Close()
@@ -687,7 +765,7 @@ func installWslKernel() error {
}
if err != nil {
- return errors.Wrap(err, "could not install WSL Kernel")
+ return fmt.Errorf("could not install WSL Kernel: %w", err)
}
return nil
@@ -816,6 +894,26 @@ func pipeCmdPassThrough(name string, input string, arg ...string) error {
return cmd.Run()
}
+func setupWslProxyEnv() (hasProxy bool) {
+ current, _ := os.LookupEnv("WSLENV")
+ for _, key := range config.ProxyEnv {
+ if value, _ := os.LookupEnv(key); len(value) < 1 {
+ continue
+ }
+
+ hasProxy = true
+ delim := ""
+ if len(current) > 0 {
+ delim = ":"
+ }
+ current = fmt.Sprintf("%s%s%s/u", current, delim, key)
+ }
+ if hasProxy {
+ os.Setenv("WSLENV", current)
+ }
+ return
+}
+
func (v *MachineVM) Set(_ string, opts machine.SetOptions) ([]error, error) {
// If one setting fails to be applied, the others settings will not fail and still be applied.
// The setting(s) that failed to be applied will have its errors returned in setErrors
@@ -824,23 +922,23 @@ func (v *MachineVM) Set(_ string, opts machine.SetOptions) ([]error, error) {
if opts.Rootful != nil && v.Rootful != *opts.Rootful {
err := v.setRootful(*opts.Rootful)
if err != nil {
- setErrors = append(setErrors, errors.Wrapf(err, "error setting rootful option"))
+ setErrors = append(setErrors, fmt.Errorf("error setting rootful option: %w", err))
} else {
v.Rootful = *opts.Rootful
}
}
if opts.CPUs != nil {
- setErrors = append(setErrors, errors.Errorf("changing CPUs not suppored for WSL machines"))
+ setErrors = append(setErrors, errors.New("changing CPUs not supported for WSL machines"))
}
if opts.Memory != nil {
- setErrors = append(setErrors, errors.Errorf("changing memory not suppored for WSL machines"))
+ setErrors = append(setErrors, errors.New("changing memory not supported for WSL machines"))
}
if opts.DiskSize != nil {
- setErrors = append(setErrors, errors.Errorf("changing Disk Size not suppored for WSL machines"))
+ setErrors = append(setErrors, errors.New("changing Disk Size not supported for WSL machines"))
}
return setErrors, v.writeConfig()
@@ -848,14 +946,18 @@ func (v *MachineVM) Set(_ string, opts machine.SetOptions) ([]error, error) {
func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
if v.isRunning() {
- return errors.Errorf("%q is already running", name)
+ return fmt.Errorf("%q is already running", name)
}
dist := toDist(name)
+ useProxy := setupWslProxyEnv()
+ if err := configureProxy(dist, useProxy); err != nil {
+ return err
+ }
err := runCmdPassThrough("wsl", "-d", dist, "/root/bootstrap")
if err != nil {
- return errors.Wrap(err, "WSL bootstrap script failed")
+ return fmt.Errorf("the WSL bootstrap script failed: %w", err)
}
if !v.Rootful {
@@ -897,7 +999,7 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
func launchWinProxy(v *MachineVM) (bool, string, error) {
machinePipe := toDist(v.Name)
if !pipeAvailable(machinePipe) {
- return false, "", errors.Errorf("could not start api proxy since expected pipe is not available: %s", machinePipe)
+ return false, "", fmt.Errorf("could not start api proxy since expected pipe is not available: %s", machinePipe)
}
globalName := false
@@ -945,7 +1047,7 @@ func launchWinProxy(v *MachineVM) (bool, string, error) {
return globalName, pipePrefix + waitPipe, waitPipeExists(waitPipe, 30, func() error {
active, exitCode := getProcessState(cmd.Process.Pid)
if !active {
- return errors.Errorf("win-sshproxy.exe failed to start, exit code: %d (see windows event logs)", exitCode)
+ return fmt.Errorf("win-sshproxy.exe failed to start, exit code: %d (see windows event logs)", exitCode)
}
return nil
@@ -1083,7 +1185,7 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
}
if !wsl || !sysd {
- return errors.Errorf("%q is not running", v.Name)
+ return fmt.Errorf("%q is not running", v.Name)
}
_, _, _ = v.updateTimeStamps(true)
@@ -1095,12 +1197,12 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
cmd := exec.Command("wsl", "-d", dist, "sh")
cmd.Stdin = strings.NewReader(waitTerm)
if err = cmd.Start(); err != nil {
- return errors.Wrap(err, "Error executing wait command")
+ return fmt.Errorf("executing wait command: %w", err)
}
exitCmd := exec.Command("wsl", "-d", dist, "/usr/local/bin/enterns", "systemctl", "exit", "0")
if err = exitCmd.Run(); err != nil {
- return errors.Wrap(err, "Error stopping sysd")
+ return fmt.Errorf("stopping sysd: %w", err)
}
if err = cmd.Wait(); err != nil {
@@ -1181,7 +1283,7 @@ func (v *MachineVM) Remove(name string, opts machine.RemoveOptions) (string, fun
var files []string
if v.isRunning() {
- return "", nil, errors.Errorf("running vm %q cannot be destroyed", v.Name)
+ return "", nil, fmt.Errorf("running vm %q cannot be destroyed", v.Name)
}
// Collect all the files that need to be destroyed
@@ -1253,7 +1355,7 @@ func (v *MachineVM) isRunning() bool {
// Added ssh function to VM interface: pkg/machine/config/go : line 58
func (v *MachineVM) SSH(name string, opts machine.SSHOptions) error {
if !v.isRunning() {
- return errors.Errorf("vm %q is not running.", v.Name)
+ return fmt.Errorf("vm %q is not running.", v.Name)
}
username := opts.Username
@@ -1312,6 +1414,7 @@ func GetVMInfos() ([]*machine.ListResponse, error) {
listEntry.RemoteUsername = vm.RemoteUsername
listEntry.Port = vm.Port
listEntry.IdentityPath = vm.IdentityPath
+ listEntry.Starting = false
running := vm.isRunning()
listEntry.CreatedAt, listEntry.LastUp, _ = vm.updateTimeStamps(running)
@@ -1477,7 +1580,7 @@ func (v *MachineVM) getResources() (resources machine.ResourceConfig) {
return
}
-// RemoveAndCleanMachines removes all machine and cleans up any other files associatied with podman machine
+// RemoveAndCleanMachines removes all machines and cleans up any other files associated with podman machine
func (p *Provider) RemoveAndCleanMachines() error {
var (
vm machine.VM
@@ -1552,3 +1655,7 @@ func (p *Provider) RemoveAndCleanMachines() error {
}
return prevErr
}
+
+func (p *Provider) VMType() string {
+ return vmtype
+}
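
The setupWslProxyEnv helper above relies on WSL's WSLENV mechanism, where a variable name suffixed with /u is forwarded from the Windows host into the distribution. A compact sketch of that idea, with illustrative names and no claim to match the helper's exact behaviour:

    package sketch

    import (
        "fmt"
        "os"
    )

    // shareWithWSL appends the given names to WSLENV with the /u flag so the
    // corresponding host environment variables become visible inside WSL.
    func shareWithWSL(names ...string) {
        wslenv := os.Getenv("WSLENV")
        for _, name := range names {
            if os.Getenv(name) == "" {
                continue // nothing to forward for this variable
            }
            if wslenv != "" {
                wslenv += ":"
            }
            wslenv = fmt.Sprintf("%s%s/u", wslenv, name)
        }
        os.Setenv("WSLENV", wslenv)
    }
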
diff --git a/pkg/machine/wsl/util_windows.go b/pkg/machine/wsl/util_windows.go
index b5c28e015..43f54fdd4 100644
--- a/pkg/machine/wsl/util_windows.go
+++ b/pkg/machine/wsl/util_windows.go
@@ -2,6 +2,7 @@ package wsl
import (
"encoding/base64"
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -11,7 +12,6 @@ import (
"unicode/utf16"
"unsafe"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/registry"
@@ -157,9 +157,9 @@ func relaunchElevatedWait() error {
case syscall.WAIT_OBJECT_0:
break
case syscall.WAIT_FAILED:
- return errors.Wrap(err, "could not wait for process, failed")
+ return fmt.Errorf("could not wait for process, failed: %w", err)
default:
- return errors.Errorf("could not wait for process, unknown error")
+ return errors.New("could not wait for process, unknown error")
}
var code uint32
if err := syscall.GetExitCodeProcess(handle, &code); err != nil {
@@ -174,7 +174,7 @@ func relaunchElevatedWait() error {
func wrapMaybe(err error, message string) error {
if err != nil {
- return errors.Wrap(err, message)
+ return fmt.Errorf("%v: %w", message, err)
}
return errors.New(message)
@@ -182,10 +182,10 @@ func wrapMaybe(err error, message string) error {
func wrapMaybef(err error, format string, args ...interface{}) error {
if err != nil {
- return errors.Wrapf(err, format, args...)
+ return fmt.Errorf(format+": %w", append(args, err)...)
}
- return errors.Errorf(format, args...)
+ return fmt.Errorf(format, args...)
}
func reboot() error {
@@ -202,14 +202,14 @@ func reboot() error {
dataDir, err := homedir.GetDataHome()
if err != nil {
- return errors.Wrap(err, "could not determine data directory")
+ return fmt.Errorf("could not determine data directory: %w", err)
}
if err := os.MkdirAll(dataDir, 0755); err != nil {
- return errors.Wrap(err, "could not create data directory")
+ return fmt.Errorf("could not create data directory: %w", err)
}
commFile := filepath.Join(dataDir, "podman-relaunch.dat")
if err := ioutil.WriteFile(commFile, []byte(encoded), 0600); err != nil {
- return errors.Wrap(err, "could not serialize command state")
+ return fmt.Errorf("could not serialize command state: %w", err)
}
command := fmt.Sprintf(pShellLaunch, commFile)
@@ -244,7 +244,7 @@ func reboot() error {
procExit := user32.NewProc("ExitWindowsEx")
if ret, _, err := procExit.Call(EWX_REBOOT|EWX_RESTARTAPPS|EWX_FORCEIFHUNG,
SHTDN_REASON_MAJOR_APPLICATION|SHTDN_REASON_MINOR_INSTALLATION|SHTDN_REASON_FLAG_PLANNED); ret != 1 {
- return errors.Wrap(err, "reboot failed")
+ return fmt.Errorf("reboot failed: %w", err)
}
return nil
@@ -262,19 +262,19 @@ func obtainShutdownPrivilege() error {
var hToken uintptr
if ret, _, err := OpenProcessToken.Call(uintptr(proc), TOKEN_ADJUST_PRIVILEGES|TOKEN_QUERY, uintptr(unsafe.Pointer(&hToken))); ret != 1 {
- return errors.Wrap(err, "error opening process token")
+ return fmt.Errorf("error opening process token: %w", err)
}
var privs TokenPrivileges
if ret, _, err := LookupPrivilegeValue.Call(uintptr(0), uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(SeShutdownName))), uintptr(unsafe.Pointer(&(privs.privileges[0].luid)))); ret != 1 {
- return errors.Wrap(err, "error looking up shutdown privilege")
+ return fmt.Errorf("error looking up shutdown privilege: %w", err)
}
privs.privilegeCount = 1
privs.privileges[0].attributes = SE_PRIVILEGE_ENABLED
if ret, _, err := AdjustTokenPrivileges.Call(hToken, 0, uintptr(unsafe.Pointer(&privs)), 0, uintptr(0), 0); ret != 1 {
- return errors.Wrap(err, "error enabling shutdown privilege on token")
+ return fmt.Errorf("error enabling shutdown privilege on token: %w", err)
}
return nil
@@ -295,13 +295,13 @@ func getProcessState(pid int) (active bool, exitCode int) {
func addRunOnceRegistryEntry(command string) error {
k, _, err := registry.CreateKey(registry.CURRENT_USER, `Software\Microsoft\Windows\CurrentVersion\RunOnce`, registry.WRITE)
if err != nil {
- return errors.Wrap(err, "could not open RunOnce registry entry")
+ return fmt.Errorf("could not open RunOnce registry entry: %w", err)
}
defer k.Close()
if err := k.SetExpandStringValue("podman-machine", command); err != nil {
- return errors.Wrap(err, "could not open RunOnce registry entry")
+ return fmt.Errorf("could not open RunOnce registry entry: %w", err)
}
return nil
diff --git a/pkg/namespaces/namespaces.go b/pkg/namespaces/namespaces.go
index c95f8e275..8eacb8da7 100644
--- a/pkg/namespaces/namespaces.go
+++ b/pkg/namespaces/namespaces.go
@@ -112,7 +112,7 @@ func (n UsernsMode) IsDefaultValue() bool {
return n == "" || n == defaultType
}
-// GetAutoOptions returns a AutoUserNsOptions with the settings to setup automatically
+// GetAutoOptions returns an AutoUserNsOptions with the settings to automatically set up
// a user namespace.
func (n UsernsMode) GetAutoOptions() (*types.AutoUserNsOptions, error) {
parts := strings.SplitN(string(n), ":", 2)
diff --git a/pkg/parallel/parallel.go b/pkg/parallel/parallel.go
index 4da7e0f89..7209f8aca 100644
--- a/pkg/parallel/parallel.go
+++ b/pkg/parallel/parallel.go
@@ -2,9 +2,10 @@ package parallel
import (
"context"
+ "errors"
+ "fmt"
"sync"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
)
@@ -59,7 +60,7 @@ func Enqueue(ctx context.Context, fn func() error) <-chan error {
defer close(retChan)
if err := jobControl.Acquire(ctx, 1); err != nil {
- retChan <- errors.Wrapf(err, "error acquiring job control semaphore")
+ retChan <- fmt.Errorf("error acquiring job control semaphore: %w", err)
return
}
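
For context on the Enqueue hunk above, a small sketch of bounding concurrency with a weighted semaphore and reporting the result on a channel; the function and parameter names are illustrative, not the package's API:

    package sketch

    import (
        "context"
        "fmt"

        "golang.org/x/sync/semaphore"
    )

    // enqueue runs fn once a slot in sem is available and delivers its
    // error (or nil) on the returned channel. A caller would typically
    // create sem once, e.g. semaphore.NewWeighted(int64(maxJobs)).
    func enqueue(ctx context.Context, sem *semaphore.Weighted, fn func() error) <-chan error {
        retChan := make(chan error, 1)
        go func() {
            defer close(retChan)
            if err := sem.Acquire(ctx, 1); err != nil {
                retChan <- fmt.Errorf("error acquiring job control semaphore: %w", err)
                return
            }
            defer sem.Release(1)
            retChan <- fn()
        }()
        return retChan
    }
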
diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go
index 5fe1b83e2..7fc01de31 100644
--- a/pkg/ps/ps.go
+++ b/pkg/ps/ps.go
@@ -1,6 +1,8 @@
package ps
import (
+ "errors"
+ "fmt"
"os"
"path/filepath"
"regexp"
@@ -16,7 +18,6 @@ import (
psdefine "github.com/containers/podman/v4/pkg/ps/define"
"github.com/containers/storage"
"github.com/containers/storage/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -65,7 +66,7 @@ func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOp
for _, con := range cons {
listCon, err := ListContainerBatch(runtime, con, options)
switch {
- case errors.Cause(err) == define.ErrNoSuchCtr:
+ case errors.Is(err, define.ErrNoSuchCtr):
continue
case err != nil:
return nil, err
@@ -108,7 +109,7 @@ func GetExternalContainerLists(runtime *libpod.Runtime) ([]entities.ListContaine
for _, con := range externCons {
listCon, err := ListStorageContainer(runtime, con)
switch {
- case errors.Cause(err) == types.ErrLoadError:
+ case errors.Is(err, types.ErrLoadError):
continue
case err != nil:
return nil, err
@@ -138,19 +139,19 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities
batchErr := ctr.Batch(func(c *libpod.Container) error {
if opts.Sync {
if err := c.Sync(); err != nil {
- return errors.Wrapf(err, "unable to update container state from OCI runtime")
+ return fmt.Errorf("unable to update container state from OCI runtime: %w", err)
}
}
conConfig = c.Config()
conState, err = c.State()
if err != nil {
- return errors.Wrapf(err, "unable to obtain container state")
+ return fmt.Errorf("unable to obtain container state: %w", err)
}
exitCode, exited, err = c.ExitCode()
if err != nil {
- return errors.Wrapf(err, "unable to obtain container exit code")
+ return fmt.Errorf("unable to obtain container exit code: %w", err)
}
startedTime, err = c.StartedTime()
if err != nil {
@@ -163,7 +164,7 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities
pid, err = c.PID()
if err != nil {
- return errors.Wrapf(err, "unable to obtain container pid")
+ return fmt.Errorf("unable to obtain container pid: %w", err)
}
if !opts.Size && !opts.Namespace {
@@ -237,8 +238,8 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities
if opts.Pod && len(conConfig.Pod) > 0 {
podName, err := rt.GetName(conConfig.Pod)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
- return entities.ListContainer{}, errors.Wrapf(define.ErrNoSuchPod, "could not find container %s pod (id %s) in state", conConfig.ID, conConfig.Pod)
+ if errors.Is(err, define.ErrNoSuchCtr) {
+ return entities.ListContainer{}, fmt.Errorf("could not find container %s pod (id %s) in state: %w", conConfig.ID, conConfig.Pod, define.ErrNoSuchPod)
}
return entities.ListContainer{}, err
}
@@ -282,7 +283,7 @@ func ListStorageContainer(rt *libpod.Runtime, ctr storage.Container) (entities.L
buildahCtr, err := rt.IsBuildahContainer(ctr.ID)
if err != nil {
- return ps, errors.Wrapf(err, "error determining buildah container for container %s", ctr.ID)
+ return ps, fmt.Errorf("error determining buildah container for container %s: %w", ctr.ID, err)
}
if buildahCtr {
@@ -311,7 +312,7 @@ func ListStorageContainer(rt *libpod.Runtime, ctr storage.Container) (entities.L
func getNamespaceInfo(path string) (string, error) {
val, err := os.Readlink(path)
if err != nil {
- return "", errors.Wrapf(err, "error getting info from %q", path)
+ return "", fmt.Errorf("error getting info from %q: %w", path, err)
}
return getStrFromSquareBrackets(val), nil
}
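
The ErrNoSuchPod change above wraps a named sentinel rather than an underlying error; a quick sketch of that direction of wrapping, with an invented sentinel and lookup function that are not real podman identifiers:

    package main

    import (
        "errors"
        "fmt"
    )

    // errNoSuchThing stands in for a package-level sentinel such as
    // define.ErrNoSuchPod; it is defined here only for the example.
    var errNoSuchThing = errors.New("no such thing")

    func lookup(id string) error {
        // add context while keeping the sentinel matchable via errors.Is
        return fmt.Errorf("could not find %q in state: %w", id, errNoSuchThing)
    }

    func main() {
        err := lookup("abc123")
        fmt.Println(errors.Is(err, errNoSuchThing)) // prints: true
    }
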
diff --git a/pkg/resolvconf/dns/resolvconf.go b/pkg/resolvconf/dns/resolvconf.go
deleted file mode 100644
index cb4bd1033..000000000
--- a/pkg/resolvconf/dns/resolvconf.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Originally from github.com/docker/libnetwork/resolvconf/dns
-
-package dns
-
-import (
- "regexp"
-)
-
-// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
-const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`
-
-// IPv4Localhost is a regex pattern for IPv4 localhost address range.
-const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})`
-
-var localhostIPRegexp = regexp.MustCompile(IPLocalhost)
-var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost)
-
-// IsLocalhost returns true if ip matches the localhost IP regular expression.
-// Used for determining if nameserver settings are being passed which are
-// localhost addresses
-func IsLocalhost(ip string) bool {
- return localhostIPRegexp.MatchString(ip)
-}
-
-// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression.
-func IsIPv4Localhost(ip string) bool {
- return localhostIPv4Regexp.MatchString(ip)
-}
diff --git a/pkg/rootless/rootless.go b/pkg/rootless/rootless.go
index d7143f549..94535f45e 100644
--- a/pkg/rootless/rootless.go
+++ b/pkg/rootless/rootless.go
@@ -50,7 +50,7 @@ func TryJoinPauseProcess(pausePidPath string) (bool, int, error) {
if err != nil {
// It is still failing. We can safely remove it.
os.Remove(pausePidPath)
- return false, -1, nil // nolint: nilerr
+ return false, -1, nil //nolint: nilerr
}
return became, ret, err
}
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index 94bd40f86..3588313c6 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -178,7 +178,7 @@ get_cmd_line_args ()
char *tmp = realloc (buffer, allocated);
if (tmp == NULL)
return NULL;
- buffer = tmp;
+ buffer = tmp;
}
}
@@ -243,7 +243,7 @@ can_use_shortcut ()
}
if (argv[argc+1] != NULL && (strcmp (argv[argc], "container") == 0 ||
- strcmp (argv[argc], "image") == 0) &&
+ strcmp (argv[argc], "image") == 0) &&
(strcmp (argv[argc+1], "mount") == 0 || strcmp (argv[argc+1], "scp") == 0))
{
ret = false;
@@ -512,7 +512,9 @@ create_pause_process (const char *pause_pid_file_path, char **argv)
r = TEMP_FAILURE_RETRY (read (p[0], &b, 1));
close (p[0]);
- reexec_in_user_namespace_wait (pid, 0);
+ r = reexec_in_user_namespace_wait (pid, 0);
+ if (r != 0)
+ return -1;
return r == 1 && b == '0' ? 0 : -1;
}
@@ -757,6 +759,7 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path)
}
execvp (argv[0], argv);
+ fprintf (stderr, "failed to execvp %s: %m\n", argv[0]);
_exit (EXIT_FAILURE);
}
@@ -788,7 +791,10 @@ copy_file_to_fd (const char *file_to_read, int outfd)
fd = open (file_to_read, O_RDONLY);
if (fd < 0)
- return fd;
+ {
+ fprintf (stderr, "open `%s`: %m\n", file_to_read);
+ return fd;
+ }
for (;;)
{
@@ -796,7 +802,10 @@ copy_file_to_fd (const char *file_to_read, int outfd)
r = TEMP_FAILURE_RETRY (read (fd, buf, sizeof buf));
if (r < 0)
- return r;
+ {
+ fprintf (stderr, "read from `%s`: %m\n", file_to_read);
+ return r;
+ }
if (r == 0)
break;
@@ -805,7 +814,10 @@ copy_file_to_fd (const char *file_to_read, int outfd)
{
w = TEMP_FAILURE_RETRY (write (outfd, &buf[t], r - t));
if (w < 0)
- return w;
+ {
+ fprintf (stderr, "write file to output fd `%s`: %m\n", file_to_read);
+ return w;
+ }
t += w;
}
}
diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go
index 5af9a978b..b0012b32b 100644
--- a/pkg/rootless/rootless_linux.go
+++ b/pkg/rootless/rootless_linux.go
@@ -6,6 +6,7 @@ package rootless
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -23,7 +24,6 @@ import (
"github.com/containers/storage/pkg/idtools"
pmount "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/unshare"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
"golang.org/x/sys/unix"
@@ -126,7 +126,7 @@ func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) err
}
path, err := exec.LookPath(tool)
if err != nil {
- return errors.Wrapf(err, "command required for rootless mode with multiple IDs")
+ return fmt.Errorf("command required for rootless mode with multiple IDs: %w", err)
}
appendTriplet := func(l []string, a, b, c int) []string {
@@ -143,7 +143,7 @@ func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) err
what = "GID"
where = "/etc/subgid"
}
- return errors.Errorf("invalid configuration: the specified mapping %d:%d in %q includes the user %s", i.HostID, i.Size, where, what)
+ return fmt.Errorf("invalid configuration: the specified mapping %d:%d in %q includes the user %s", i.HostID, i.Size, where, what)
}
args = appendTriplet(args, i.ContainerID+1, i.HostID, i.Size)
}
@@ -154,13 +154,13 @@ func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) err
if output, err := cmd.CombinedOutput(); err != nil {
logrus.Errorf("running `%s`: %s", strings.Join(args, " "), output)
- errorStr := fmt.Sprintf("cannot setup namespace using %q", path)
+ errorStr := fmt.Sprintf("cannot set up namespace using %q", path)
if isSet, err := unshare.IsSetID(cmd.Path, mode, cap); err != nil {
logrus.Errorf("Failed to check for %s on %s: %v", idtype, path, err)
} else if !isSet {
errorStr = fmt.Sprintf("%s: should have %s or have filecaps %s", errorStr, idtype, idtype)
}
- return errors.Wrapf(err, errorStr)
+ return fmt.Errorf("%v: %w", errorStr, err)
}
return nil
}
@@ -182,7 +182,7 @@ func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) {
pidC := C.reexec_userns_join(C.int(pid), cPausePid)
if int(pidC) < 0 {
- return false, -1, errors.Errorf("cannot re-exec process")
+ return false, -1, fmt.Errorf("cannot re-exec process to join the existing user namespace")
}
ret := C.reexec_in_user_namespace_wait(pidC, 0)
@@ -303,7 +303,7 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo
if retErr != nil && pid > 0 {
if err := unix.Kill(pid, unix.SIGKILL); err != nil {
if err != unix.ESRCH {
- logrus.Errorf("Failed to cleanup process %d: %v", pid, err)
+ logrus.Errorf("Failed to clean up process %d: %v", pid, err)
}
}
C.reexec_in_user_namespace_wait(C.int(pid), 0)
@@ -313,7 +313,7 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo
pidC := C.reexec_in_user_namespace(C.int(r.Fd()), cPausePid, cFileToRead, fileOutputFD)
pid = int(pidC)
if pid < 0 {
- return false, -1, errors.Errorf("cannot re-exec process")
+ return false, -1, fmt.Errorf("cannot re-exec process")
}
uids, gids, err := GetConfiguredMappings()
@@ -343,13 +343,13 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo
setgroups := fmt.Sprintf("/proc/%d/setgroups", pid)
err = ioutil.WriteFile(setgroups, []byte("deny\n"), 0666)
if err != nil {
- return false, -1, errors.Wrapf(err, "cannot write setgroups file")
+ return false, -1, fmt.Errorf("cannot write setgroups file: %w", err)
}
logrus.Debugf("write setgroups file exited with 0")
err = ioutil.WriteFile(uidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Geteuid())), 0666)
if err != nil {
- return false, -1, errors.Wrapf(err, "cannot write uid_map")
+ return false, -1, fmt.Errorf("cannot write uid_map: %w", err)
}
logrus.Debugf("write uid_map exited with 0")
}
@@ -369,19 +369,19 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo
if !gidsMapped {
err = ioutil.WriteFile(gidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getegid())), 0666)
if err != nil {
- return false, -1, errors.Wrapf(err, "cannot write gid_map")
+ return false, -1, fmt.Errorf("cannot write gid_map: %w", err)
}
}
_, err = w.Write([]byte("0"))
if err != nil {
- return false, -1, errors.Wrapf(err, "write to sync pipe")
+ return false, -1, fmt.Errorf("write to sync pipe: %w", err)
}
b := make([]byte, 1)
_, err = w.Read(b)
if err != nil {
- return false, -1, errors.Wrapf(err, "read from sync pipe")
+ return false, -1, fmt.Errorf("read from sync pipe: %w", err)
}
if fileOutput != nil {
@@ -461,13 +461,8 @@ func BecomeRootInUserNS(pausePid string) (bool, int, error) {
// different uidmap and the unprivileged user has no way to read the
// file owned by the root in the container.
func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []string) (bool, int, error) {
- if len(paths) == 0 {
- return BecomeRootInUserNS(pausePidPath)
- }
-
var lastErr error
var pausePid int
- foundProcess := false
for _, path := range paths {
if !needNewNamespace {
@@ -479,12 +474,9 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st
pausePid, err = strconv.Atoi(string(data))
if err != nil {
- lastErr = errors.Wrapf(err, "cannot parse file %s", path)
+ lastErr = fmt.Errorf("cannot parse file %q: %w", path, err)
continue
}
-
- lastErr = nil
- break
} else {
r, w, err := os.Pipe()
if err != nil {
@@ -511,26 +503,29 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st
n, err := r.Read(b)
if err != nil {
- lastErr = errors.Wrapf(err, "cannot read %s\n", path)
+ lastErr = fmt.Errorf("cannot read %q: %w", path, err)
continue
}
pausePid, err = strconv.Atoi(string(b[:n]))
- if err == nil && unix.Kill(pausePid, 0) == nil {
- foundProcess = true
- lastErr = nil
- break
+ if err != nil {
+ lastErr = err
+ continue
}
}
- }
- if !foundProcess && pausePidPath != "" {
- return BecomeRootInUserNS(pausePidPath)
+
+ if pausePid > 0 && unix.Kill(pausePid, 0) == nil {
+ joined, pid, err := joinUserAndMountNS(uint(pausePid), pausePidPath)
+ if err == nil {
+ return joined, pid, nil
+ }
+ lastErr = err
+ }
}
if lastErr != nil {
return false, 0, lastErr
}
-
- return joinUserAndMountNS(uint(pausePid), pausePidPath)
+ return false, 0, fmt.Errorf("could not find any running process: %w", unix.ESRCH)
}
// ReadMappingsProc parses and returns the ID mappings at the specified path.
@@ -550,7 +545,7 @@ func ReadMappingsProc(path string) ([]idtools.IDMap, error) {
if err == io.EOF {
return mappings, nil
}
- return nil, errors.Wrapf(err, "cannot read line from %s", path)
+ return nil, fmt.Errorf("cannot read line from %s: %w", path, err)
}
if line == nil {
return mappings, nil
@@ -558,7 +553,7 @@ func ReadMappingsProc(path string) ([]idtools.IDMap, error) {
containerID, hostID, size := 0, 0, 0
if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil {
- return nil, errors.Wrapf(err, "cannot parse %s", string(line))
+ return nil, fmt.Errorf("cannot parse %s: %w", string(line), err)
}
mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size})
}
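
The rootless hunks above replace github.com/pkg/errors wrapping with the standard library's %w verb. A minimal, self-contained sketch (hypothetical sentinel and path, not Podman code) showing why errors.Is still matches the wrapped sentinel after the migration:

    package main

    import (
        "errors"
        "fmt"
    )

    // errNotFound is a hypothetical sentinel used only for illustration.
    var errNotFound = errors.New("not found")

    func lookup(path string) error {
        // Old style: errors.Wrapf(errNotFound, "cannot read %s", path)
        // New style: wrap with %w so callers can still unwrap the sentinel.
        return fmt.Errorf("cannot read %s: %w", path, errNotFound)
    }

    func main() {
        err := lookup("/proc/self/uid_map")
        fmt.Println(errors.Is(err, errNotFound)) // true: %w preserves the error chain
    }
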
diff --git a/pkg/rootless/rootless_unsupported.go b/pkg/rootless/rootless_unsupported.go
index fe164e235..a77e7e077 100644
--- a/pkg/rootless/rootless_unsupported.go
+++ b/pkg/rootless/rootless_unsupported.go
@@ -4,10 +4,10 @@
package rootless
import (
+ "errors"
"os"
"github.com/containers/storage/pkg/idtools"
- "github.com/pkg/errors"
)
// IsRootless returns whether the user is rootless
diff --git a/pkg/seccomp/seccomp.go b/pkg/seccomp/seccomp.go
index 4502c608f..d19d45960 100644
--- a/pkg/seccomp/seccomp.go
+++ b/pkg/seccomp/seccomp.go
@@ -1,9 +1,8 @@
package seccomp
import (
+ "fmt"
"sort"
-
- "github.com/pkg/errors"
)
// ContainerImageLabel is the key of the image annotation embedding a seccomp
@@ -50,5 +49,5 @@ func LookupPolicy(s string) (Policy, error) {
}
sort.Strings(keys)
- return -1, errors.Errorf("invalid seccomp policy %q: valid policies are %+q", s, keys)
+ return -1, fmt.Errorf("invalid seccomp policy %q: valid policies are %+q", s, keys)
}
diff --git a/pkg/signal/signal_common.go b/pkg/signal/signal_common.go
index 5ea67843a..fc1ecc04d 100644
--- a/pkg/signal/signal_common.go
+++ b/pkg/signal/signal_common.go
@@ -2,6 +2,8 @@ package signal
import (
"fmt"
+ "os"
+ "os/signal"
"strconv"
"strings"
"syscall"
@@ -17,7 +19,7 @@ func ParseSignal(rawSignal string) (syscall.Signal, error) {
}
return syscall.Signal(s), nil
}
- sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
+ sig, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
if !ok {
return -1, fmt.Errorf("invalid signal: %s", rawSignal)
}
@@ -32,10 +34,25 @@ func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) {
if err == nil {
return s, nil
}
- for k, v := range signalMap {
+ for k, v := range SignalMap {
if k == strings.ToUpper(basename) {
return v, nil
}
}
return -1, fmt.Errorf("invalid signal: %s", basename)
}
+
+// CatchAll catches all signals and relays them to the specified channel.
+func CatchAll(sigc chan os.Signal) {
+ handledSigs := make([]os.Signal, 0, len(SignalMap))
+ for _, s := range SignalMap {
+ handledSigs = append(handledSigs, s)
+ }
+ signal.Notify(sigc, handledSigs...)
+}
+
+// StopCatch stops catching the signals and closes the specified channel.
+func StopCatch(sigc chan os.Signal) {
+ signal.Stop(sigc)
+ close(sigc)
+}
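
With signalMap exported as SignalMap, CatchAll and StopCatch now live once in signal_common.go instead of being duplicated per platform file. A short usage sketch of the relocated helpers (the buffer size and printout are illustrative, not taken from Podman):

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/podman/v4/pkg/signal"
    )

    func main() {
        // Buffer the channel so a burst of signals is not dropped.
        sigc := make(chan os.Signal, 10)
        signal.CatchAll(sigc)
        defer signal.StopCatch(sigc)

        // Relay the first signal received, then exit.
        s := <-sigc
        fmt.Println("got signal:", s)
    }
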
diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go
index 21e09c9fe..5103b6033 100644
--- a/pkg/signal/signal_linux.go
+++ b/pkg/signal/signal_linux.go
@@ -9,8 +9,6 @@ package signal
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
- "os"
- "os/signal"
"syscall"
"golang.org/x/sys/unix"
@@ -23,8 +21,8 @@ const (
SIGWINCH = syscall.SIGWINCH // For cross-compilation with Windows
)
-// signalMap is a map of Linux signals.
-var signalMap = map[string]syscall.Signal{
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{
"ABRT": unix.SIGABRT,
"ALRM": unix.SIGALRM,
"BUS": unix.SIGBUS,
@@ -91,18 +89,3 @@ var signalMap = map[string]syscall.Signal{
"RTMAX-1": sigrtmax - 1,
"RTMAX": sigrtmax,
}
-
-// CatchAll catches all signals and relays them to the specified channel.
-func CatchAll(sigc chan os.Signal) {
- handledSigs := make([]os.Signal, 0, len(signalMap))
- for _, s := range signalMap {
- handledSigs = append(handledSigs, s)
- }
- signal.Notify(sigc, handledSigs...)
-}
-
-// StopCatch stops catching the signals and closes the specified channel.
-func StopCatch(sigc chan os.Signal) {
- signal.Stop(sigc)
- close(sigc)
-}
diff --git a/pkg/signal/signal_linux_mipsx.go b/pkg/signal/signal_linux_mipsx.go
index 52b07aaf4..cdf9ad4c5 100644
--- a/pkg/signal/signal_linux_mipsx.go
+++ b/pkg/signal/signal_linux_mipsx.go
@@ -10,8 +10,6 @@ package signal
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
- "os"
- "os/signal"
"syscall"
"golang.org/x/sys/unix"
@@ -24,8 +22,8 @@ const (
SIGWINCH = syscall.SIGWINCH
)
-// signalMap is a map of Linux signals.
-var signalMap = map[string]syscall.Signal{
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{
"ABRT": unix.SIGABRT,
"ALRM": unix.SIGALRM,
"BUS": unix.SIGBUS,
@@ -92,18 +90,3 @@ var signalMap = map[string]syscall.Signal{
"RTMAX-1": sigrtmax - 1,
"RTMAX": sigrtmax,
}
-
-// CatchAll catches all signals and relays them to the specified channel.
-func CatchAll(sigc chan os.Signal) {
- handledSigs := make([]os.Signal, 0, len(signalMap))
- for _, s := range signalMap {
- handledSigs = append(handledSigs, s)
- }
- signal.Notify(sigc, handledSigs...)
-}
-
-// StopCatch stops catching the signals and closes the specified channel.
-func StopCatch(sigc chan os.Signal) {
- signal.Stop(sigc)
- close(sigc)
-}
diff --git a/pkg/signal/signal_unix.go b/pkg/signal/signal_unix.go
index c0aa62d21..7919e3670 100644
--- a/pkg/signal/signal_unix.go
+++ b/pkg/signal/signal_unix.go
@@ -5,7 +5,6 @@
package signal
import (
- "os"
"syscall"
)
@@ -16,12 +15,12 @@ const (
SIGWINCH = syscall.SIGWINCH
)
-// signalMap is a map of Linux signals.
+// SignalMap is a map of Linux signals.
// These constants are sourced from the Linux version of golang.org/x/sys/unix
// (I don't see much risk of this changing).
// This should work as long as Podman only runs containers on Linux, which seems
// a safe assumption for now.
-var signalMap = map[string]syscall.Signal{
+var SignalMap = map[string]syscall.Signal{
"ABRT": syscall.Signal(0x6),
"ALRM": syscall.Signal(0xe),
"BUS": syscall.Signal(0x7),
@@ -88,13 +87,3 @@ var signalMap = map[string]syscall.Signal{
"RTMAX-1": sigrtmax - 1,
"RTMAX": sigrtmax,
}
-
-// CatchAll catches all signals and relays them to the specified channel.
-func CatchAll(sigc chan os.Signal) {
- panic("Unsupported on non-linux platforms")
-}
-
-// StopCatch stops catching the signals and closes the specified channel.
-func StopCatch(sigc chan os.Signal) {
- panic("Unsupported on non-linux platforms")
-}
diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go
index d8bba7c90..19ae93a61 100644
--- a/pkg/signal/signal_unsupported.go
+++ b/pkg/signal/signal_unsupported.go
@@ -5,7 +5,6 @@
package signal
import (
- "os"
"syscall"
)
@@ -16,12 +15,12 @@ const (
SIGWINCH = syscall.Signal(0xff)
)
-// signalMap is a map of Linux signals.
+// SignalMap is a map of Linux signals.
// These constants are sourced from the Linux version of golang.org/x/sys/unix
// (I don't see much risk of this changing).
// This should work as long as Podman only runs containers on Linux, which seems
// a safe assumption for now.
-var signalMap = map[string]syscall.Signal{
+var SignalMap = map[string]syscall.Signal{
"ABRT": syscall.Signal(0x6),
"ALRM": syscall.Signal(0xe),
"BUS": syscall.Signal(0x7),
@@ -88,13 +87,3 @@ var signalMap = map[string]syscall.Signal{
"RTMAX-1": sigrtmax - 1,
"RTMAX": sigrtmax,
}
-
-// CatchAll catches all signals and relays them to the specified channel.
-func CatchAll(sigc chan os.Signal) {
- panic("Unsupported on non-linux platforms")
-}
-
-// StopCatch stops catching the signals and closes the specified channel.
-func StopCatch(sigc chan os.Signal) {
- panic("Unsupported on non-linux platforms")
-}
diff --git a/pkg/specgen/config_unsupported.go b/pkg/specgen/config_unsupported.go
index a6bf77277..becfd2eaf 100644
--- a/pkg/specgen/config_unsupported.go
+++ b/pkg/specgen/config_unsupported.go
@@ -4,9 +4,10 @@
package specgen
import (
+ "errors"
+
"github.com/containers/common/libimage"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec, img *libimage.Image) (*spec.LinuxSeccomp, error) {
diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go
index 355fbc368..63d94b6b3 100644
--- a/pkg/specgen/container_validate.go
+++ b/pkg/specgen/container_validate.go
@@ -1,14 +1,15 @@
package specgen
import (
+ "errors"
+ "fmt"
"strconv"
"strings"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/containers/podman/v4/pkg/util"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
var (
@@ -23,7 +24,7 @@ var (
)
func exclusiveOptions(opt1, opt2 string) error {
- return errors.Errorf("%s and %s are mutually exclusive options", opt1, opt2)
+ return fmt.Errorf("%s and %s are mutually exclusive options", opt1, opt2)
}
// Validate verifies that the given SpecGenerator is valid and satisfies required
@@ -33,18 +34,18 @@ func (s *SpecGenerator) Validate() error {
// associated with them because those should be on the infra container.
if len(s.Pod) > 0 && s.NetNS.NSMode == FromPod {
if len(s.Networks) > 0 {
- return errors.Wrap(define.ErrNetworkOnPodContainer, "networks must be defined when the pod is created")
+ return fmt.Errorf("networks must be defined when the pod is created: %w", define.ErrNetworkOnPodContainer)
}
if len(s.PortMappings) > 0 || s.PublishExposedPorts {
- return errors.Wrap(define.ErrNetworkOnPodContainer, "published or exposed ports must be defined when the pod is created")
+ return fmt.Errorf("published or exposed ports must be defined when the pod is created: %w", define.ErrNetworkOnPodContainer)
}
if len(s.HostAdd) > 0 {
- return errors.Wrap(define.ErrNetworkOnPodContainer, "extra host entries must be specified on the pod")
+ return fmt.Errorf("extra host entries must be specified on the pod: %w", define.ErrNetworkOnPodContainer)
}
}
if s.NetNS.IsContainer() && len(s.HostAdd) > 0 {
- return errors.Wrap(ErrInvalidSpecConfig, "cannot set extra host entries when the container is joined to another containers network namespace")
+ return fmt.Errorf("cannot set extra host entries when the container is joined to another containers network namespace: %w", ErrInvalidSpecConfig)
}
//
@@ -52,22 +53,23 @@ func (s *SpecGenerator) Validate() error {
//
// Rootfs and Image cannot both populated
if len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {
- return errors.Wrap(ErrInvalidSpecConfig, "both image and rootfs cannot be simultaneously")
+ return fmt.Errorf("both image and rootfs cannot be simultaneously: %w", ErrInvalidSpecConfig)
}
// Cannot set hostname and utsns
if len(s.ContainerBasicConfig.Hostname) > 0 && !s.ContainerBasicConfig.UtsNS.IsPrivate() {
if s.ContainerBasicConfig.UtsNS.IsPod() {
- return errors.Wrap(ErrInvalidSpecConfig, "cannot set hostname when joining the pod UTS namespace")
+ return fmt.Errorf("cannot set hostname when joining the pod UTS namespace: %w", ErrInvalidSpecConfig)
}
- return errors.Wrap(ErrInvalidSpecConfig, "cannot set hostname when running in the host UTS namespace")
+
+ return fmt.Errorf("cannot set hostname when running in the host UTS namespace: %w", ErrInvalidSpecConfig)
}
// systemd values must be true, false, or always
if len(s.ContainerBasicConfig.Systemd) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.Systemd), SystemDValues) {
- return errors.Wrapf(ErrInvalidSpecConfig, "--systemd values must be one of %q", strings.Join(SystemDValues, ", "))
+ return fmt.Errorf("--systemd values must be one of %q: %w", strings.Join(SystemDValues, ", "), ErrInvalidSpecConfig)
}
// sdnotify values must be container, conmon, or ignore
if len(s.ContainerBasicConfig.SdNotifyMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.SdNotifyMode), SdNotifyModeValues) {
- return errors.Wrapf(ErrInvalidSpecConfig, "--sdnotify values must be one of %q", strings.Join(SdNotifyModeValues, ", "))
+ return fmt.Errorf("--sdnotify values must be one of %q: %w", strings.Join(SdNotifyModeValues, ", "), ErrInvalidSpecConfig)
}
//
@@ -79,12 +81,12 @@ func (s *SpecGenerator) Validate() error {
}
// imagevolumemode must be one of ignore, tmpfs, or anonymous if given
if len(s.ContainerStorageConfig.ImageVolumeMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerStorageConfig.ImageVolumeMode), ImageVolumeModeValues) {
- return errors.Errorf("invalid ImageVolumeMode %q, value must be one of %s",
+ return fmt.Errorf("invalid ImageVolumeMode %q, value must be one of %s",
s.ContainerStorageConfig.ImageVolumeMode, strings.Join(ImageVolumeModeValues, ","))
}
// shmsize conflicts with IPC namespace
if s.ContainerStorageConfig.ShmSize != nil && (s.ContainerStorageConfig.IpcNS.IsHost() || s.ContainerStorageConfig.IpcNS.IsNone()) {
- return errors.Errorf("cannot set shmsize when running in the %s IPC Namespace", s.ContainerStorageConfig.IpcNS)
+ return fmt.Errorf("cannot set shmsize when running in the %s IPC Namespace", s.ContainerStorageConfig.IpcNS)
}
//
@@ -92,7 +94,7 @@ func (s *SpecGenerator) Validate() error {
//
// userns and idmappings conflict
if s.UserNS.IsPrivate() && s.IDMappings == nil {
- return errors.Wrap(ErrInvalidSpecConfig, "IDMappings are required when not creating a User namespace")
+ return fmt.Errorf("IDMappings are required when not creating a User namespace: %w", ErrInvalidSpecConfig)
}
//
@@ -142,11 +144,11 @@ func (s *SpecGenerator) Validate() error {
for _, limit := range tmpnproc {
limitSplit := strings.SplitN(limit, "=", 2)
if len(limitSplit) < 2 {
- return errors.Wrapf(invalidUlimitFormatError, "missing = in %s", limit)
+ return fmt.Errorf("missing = in %s: %w", limit, invalidUlimitFormatError)
}
valueSplit := strings.SplitN(limitSplit[1], ":", 2)
if len(valueSplit) < 2 {
- return errors.Wrapf(invalidUlimitFormatError, "missing : in %s", limit)
+ return fmt.Errorf("missing : in %s: %w", limit, invalidUlimitFormatError)
}
hard, err := strconv.Atoi(valueSplit[0])
if err != nil {
@@ -183,10 +185,12 @@ func (s *SpecGenerator) Validate() error {
}
// Set defaults if network info is not provided
- if s.NetNS.NSMode == "" {
- s.NetNS.NSMode = Bridge
+ // when we are rootless we default to slirp4netns
+ if s.NetNS.IsPrivate() || s.NetNS.IsDefault() {
if rootless.IsRootless() {
s.NetNS.NSMode = Slirp
+ } else {
+ s.NetNS.NSMode = Bridge
}
}
if err := validateNetNS(&s.NetNS); err != nil {
@@ -194,7 +198,7 @@ func (s *SpecGenerator) Validate() error {
}
if s.NetNS.NSMode != Bridge && len(s.Networks) > 0 {
// Note that we also get the ip and mac in the networks map
- return errors.New("Networks and static ip/mac address can only be used with Bridge mode networking")
+ return errors.New("networks and static ip/mac address can only be used with Bridge mode networking")
}
return nil
diff --git a/pkg/specgen/generate/config_linux.go b/pkg/specgen/generate/config_linux.go
index ed2e5408d..a46966161 100644
--- a/pkg/specgen/generate/config_linux.go
+++ b/pkg/specgen/generate/config_linux.go
@@ -3,7 +3,6 @@ package generate
import (
"fmt"
"io/fs"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -11,63 +10,13 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
+ "github.com/containers/podman/v4/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
-var (
- errNotADevice = errors.New("not a device node")
-)
-
-func addPrivilegedDevices(g *generate.Generator) error {
- hostDevices, err := getDevices("/dev")
- if err != nil {
- return err
- }
- g.ClearLinuxDevices()
-
- if rootless.IsRootless() {
- mounts := make(map[string]interface{})
- for _, m := range g.Mounts() {
- mounts[m.Destination] = true
- }
- newMounts := []spec.Mount{}
- for _, d := range hostDevices {
- devMnt := spec.Mount{
- Destination: d.Path,
- Type: define.TypeBind,
- Source: d.Path,
- Options: []string{"slave", "nosuid", "noexec", "rw", "rbind"},
- }
- if d.Path == "/dev/ptmx" || strings.HasPrefix(d.Path, "/dev/tty") {
- continue
- }
- if _, found := mounts[d.Path]; found {
- continue
- }
- newMounts = append(newMounts, devMnt)
- }
- g.Config.Mounts = append(newMounts, g.Config.Mounts...)
- if g.Config.Linux.Resources != nil {
- g.Config.Linux.Resources.Devices = nil
- }
- } else {
- for _, d := range hostDevices {
- g.AddDevice(d)
- }
- // Add resources device - need to clear the existing one first.
- if g.Config.Linux.Resources != nil {
- g.Config.Linux.Resources.Devices = nil
- }
- g.AddLinuxResourcesDevice(true, "", nil, nil, "rwm")
- }
-
- return nil
-}
-
// DevicesFromPath computes a list of devices
func DevicesFromPath(g *generate.Generator, devicePath string) error {
devs := strings.Split(devicePath, ":")
@@ -96,7 +45,7 @@ func DevicesFromPath(g *generate.Generator, devicePath string) error {
}
if len(devs) > 2 {
if devmode != "" {
- return errors.Wrapf(unix.EINVAL, "invalid device specification %s", devicePath)
+ return fmt.Errorf("invalid device specification %s: %w", devicePath, unix.EINVAL)
}
devmode = devs[2]
}
@@ -110,7 +59,7 @@ func DevicesFromPath(g *generate.Generator, devicePath string) error {
device = fmt.Sprintf("%s:%s", device, devmode)
}
if err := addDevice(g, device); err != nil {
- return errors.Wrapf(err, "failed to add %s device", dpath)
+ return fmt.Errorf("failed to add %s device: %w", dpath, err)
}
}
return nil
@@ -118,7 +67,7 @@ func DevicesFromPath(g *generate.Generator, devicePath string) error {
return err
}
if !found {
- return errors.Wrapf(unix.EINVAL, "no devices found in %s", devicePath)
+ return fmt.Errorf("no devices found in %s: %w", devicePath, unix.EINVAL)
}
return nil
}
@@ -174,62 +123,14 @@ func BlockAccessToKernelFilesystems(privileged, pidModeIsHost bool, mask, unmask
}
}
-// based on getDevices from runc (libcontainer/devices/devices.go)
-func getDevices(path string) ([]spec.LinuxDevice, error) {
- files, err := ioutil.ReadDir(path)
- if err != nil {
- if rootless.IsRootless() && os.IsPermission(err) {
- return nil, nil
- }
- return nil, err
- }
- out := []spec.LinuxDevice{}
- for _, f := range files {
- switch {
- case f.IsDir():
- switch f.Name() {
- // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825
- case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts":
- continue
- default:
- sub, err := getDevices(filepath.Join(path, f.Name()))
- if err != nil {
- return nil, err
- }
- if sub != nil {
- out = append(out, sub...)
- }
- continue
- }
- case f.Name() == "console":
- continue
- case f.Mode()&os.ModeSymlink != 0:
- continue
- }
-
- device, err := deviceFromPath(filepath.Join(path, f.Name()))
- if err != nil {
- if err == errNotADevice {
- continue
- }
- if os.IsNotExist(err) {
- continue
- }
- return nil, err
- }
- out = append(out, *device)
- }
- return out, nil
-}
-
func addDevice(g *generate.Generator, device string) error {
src, dst, permissions, err := ParseDevice(device)
if err != nil {
return err
}
- dev, err := deviceFromPath(src)
+ dev, err := util.DeviceFromPath(src)
if err != nil {
- return errors.Wrapf(err, "%s is not a valid device", src)
+ return fmt.Errorf("%s is not a valid device: %w", src, err)
}
if rootless.IsRootless() {
if _, err := os.Stat(src); err != nil {
@@ -262,7 +163,7 @@ func addDevice(g *generate.Generator, device string) error {
}
// ParseDevice parses device mapping string to a src, dest & permissions string
-func ParseDevice(device string) (string, string, string, error) { //nolint
+func ParseDevice(device string) (string, string, string, error) {
var src string
var dst string
permissions := "rwm"
@@ -316,43 +217,6 @@ func IsValidDeviceMode(mode string) bool {
return true
}
-// Copied from github.com/opencontainers/runc/libcontainer/devices
-// Given the path to a device look up the information about a linux device
-func deviceFromPath(path string) (*spec.LinuxDevice, error) {
- var stat unix.Stat_t
- err := unix.Lstat(path, &stat)
- if err != nil {
- return nil, err
- }
- var (
- devType string
- mode = stat.Mode
- devNumber = uint64(stat.Rdev) // nolint: unconvert
- m = os.FileMode(mode)
- )
-
- switch {
- case mode&unix.S_IFBLK == unix.S_IFBLK:
- devType = "b"
- case mode&unix.S_IFCHR == unix.S_IFCHR:
- devType = "c"
- case mode&unix.S_IFIFO == unix.S_IFIFO:
- devType = "p"
- default:
- return nil, errNotADevice
- }
-
- return &spec.LinuxDevice{
- Type: devType,
- Path: path,
- FileMode: &m,
- UID: &stat.Uid,
- GID: &stat.Gid,
- Major: int64(unix.Major(devNumber)),
- Minor: int64(unix.Minor(devNumber)),
- }, nil
-}
-
func supportAmbientCapabilities() bool {
err := unix.Prctl(unix.PR_CAP_AMBIENT, unix.PR_CAP_AMBIENT_IS_SET, 0, 0, 0)
return err == nil
diff --git a/pkg/specgen/generate/config_linux_cgo.go b/pkg/specgen/generate/config_linux_cgo.go
index efab6679a..74ba4aeeb 100644
--- a/pkg/specgen/generate/config_linux_cgo.go
+++ b/pkg/specgen/generate/config_linux_cgo.go
@@ -5,6 +5,8 @@ package generate
import (
"context"
+ "errors"
+ "fmt"
"io/ioutil"
"github.com/containers/common/libimage"
@@ -12,7 +14,6 @@ import (
"github.com/containers/podman/v4/pkg/seccomp"
"github.com/containers/podman/v4/pkg/specgen"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -39,7 +40,7 @@ func getSeccompConfig(s *specgen.SpecGenerator, configSpec *spec.Spec, img *libi
logrus.Debug("Loading seccomp profile from the security config")
seccompConfig, err = goSeccomp.LoadProfile(imagePolicy, configSpec)
if err != nil {
- return nil, errors.Wrap(err, "loading seccomp profile failed")
+ return nil, fmt.Errorf("loading seccomp profile failed: %w", err)
}
return seccompConfig, nil
}
@@ -48,17 +49,17 @@ func getSeccompConfig(s *specgen.SpecGenerator, configSpec *spec.Spec, img *libi
logrus.Debugf("Loading seccomp profile from %q", s.SeccompProfilePath)
seccompProfile, err := ioutil.ReadFile(s.SeccompProfilePath)
if err != nil {
- return nil, errors.Wrap(err, "opening seccomp profile failed")
+ return nil, fmt.Errorf("opening seccomp profile failed: %w", err)
}
seccompConfig, err = goSeccomp.LoadProfile(string(seccompProfile), configSpec)
if err != nil {
- return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", s.SeccompProfilePath)
+ return nil, fmt.Errorf("loading seccomp profile (%s) failed: %w", s.SeccompProfilePath, err)
}
} else {
logrus.Debug("Loading default seccomp profile")
seccompConfig, err = goSeccomp.GetDefaultProfile(configSpec)
if err != nil {
- return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", s.SeccompProfilePath)
+ return nil, fmt.Errorf("loading seccomp profile (%s) failed: %w", s.SeccompProfilePath, err)
}
}
diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go
index cc376125f..2248c9235 100644
--- a/pkg/specgen/generate/container.go
+++ b/pkg/specgen/generate/container.go
@@ -3,6 +3,7 @@ package generate
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"os"
"strings"
@@ -17,7 +18,6 @@ import (
"github.com/containers/podman/v4/pkg/signal"
"github.com/containers/podman/v4/pkg/specgen"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -38,10 +38,19 @@ func getImageFromSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGen
}
// Need to look up image.
- image, resolvedName, err := r.LibimageRuntime().LookupImage(s.Image, nil)
+ lookupOptions := &libimage.LookupImageOptions{ManifestList: true}
+ image, resolvedName, err := r.LibimageRuntime().LookupImage(s.Image, lookupOptions)
if err != nil {
return nil, "", nil, err
}
+ manifestList, err := image.ToManifestList()
+ // only process if a manifest list is found; otherwise expect it to be a regular image
+ if err == nil {
+ image, err = manifestList.LookupInstance(ctx, s.ImageArch, s.ImageOS, s.ImageVariant)
+ if err != nil {
+ return nil, "", nil, err
+ }
+ }
s.SetImage(image, resolvedName)
inspectData, err := image.Inspect(ctx, nil)
if err != nil {
@@ -106,7 +115,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat
// Get Default Environment from containers.conf
defaultEnvs, err := envLib.ParseSlice(rtc.GetDefaultEnvEx(s.EnvHost, s.HTTPProxy))
if err != nil {
- return nil, errors.Wrap(err, "error parsing fields in containers.conf")
+ return nil, fmt.Errorf("error parsing fields in containers.conf: %w", err)
}
var envs map[string]string
@@ -116,7 +125,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat
// already, overriding the default environments
envs, err = envLib.ParseSlice(inspectData.Config.Env)
if err != nil {
- return nil, errors.Wrap(err, "Env fields from image failed to parse")
+ return nil, fmt.Errorf("env fields from image failed to parse: %w", err)
}
defaultEnvs = envLib.Join(envLib.DefaultEnvVariables(), envLib.Join(defaultEnvs, envs))
}
@@ -132,7 +141,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat
// any case.
osEnv, err := envLib.ParseSlice(os.Environ())
if err != nil {
- return nil, errors.Wrap(err, "error parsing host environment variables")
+ return nil, fmt.Errorf("error parsing host environment variables: %w", err)
}
// Caller Specified defaults
if s.EnvHost {
@@ -303,8 +312,8 @@ func FinishThrottleDevices(s *specgen.SpecGenerator) error {
if err := unix.Stat(k, &statT); err != nil {
return err
}
- v.Major = (int64(unix.Major(uint64(statT.Rdev)))) // nolint: unconvert
- v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) // nolint: unconvert
+ v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
+ v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
if s.ResourceLimits.BlockIO == nil {
s.ResourceLimits.BlockIO = new(spec.LinuxBlockIO)
}
@@ -317,8 +326,8 @@ func FinishThrottleDevices(s *specgen.SpecGenerator) error {
if err := unix.Stat(k, &statT); err != nil {
return err
}
- v.Major = (int64(unix.Major(uint64(statT.Rdev)))) // nolint: unconvert
- v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) // nolint: unconvert
+ v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
+ v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice, v)
}
}
@@ -328,8 +337,8 @@ func FinishThrottleDevices(s *specgen.SpecGenerator) error {
if err := unix.Stat(k, &statT); err != nil {
return err
}
- v.Major = (int64(unix.Major(uint64(statT.Rdev)))) // nolint: unconvert
- v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) // nolint: unconvert
+ v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
+ v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice, v)
}
}
@@ -339,8 +348,8 @@ func FinishThrottleDevices(s *specgen.SpecGenerator) error {
if err := unix.Stat(k, &statT); err != nil {
return err
}
- v.Major = (int64(unix.Major(uint64(statT.Rdev)))) // nolint: unconvert
- v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) // nolint: unconvert
+ v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
+ v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice, v)
}
}
@@ -450,7 +459,7 @@ func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, contaierID s
specg.IpcNS = specgen.Namespace{NSMode: specgen.Default} // default
}
case "uts":
- specg.UtsNS = specgen.Namespace{NSMode: specgen.Default} // default
+ specg.UtsNS = specgen.Namespace{NSMode: specgen.Private} // default
case "user":
if conf.AddCurrentUserPasswdEntry {
specg.UserNS = specgen.Namespace{NSMode: specgen.KeepID}
@@ -506,6 +515,7 @@ func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, contaierID s
specg.Mounts = mounts
specg.HostDeviceList = conf.DeviceHostSrc
specg.Networks = conf.Networks
+ specg.ShmSize = &conf.ShmSize
mapSecurityConfig(conf, specg)
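
The getImageFromSpec hunk earlier in this file makes image resolution manifest-list aware. A condensed, hypothetical helper (the resolveImage name and signature are not in the source) sketching that flow under the same imports this file already uses:

    // resolveImage condenses the lookup: resolve the name with ManifestList
    // enabled, then, if it is a manifest list, pick the arch/OS/variant instance.
    func resolveImage(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator) (*libimage.Image, string, error) {
        lookupOptions := &libimage.LookupImageOptions{ManifestList: true}
        image, resolvedName, err := rt.LibimageRuntime().LookupImage(s.Image, lookupOptions)
        if err != nil {
            return nil, "", err
        }
        // A plain image fails ToManifestList and falls through unchanged.
        if manifestList, mErr := image.ToManifestList(); mErr == nil {
            image, err = manifestList.LookupInstance(ctx, s.ImageArch, s.ImageOS, s.ImageVariant)
            if err != nil {
                return nil, "", err
            }
        }
        return image, resolvedName, nil
    }
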
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index 04e24d625..51d290bb4 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -3,6 +3,8 @@ package generate
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"path/filepath"
"strings"
@@ -15,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -34,7 +35,7 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
if s.Pod != "" {
pod, err = rt.LookupPod(s.Pod)
if err != nil {
- return nil, nil, nil, errors.Wrapf(err, "error retrieving pod %s", s.Pod)
+ return nil, nil, nil, fmt.Errorf("error retrieving pod %s: %w", s.Pod, err)
}
if pod.HasInfraContainer() {
infra, err = pod.InfraContainer()
@@ -133,8 +134,14 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
options = append(options, libpod.WithRootFSFromImage(newImage.ID(), resolvedImageName, s.RawImageName))
}
+
+ _, err = rt.LookupPod(s.Hostname)
+ if len(s.Hostname) > 0 && !s.UtsNS.IsPrivate() && err == nil {
+ // ok, we are incorrectly setting the pod as the hostname; let's undo that before validation
+ s.Hostname = ""
+ }
if err := s.Validate(); err != nil {
- return nil, nil, nil, errors.Wrap(err, "invalid config provided")
+ return nil, nil, nil, fmt.Errorf("invalid config provided: %w", err)
}
finalMounts, finalVolumes, finalOverlays, err := finalizeMounts(ctx, s, rt, rtc, newImage)
@@ -180,10 +187,23 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
if err != nil {
return nil, nil, nil, err
}
+ resources := runtimeSpec.Linux.Resources
+
+ // resources get overwritten similarly to pod inheritance; manually assign here if there is a new value
+ marshalRes, err := json.Marshal(resources)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
err = json.Unmarshal(out, runtimeSpec.Linux)
if err != nil {
return nil, nil, nil, err
}
+
+ err = json.Unmarshal(marshalRes, runtimeSpec.Linux.Resources)
+ if err != nil {
+ return nil, nil, nil, err
+ }
}
if s.ResourceLimits != nil {
switch {
@@ -278,6 +298,10 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l
options = append(options, libpod.WithPasswdEntry(s.PasswdEntry))
}
+ if s.Privileged {
+ options = append(options, libpod.WithMountAllDevices())
+ }
+
useSystemd := false
switch s.Systemd {
case "always":
@@ -309,7 +333,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l
}
}
default:
- return nil, errors.Wrapf(err, "invalid value %q systemd option requires 'true, false, always'", s.Systemd)
+ return nil, fmt.Errorf("invalid value %q systemd option requires 'true, false, always': %w", s.Systemd, err)
}
logrus.Debugf("using systemd mode: %t", useSystemd)
if useSystemd {
@@ -318,7 +342,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l
if s.StopSignal == nil {
stopSignal, err := util.ParseSignal("RTMIN+3")
if err != nil {
- return nil, errors.Wrapf(err, "error parsing systemd signal")
+ return nil, fmt.Errorf("error parsing systemd signal: %w", err)
}
s.StopSignal = &stopSignal
}
@@ -513,7 +537,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l
for _, ctr := range s.DependencyContainers {
depCtr, err := rt.LookupContainer(ctr)
if err != nil {
- return nil, errors.Wrapf(err, "%q is not a valid container, cannot be used as a dependency", ctr)
+ return nil, fmt.Errorf("%q is not a valid container, cannot be used as a dependency: %w", ctr, err)
}
deps = append(deps, depCtr)
}
@@ -542,6 +566,16 @@ func Inherit(infra libpod.Container, s *specgen.SpecGenerator, rt *libpod.Runtim
infraConf := infra.Config()
infraSpec := infraConf.Spec
+ // need to set compatOptions to the currently filled specgenOptions so we do not overwrite
+ compatibleOptions.CapAdd = append(compatibleOptions.CapAdd, s.CapAdd...)
+ compatibleOptions.CapDrop = append(compatibleOptions.CapDrop, s.CapDrop...)
+ compatibleOptions.HostDeviceList = append(compatibleOptions.HostDeviceList, s.HostDeviceList...)
+ compatibleOptions.ImageVolumes = append(compatibleOptions.ImageVolumes, s.ImageVolumes...)
+ compatibleOptions.Mounts = append(compatibleOptions.Mounts, s.Mounts...)
+ compatibleOptions.OverlayVolumes = append(compatibleOptions.OverlayVolumes, s.OverlayVolumes...)
+ compatibleOptions.SelinuxOpts = append(compatibleOptions.SelinuxOpts, s.SelinuxOpts...)
+ compatibleOptions.Volumes = append(compatibleOptions.Volumes, s.Volumes...)
+
compatByte, err := json.Marshal(compatibleOptions)
if err != nil {
return nil, nil, nil, err
@@ -550,5 +584,10 @@ func Inherit(infra libpod.Container, s *specgen.SpecGenerator, rt *libpod.Runtim
if err != nil {
return nil, nil, nil, err
}
+
+ // this causes errors when shmSize is the default value; it will still get passed down unless we manually override it.
+ if s.IpcNS.NSMode == specgen.Host && (compatibleOptions.ShmSize != nil && compatibleOptions.IsDefaultShmSize()) {
+ s.ShmSize = nil
+ }
return options, infraSpec, compatibleOptions, nil
}
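
The MakeContainer hunk above snapshots runtimeSpec.Linux.Resources as JSON before the inherited spec is unmarshalled over runtimeSpec.Linux, then restores it, so pod inheritance cannot clobber per-container resource limits. A generic, hypothetical sketch of the same save-and-restore pattern (types and values are illustrative only):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Resources struct {
        CPUShares uint64 `json:"cpuShares"`
    }

    type Linux struct {
        Hostname  string     `json:"hostname"`
        Resources *Resources `json:"resources"`
    }

    func main() {
        current := &Linux{Hostname: "ctr", Resources: &Resources{CPUShares: 512}}

        // Snapshot the field we want to keep before the overwrite.
        saved, _ := json.Marshal(current.Resources)

        // Inherited settings arrive as JSON and overwrite the whole struct.
        inherited := []byte(`{"hostname":"pod-infra","resources":{"cpuShares":0}}`)
        _ = json.Unmarshal(inherited, current)

        // Restore the snapshot so inheritance cannot clobber the limits.
        _ = json.Unmarshal(saved, current.Resources)

        fmt.Println(current.Hostname, current.Resources.CPUShares) // pod-infra 512
    }
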
diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go
index f37d79798..454a1e1d0 100644
--- a/pkg/specgen/generate/kube/kube.go
+++ b/pkg/specgen/generate/kube/kube.go
@@ -3,6 +3,7 @@ package kube
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"math"
"net"
@@ -16,6 +17,7 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/parse"
"github.com/containers/common/pkg/secrets"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/image/v5/manifest"
"github.com/containers/podman/v4/libpod/define"
ann "github.com/containers/podman/v4/pkg/annotations"
@@ -28,7 +30,6 @@ import (
"github.com/docker/docker/pkg/system"
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -145,7 +146,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
// pod name should be non-empty for Deployment objects to be able to create
// multiple pods having containers with unique names
if len(opts.PodName) < 1 {
- return nil, errors.Errorf("got empty pod name on container creation when playing kube")
+ return nil, errors.New("got empty pod name on container creation when playing kube")
}
s.Name = fmt.Sprintf("%s-%s", opts.PodName, opts.Container.Name)
@@ -162,7 +163,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
for _, o := range opts.LogOptions {
split := strings.SplitN(o, "=", 2)
if len(split) < 2 {
- return nil, errors.Errorf("invalid log option %q", o)
+ return nil, fmt.Errorf("invalid log option %q", o)
}
switch strings.ToLower(split[0]) {
case "driver":
@@ -178,7 +179,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
default:
switch len(split[1]) {
case 0:
- return nil, errors.Wrapf(define.ErrInvalidArg, "invalid log option")
+ return nil, fmt.Errorf("invalid log option: %w", define.ErrInvalidArg)
default:
// tags for journald only
if s.LogConfiguration.Driver == "" || s.LogConfiguration.Driver == define.JournaldLogging {
@@ -195,7 +196,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
setupSecurityContext(s, opts.Container.SecurityContext, opts.PodSecurityContext)
err := setupLivenessProbe(s, opts.Container, opts.RestartPolicy)
if err != nil {
- return nil, errors.Wrap(err, "Failed to configure livenessProbe")
+ return nil, fmt.Errorf("failed to configure livenessProbe: %w", err)
}
// Since we prefix the container name with pod name to work-around the uniqueness requirement,
@@ -206,7 +207,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
s.ResourceLimits = &spec.LinuxResources{}
milliCPU, err := quantityToInt64(opts.Container.Resources.Limits.Cpu())
if err != nil {
- return nil, errors.Wrap(err, "Failed to set CPU quota")
+ return nil, fmt.Errorf("failed to set CPU quota: %w", err)
}
if milliCPU > 0 {
period, quota := util.CoresToPeriodAndQuota(float64(milliCPU))
@@ -218,12 +219,12 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
limit, err := quantityToInt64(opts.Container.Resources.Limits.Memory())
if err != nil {
- return nil, errors.Wrap(err, "Failed to set memory limit")
+ return nil, fmt.Errorf("failed to set memory limit: %w", err)
}
memoryRes, err := quantityToInt64(opts.Container.Resources.Requests.Memory())
if err != nil {
- return nil, errors.Wrap(err, "Failed to set memory reservation")
+ return nil, fmt.Errorf("failed to set memory reservation: %w", err)
}
if limit > 0 || memoryRes > 0 {
@@ -336,7 +337,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
for _, volume := range opts.Container.VolumeMounts {
volumeSource, exists := opts.Volumes[volume.Name]
if !exists {
- return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
+ return nil, fmt.Errorf("volume mount %s specified for container but not configured in volumes", volume.Name)
}
// Skip if the volume is optional. This means that a configmap for a configmap volume was not found but it was
// optional so we can move on without throwing an error
@@ -356,7 +357,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
// a selinux mount option exists for it
for k, v := range opts.Annotations {
// Make sure the z/Z option is not already there (from editing the YAML)
- if strings.Replace(k, define.BindMountPrefix, "", 1) == volumeSource.Source && !util.StringInSlice("z", options) && !util.StringInSlice("Z", options) {
+ if strings.Replace(k, define.BindMountPrefix, "", 1) == volumeSource.Source && !cutil.StringInSlice("z", options) && !cutil.StringInSlice("Z", options) {
options = append(options, v)
}
}
@@ -398,7 +399,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
}
s.Devices = append(s.Devices, device)
default:
- return nil, errors.Errorf("Unsupported volume source type")
+ return nil, errors.New("unsupported volume source type")
}
}
@@ -431,21 +432,21 @@ func parseMountPath(mountPath string, readOnly bool, propagationMode *v1.MountPr
options := []string{}
splitVol := strings.Split(mountPath, ":")
if len(splitVol) > 2 {
- return "", options, errors.Errorf("%q incorrect volume format, should be ctr-dir[:option]", mountPath)
+ return "", options, fmt.Errorf("%q incorrect volume format, should be ctr-dir[:option]", mountPath)
}
dest := splitVol[0]
if len(splitVol) > 1 {
options = strings.Split(splitVol[1], ",")
}
if err := parse.ValidateVolumeCtrDir(dest); err != nil {
- return "", options, errors.Wrapf(err, "parsing MountPath")
+ return "", options, fmt.Errorf("parsing MountPath: %w", err)
}
if readOnly {
options = append(options, "ro")
}
opts, err := parse.ValidateVolumeOpts(options)
if err != nil {
- return "", opts, errors.Wrapf(err, "parsing MountOptions")
+ return "", opts, fmt.Errorf("parsing MountOptions: %w", err)
}
if propagationMode != nil {
switch *propagationMode {
@@ -456,7 +457,7 @@ func parseMountPath(mountPath string, readOnly bool, propagationMode *v1.MountPr
case v1.MountPropagationBidirectional:
opts = append(opts, "rshared")
default:
- return "", opts, errors.Errorf("unknown propagation mode %q", *propagationMode)
+ return "", opts, fmt.Errorf("unknown propagation mode %q", *propagationMode)
}
}
return dest, opts, nil
@@ -503,19 +504,19 @@ func setupLivenessProbe(s *specgen.SpecGenerator, containerYAML v1.Container, re
func makeHealthCheck(inCmd string, interval int32, retries int32, timeout int32, startPeriod int32) (*manifest.Schema2HealthConfig, error) {
// Every healthcheck requires a command
if len(inCmd) == 0 {
- return nil, errors.New("Must define a healthcheck command for all healthchecks")
+ return nil, errors.New("must define a healthcheck command for all healthchecks")
}
// first try to parse option value as JSON array of strings...
cmd := []string{}
if inCmd == "none" {
- cmd = []string{"NONE"}
+ cmd = []string{define.HealthConfigTestNone}
} else {
err := json.Unmarshal([]byte(inCmd), &cmd)
if err != nil {
// ...otherwise pass it to "/bin/sh -c" inside the container
- cmd = []string{"CMD-SHELL"}
+ cmd = []string{define.HealthConfigTestCmdShell}
cmd = append(cmd, strings.Split(inCmd, " ")...)
}
}
@@ -629,7 +630,7 @@ func quantityToInt64(quantity *resource.Quantity) (int64, error) {
return i, nil
}
- return 0, errors.Errorf("Quantity cannot be represented as int64: %v", quantity)
+ return 0, fmt.Errorf("quantity cannot be represented as int64: %v", quantity)
}
// read a k8s secret in JSON format from the secret manager
@@ -641,7 +642,7 @@ func k8sSecretFromSecretManager(name string, secretsManager *secrets.SecretsMana
var secrets map[string][]byte
if err := json.Unmarshal(jsonSecret, &secrets); err != nil {
- return nil, errors.Errorf("Secret %v is not valid JSON: %v", name, err)
+ return nil, fmt.Errorf("secret %v is not valid JSON: %v", name, err)
}
return secrets, nil
}
@@ -652,7 +653,7 @@ func envVarsFrom(envFrom v1.EnvFromSource, opts *CtrSpecGenOptions) (map[string]
if envFrom.ConfigMapRef != nil {
cmRef := envFrom.ConfigMapRef
- err := errors.Errorf("Configmap %v not found", cmRef.Name)
+ err := fmt.Errorf("configmap %v not found", cmRef.Name)
for _, c := range opts.ConfigMaps {
if cmRef.Name == c.Name {
@@ -688,14 +689,14 @@ func envVarValue(env v1.EnvVar, opts *CtrSpecGenOptions) (*string, error) {
if env.ValueFrom != nil {
if env.ValueFrom.ConfigMapKeyRef != nil {
cmKeyRef := env.ValueFrom.ConfigMapKeyRef
- err := errors.Errorf("Cannot set env %v: configmap %v not found", env.Name, cmKeyRef.Name)
+ err := fmt.Errorf("cannot set env %v: configmap %v not found", env.Name, cmKeyRef.Name)
for _, c := range opts.ConfigMaps {
if cmKeyRef.Name == c.Name {
if value, ok := c.Data[cmKeyRef.Key]; ok {
return &value, nil
}
- err = errors.Errorf("Cannot set env %v: key %s not found in configmap %v", env.Name, cmKeyRef.Key, cmKeyRef.Name)
+ err = fmt.Errorf("cannot set env %v: key %s not found in configmap %v", env.Name, cmKeyRef.Key, cmKeyRef.Name)
break
}
}
@@ -713,10 +714,10 @@ func envVarValue(env v1.EnvVar, opts *CtrSpecGenOptions) (*string, error) {
value := string(val)
return &value, nil
}
- err = errors.Errorf("Secret %v has not %v key", secKeyRef.Name, secKeyRef.Key)
+ err = fmt.Errorf("secret %v has not %v key", secKeyRef.Name, secKeyRef.Key)
}
if secKeyRef.Optional == nil || !*secKeyRef.Optional {
- return nil, errors.Errorf("Cannot set env %v: %v", env.Name, err)
+ return nil, fmt.Errorf("cannot set env %v: %v", env.Name, err)
}
return nil, nil
}
@@ -760,8 +761,8 @@ func envVarValueFieldRef(env v1.EnvVar, opts *CtrSpecGenOptions) (*string, error
return &annotationValue, nil
}
- return nil, errors.Errorf(
- "Can not set env %v. Reason: fieldPath %v is either not valid or not supported",
+ return nil, fmt.Errorf(
+ "can not set env %v. Reason: fieldPath %v is either not valid or not supported",
env.Name, fieldPath,
)
}
@@ -795,22 +796,22 @@ func envVarValueResourceFieldRef(env v1.EnvVar, opts *CtrSpecGenOptions) (*strin
value = resources.Requests.Cpu()
isValidDivisor = isCPUDivisor(divisor)
default:
- return nil, errors.Errorf(
- "Can not set env %v. Reason: resource %v is either not valid or not supported",
+ return nil, fmt.Errorf(
+ "can not set env %v. Reason: resource %v is either not valid or not supported",
env.Name, resourceName,
)
}
if !isValidDivisor {
- return nil, errors.Errorf(
- "Can not set env %s. Reason: divisor value %s is not valid",
+ return nil, fmt.Errorf(
+ "can not set env %s. Reason: divisor value %s is not valid",
env.Name, divisor.String(),
)
}
// k8s rounds up the result to the nearest integer
- intValue := int(math.Ceil(value.AsApproximateFloat64() / divisor.AsApproximateFloat64()))
- stringValue := strconv.Itoa(intValue)
+ intValue := int64(math.Ceil(value.AsApproximateFloat64() / divisor.AsApproximateFloat64()))
+ stringValue := strconv.FormatInt(intValue, 10)
return &stringValue, nil
}
diff --git a/pkg/specgen/generate/kube/play_test.go b/pkg/specgen/generate/kube/play_test.go
index e01d62b08..466dab610 100644
--- a/pkg/specgen/generate/kube/play_test.go
+++ b/pkg/specgen/generate/kube/play_test.go
@@ -2,7 +2,6 @@ package kube
import (
"encoding/json"
- "fmt"
"math"
"runtime"
"strconv"
@@ -777,8 +776,7 @@ func TestEnvVarValue(t *testing.T) {
if test.expected == nilString {
assert.Nil(t, result)
} else {
- fmt.Println(*result, test.expected)
- assert.Equal(t, &(test.expected), result)
+ assert.Equal(t, test.expected, *result)
}
})
}
diff --git a/pkg/specgen/generate/kube/seccomp.go b/pkg/specgen/generate/kube/seccomp.go
index 8f93b34ff..6e3accd8b 100644
--- a/pkg/specgen/generate/kube/seccomp.go
+++ b/pkg/specgen/generate/kube/seccomp.go
@@ -1,12 +1,12 @@
package kube
import (
+ "fmt"
"path/filepath"
"strings"
"github.com/containers/podman/v4/libpod"
v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1"
- "github.com/pkg/errors"
)
// KubeSeccompPaths holds information about a pod YAML's seccomp configuration
@@ -42,7 +42,7 @@ func InitializeSeccompPaths(annotations map[string]string, profileRoot string) (
// this could be caused by a user inputting either of
// container.seccomp.security.alpha.kubernetes.io{,/}
// both of which are invalid
- return nil, errors.Errorf("Invalid seccomp path: %s", prefixAndCtr[0])
+ return nil, fmt.Errorf("invalid seccomp path: %s", prefixAndCtr[0])
}
path, err := verifySeccompPath(seccomp, profileRoot)
@@ -80,6 +80,6 @@ func verifySeccompPath(path string, profileRoot string) (string, error) {
if parts[0] == "localhost" {
return filepath.Join(profileRoot, parts[1]), nil
}
- return "", errors.Errorf("invalid seccomp path: %s", path)
+ return "", fmt.Errorf("invalid seccomp path: %s", path)
}
}
diff --git a/pkg/specgen/generate/kube/volume.go b/pkg/specgen/generate/kube/volume.go
index 1d6d49b9d..f5c0c241d 100644
--- a/pkg/specgen/generate/kube/volume.go
+++ b/pkg/specgen/generate/kube/volume.go
@@ -1,12 +1,13 @@
package kube
import (
+ "errors"
+ "fmt"
"os"
"github.com/containers/common/pkg/parse"
"github.com/containers/podman/v4/libpod"
v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -56,13 +57,13 @@ func VolumeFromHostPath(hostPath *v1.HostPathVolumeSource) (*KubeVolume, error)
}
// Label a newly created volume
if err := libpod.LabelVolumePath(hostPath.Path); err != nil {
- return nil, errors.Wrapf(err, "error giving %s a label", hostPath.Path)
+ return nil, fmt.Errorf("error giving %s a label: %w", hostPath.Path, err)
}
case v1.HostPathFileOrCreate:
if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, kubeFilePermission)
if err != nil {
- return nil, errors.Wrap(err, "error creating HostPath")
+ return nil, fmt.Errorf("error creating HostPath: %w", err)
}
if err := f.Close(); err != nil {
logrus.Warnf("Error in closing newly created HostPath file: %v", err)
@@ -70,23 +71,23 @@ func VolumeFromHostPath(hostPath *v1.HostPathVolumeSource) (*KubeVolume, error)
}
// unconditionally label a newly created volume
if err := libpod.LabelVolumePath(hostPath.Path); err != nil {
- return nil, errors.Wrapf(err, "error giving %s a label", hostPath.Path)
+ return nil, fmt.Errorf("error giving %s a label: %w", hostPath.Path, err)
}
case v1.HostPathSocket:
st, err := os.Stat(hostPath.Path)
if err != nil {
- return nil, errors.Wrap(err, "error checking HostPathSocket")
+ return nil, fmt.Errorf("error checking HostPathSocket: %w", err)
}
if st.Mode()&os.ModeSocket != os.ModeSocket {
- return nil, errors.Errorf("checking HostPathSocket: path %s is not a socket", hostPath.Path)
+ return nil, fmt.Errorf("checking HostPathSocket: path %s is not a socket", hostPath.Path)
}
case v1.HostPathBlockDev:
dev, err := os.Stat(hostPath.Path)
if err != nil {
- return nil, errors.Wrap(err, "error checking HostPathBlockDevice")
+ return nil, fmt.Errorf("error checking HostPathBlockDevice: %w", err)
}
if dev.Mode()&os.ModeCharDevice == os.ModeCharDevice {
- return nil, errors.Errorf("checking HostPathDevice: path %s is not a block device", hostPath.Path)
+ return nil, fmt.Errorf("checking HostPathDevice: path %s is not a block device", hostPath.Path)
}
return &KubeVolume{
Type: KubeVolumeTypeBlockDevice,
@@ -95,10 +96,10 @@ func VolumeFromHostPath(hostPath *v1.HostPathVolumeSource) (*KubeVolume, error)
case v1.HostPathCharDev:
dev, err := os.Stat(hostPath.Path)
if err != nil {
- return nil, errors.Wrap(err, "error checking HostPathCharDevice")
+ return nil, fmt.Errorf("error checking HostPathCharDevice: %w", err)
}
if dev.Mode()&os.ModeCharDevice != os.ModeCharDevice {
- return nil, errors.Errorf("checking HostPathCharDevice: path %s is not a character device", hostPath.Path)
+ return nil, fmt.Errorf("checking HostPathCharDevice: path %s is not a character device", hostPath.Path)
}
return &KubeVolume{
Type: KubeVolumeTypeCharDevice,
@@ -110,12 +111,12 @@ func VolumeFromHostPath(hostPath *v1.HostPathVolumeSource) (*KubeVolume, error)
// do nothing here because we will verify the path exists in validateVolumeHostDir
break
default:
- return nil, errors.Errorf("Invalid HostPath type %v", hostPath.Type)
+ return nil, fmt.Errorf("invalid HostPath type %v", hostPath.Type)
}
}
if err := parse.ValidateVolumeHostDir(hostPath.Path); err != nil {
- return nil, errors.Wrapf(err, "error in parsing HostPath in YAML")
+ return nil, fmt.Errorf("error in parsing HostPath in YAML: %w", err)
}
return &KubeVolume{
@@ -152,7 +153,7 @@ func VolumeFromConfigMap(configMapVolumeSource *v1.ConfigMapVolumeSource, config
kv.Optional = *configMapVolumeSource.Optional
return kv, nil
}
- return nil, errors.Errorf("no such ConfigMap %q", configMapVolumeSource.Name)
+ return nil, fmt.Errorf("no such ConfigMap %q", configMapVolumeSource.Name)
}
// If there are Items specified in the volumeSource, that overwrites the Data from the configmap
@@ -180,7 +181,7 @@ func VolumeFromSource(volumeSource v1.VolumeSource, configMaps []v1.ConfigMap) (
case volumeSource.ConfigMap != nil:
return VolumeFromConfigMap(volumeSource.ConfigMap, configMaps)
default:
- return nil, errors.Errorf("HostPath, ConfigMap, and PersistentVolumeClaim are currently the only supported VolumeSource")
+ return nil, errors.New("HostPath, ConfigMap, and PersistentVolumeClaim are currently the only supported VolumeSource")
}
}
@@ -191,7 +192,7 @@ func InitializeVolumes(specVolumes []v1.Volume, configMaps []v1.ConfigMap) (map[
for _, specVolume := range specVolumes {
volume, err := VolumeFromSource(specVolume.VolumeSource, configMaps)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create volume %q", specVolume.Name)
+ return nil, fmt.Errorf("failed to create volume %q: %w", specVolume.Name, err)
}
volumes[specVolume.Name] = volume
diff --git a/pkg/specgen/generate/namespaces.go b/pkg/specgen/generate/namespaces.go
index 37d561ec2..f0d4e9153 100644
--- a/pkg/specgen/generate/namespaces.go
+++ b/pkg/specgen/generate/namespaces.go
@@ -1,6 +1,7 @@
package generate
import (
+ "errors"
"fmt"
"os"
"strings"
@@ -15,10 +16,11 @@ import (
"github.com/containers/podman/v4/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+const host = "host"
+
// Get the default namespace mode for any given namespace type.
func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod) (specgen.Namespace, error) {
// The default for most is private
@@ -33,16 +35,38 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod)
podMode := false
switch {
case nsType == "pid" && pod.SharesPID():
+ if pod.NamespaceMode(spec.PIDNamespace) == host {
+ toReturn.NSMode = specgen.Host
+ return toReturn, nil
+ }
podMode = true
case nsType == "ipc" && pod.SharesIPC():
+ if pod.NamespaceMode(spec.IPCNamespace) == host {
+ toReturn.NSMode = specgen.Host
+ return toReturn, nil
+ }
podMode = true
case nsType == "uts" && pod.SharesUTS():
+ if pod.NamespaceMode(spec.UTSNamespace) == host {
+ toReturn.NSMode = specgen.Host
+ return toReturn, nil
+ }
podMode = true
case nsType == "user" && pod.SharesUser():
+ // user does not need a special check for host; this is already validated on pod creation
+ // if --userns=host then pod.SharesUser == false
podMode = true
case nsType == "net" && pod.SharesNet():
+ if pod.NetworkMode() == host {
+ toReturn.NSMode = specgen.Host
+ return toReturn, nil
+ }
podMode = true
case nsType == "cgroup" && pod.SharesCgroup():
+ if pod.NamespaceMode(spec.CgroupNamespace) == host {
+ toReturn.NSMode = specgen.Host
+ return toReturn, nil
+ }
podMode = true
}
if podMode {
@@ -70,7 +94,7 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod)
return ns, err
}
- return toReturn, errors.Wrapf(define.ErrInvalidArg, "invalid namespace type %q passed", nsType)
+ return toReturn, fmt.Errorf("invalid namespace type %q passed: %w", nsType, define.ErrInvalidArg)
}
// namespaceOptions generates container creation options for all
@@ -89,18 +113,18 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
if err != nil {
// This is likely to be of the fatal kind (pod was
// removed) so hard fail
- return nil, errors.Wrapf(err, "error looking up pod %s infra container", pod.ID())
+ return nil, fmt.Errorf("error looking up pod %s infra container: %w", pod.ID(), err)
}
if infraID != "" {
ctr, err := rt.GetContainer(infraID)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s infra container %s", pod.ID(), infraID)
+ return nil, fmt.Errorf("error retrieving pod %s infra container %s: %w", pod.ID(), infraID, err)
}
infraCtr = ctr
}
}
- errNoInfra := errors.Wrapf(define.ErrInvalidArg, "cannot use pod namespace as container is not joining a pod or pod has no infra container")
+ errNoInfra := fmt.Errorf("cannot use pod namespace as container is not joining a pod or pod has no infra container: %w", define.ErrInvalidArg)
// PID
switch s.PidNS.NSMode {
@@ -112,7 +136,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
case specgen.FromContainer:
pidCtr, err := rt.LookupContainer(s.PidNS.Value)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container to share pid namespace with")
+ return nil, fmt.Errorf("error looking up container to share pid namespace with: %w", err)
}
toReturn = append(toReturn, libpod.WithPIDNSFrom(pidCtr))
}
@@ -131,10 +155,10 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
case specgen.FromContainer:
ipcCtr, err := rt.LookupContainer(s.IpcNS.Value)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container to share ipc namespace with")
+ return nil, fmt.Errorf("error looking up container to share ipc namespace with: %w", err)
}
if ipcCtr.ConfigNoCopy().NoShmShare {
- return nil, errors.Errorf("joining IPC of container %s is not allowed: non-shareable IPC (hint: use IpcMode:shareable for the donor container)", ipcCtr.ID())
+ return nil, fmt.Errorf("joining IPC of container %s is not allowed: non-shareable IPC (hint: use IpcMode:shareable for the donor container)", ipcCtr.ID())
}
toReturn = append(toReturn, libpod.WithIPCNSFrom(ipcCtr))
if !ipcCtr.ConfigNoCopy().NoShm {
@@ -152,11 +176,18 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
if pod == nil || infraCtr == nil {
return nil, errNoInfra
}
- toReturn = append(toReturn, libpod.WithUTSNSFrom(infraCtr))
+ if pod.NamespaceMode(spec.UTSNamespace) == host {
+ // adding infra as a nsCtr is not what we want to do when uts == host
+ // this leads the new ctr to try to add an ns path, which it should not do in this mode
+ logrus.Debug("pod has host uts, not adding infra as a nsCtr")
+ s.UtsNS = specgen.Namespace{NSMode: specgen.Host}
+ } else {
+ toReturn = append(toReturn, libpod.WithUTSNSFrom(infraCtr))
+ }
case specgen.FromContainer:
utsCtr, err := rt.LookupContainer(s.UtsNS.Value)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container to share uts namespace with")
+ return nil, fmt.Errorf("error looking up container to share uts namespace with: %w", err)
}
toReturn = append(toReturn, libpod.WithUTSNSFrom(utsCtr))
}
@@ -191,7 +222,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
case specgen.FromContainer:
userCtr, err := rt.LookupContainer(s.UserNS.Value)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container to share user namespace with")
+ return nil, fmt.Errorf("error looking up container to share user namespace with: %w", err)
}
toReturn = append(toReturn, libpod.WithUserNSFrom(userCtr))
}
@@ -203,7 +234,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
if pod == nil {
toReturn = append(toReturn, libpod.WithIDMappings(*s.IDMappings))
} else if pod.HasInfraContainer() && (len(s.IDMappings.UIDMap) > 0 || len(s.IDMappings.GIDMap) > 0) {
- return nil, errors.Wrapf(define.ErrInvalidArg, "cannot specify a new uid/gid map when entering a pod with an infra container")
+ return nil, fmt.Errorf("cannot specify a new uid/gid map when entering a pod with an infra container: %w", define.ErrInvalidArg)
}
}
if s.User != "" {
@@ -223,7 +254,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
case specgen.FromContainer:
cgroupCtr, err := rt.LookupContainer(s.CgroupNS.Value)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container to share cgroup namespace with")
+ return nil, fmt.Errorf("error looking up container to share cgroup namespace with: %w", err)
}
toReturn = append(toReturn, libpod.WithCgroupNSFrom(cgroupCtr))
}
@@ -236,10 +267,12 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
toReturn = append(toReturn, libpod.WithCgroupsMode(s.CgroupsMode))
}
- // Net
- // TODO validate CNINetworks, StaticIP, StaticIPv6 are only set if we
- // are in bridge mode.
postConfigureNetNS := !s.UserNS.IsHost()
+ // when we are rootless we default to slirp4netns
+ if rootless.IsRootless() && (s.NetNS.IsPrivate() || s.NetNS.IsDefault()) {
+ s.NetNS.NSMode = specgen.Slirp
+ }
+
switch s.NetNS.NSMode {
case specgen.FromPod:
if pod == nil || infraCtr == nil {
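The rootless default added above now lives here rather than at flag-parsing time. In simplified form (illustration only, not the library API), the effective network mode resolves roughly as:

func effectiveNetMode(mode specgen.NamespaceMode, isRootless bool) specgen.NamespaceMode {
	switch mode {
	case specgen.Default, specgen.Private:
		if isRootless {
			return specgen.Slirp // rootless containers default to slirp4netns
		}
		return specgen.Bridge // root containers keep the bridge path
	default:
		return mode // explicit choices (host, none, container:, ns:, ...) are untouched
	}
}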
@@ -249,7 +282,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
case specgen.FromContainer:
netCtr, err := rt.LookupContainer(s.NetNS.Value)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container to share net namespace with")
+ return nil, fmt.Errorf("error looking up container to share net namespace with: %w", err)
}
toReturn = append(toReturn, libpod.WithNetNSFrom(netCtr))
case specgen.Slirp:
@@ -262,9 +295,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
val = fmt.Sprintf("slirp4netns:%s", s.NetNS.Value)
}
toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, val, nil))
- case specgen.Private:
- fallthrough
- case specgen.Bridge:
+ case specgen.Bridge, specgen.Private, specgen.Default:
portMappings, expose, err := createPortMappings(s, imageData)
if err != nil {
return nil, err
@@ -331,7 +362,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
switch s.PidNS.NSMode {
case specgen.Path:
if _, err := os.Stat(s.PidNS.Value); err != nil {
- return errors.Wrap(err, "cannot find specified PID namespace path")
+ return fmt.Errorf("cannot find specified PID namespace path: %w", err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), s.PidNS.Value); err != nil {
return err
@@ -350,7 +381,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
switch s.IpcNS.NSMode {
case specgen.Path:
if _, err := os.Stat(s.IpcNS.Value); err != nil {
- return errors.Wrap(err, "cannot find specified IPC namespace path")
+ return fmt.Errorf("cannot find specified IPC namespace path: %w", err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), s.IpcNS.Value); err != nil {
return err
@@ -369,7 +400,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
switch s.UtsNS.NSMode {
case specgen.Path:
if _, err := os.Stat(s.UtsNS.Value); err != nil {
- return errors.Wrap(err, "cannot find specified UTS namespace path")
+ return fmt.Errorf("cannot find specified UTS namespace path: %w", err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), s.UtsNS.Value); err != nil {
return err
@@ -392,13 +423,13 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
case s.UtsNS.NSMode == specgen.FromContainer:
utsCtr, err := rt.LookupContainer(s.UtsNS.Value)
if err != nil {
- return errors.Wrapf(err, "error looking up container to share uts namespace with")
+ return fmt.Errorf("error looking up container to share uts namespace with: %w", err)
}
hostname = utsCtr.Hostname()
case (s.NetNS.NSMode == specgen.Host && hostname == "") || s.UtsNS.NSMode == specgen.Host:
tmpHostname, err := os.Hostname()
if err != nil {
- return errors.Wrap(err, "unable to retrieve hostname of the host")
+ return fmt.Errorf("unable to retrieve hostname of the host: %w", err)
}
hostname = tmpHostname
default:
@@ -427,7 +458,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
switch s.CgroupNS.NSMode {
case specgen.Path:
if _, err := os.Stat(s.CgroupNS.Value); err != nil {
- return errors.Wrap(err, "cannot find specified cgroup namespace path")
+ return fmt.Errorf("cannot find specified cgroup namespace path: %w", err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), s.CgroupNS.Value); err != nil {
return err
@@ -446,7 +477,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
switch s.NetNS.NSMode {
case specgen.Path:
if _, err := os.Stat(s.NetNS.Value); err != nil {
- return errors.Wrap(err, "cannot find specified network namespace path")
+ return fmt.Errorf("cannot find specified network namespace path: %w", err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), s.NetNS.Value); err != nil {
return err
@@ -488,12 +519,9 @@ func GetNamespaceOptions(ns []string, netnsIsHost bool) ([]libpod.PodCreateOptio
case "cgroup":
options = append(options, libpod.WithPodCgroup())
case "net":
- // share the netns setting with other containers in the pod only when it is not set to host
- if !netnsIsHost {
- options = append(options, libpod.WithPodNet())
- }
+ options = append(options, libpod.WithPodNet())
case "mnt":
- return erroredOptions, errors.Errorf("Mount sharing functionality not supported on pod level")
+ return erroredOptions, fmt.Errorf("mount sharing functionality not supported on pod level")
case "pid":
options = append(options, libpod.WithPodPID())
case "user":
@@ -506,7 +534,7 @@ func GetNamespaceOptions(ns []string, netnsIsHost bool) ([]libpod.PodCreateOptio
case "none":
return erroredOptions, nil
default:
- return erroredOptions, errors.Errorf("Invalid kernel namespace to share: %s. Options are: cgroup, ipc, net, pid, uts or none", toShare)
+ return erroredOptions, fmt.Errorf("invalid kernel namespace to share: %s. Options are: cgroup, ipc, net, pid, uts or none", toShare)
}
}
return options, nil
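With the netnsIsHost special case removed, --share handling becomes uniform. An illustrative call (argument values invented, only option constructors visible in this hunk are named):

opts, err := GetNamespaceOptions([]string{"net", "pid", "cgroup"}, true)
// err == nil; opts now includes libpod.WithPodNet(), libpod.WithPodPID() and
// libpod.WithPodCgroup() — "net" is no longer dropped when netnsIsHost is true,
// since the host decision is made later from the pod's namespace mode.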
diff --git a/pkg/specgen/generate/oci.go b/pkg/specgen/generate/oci.go
index dda2de6e4..bb5f2d0ec 100644
--- a/pkg/specgen/generate/oci.go
+++ b/pkg/specgen/generate/oci.go
@@ -3,6 +3,7 @@ package generate
import (
"context"
"encoding/json"
+ "fmt"
"path"
"strings"
@@ -15,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/specgen"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -117,7 +117,7 @@ func makeCommand(s *specgen.SpecGenerator, imageData *libimage.ImageData, rtc *c
finalCommand = append(finalCommand, command...)
if len(finalCommand) == 0 {
- return nil, errors.Errorf("no command or entrypoint provided, and no CMD or ENTRYPOINT from image")
+ return nil, fmt.Errorf("no command or entrypoint provided, and no CMD or ENTRYPOINT from image")
}
if s.Init {
@@ -126,7 +126,7 @@ func makeCommand(s *specgen.SpecGenerator, imageData *libimage.ImageData, rtc *c
initPath = rtc.Engine.InitPath
}
if initPath == "" {
- return nil, errors.Errorf("no path to init binary found but container requested an init")
+ return nil, fmt.Errorf("no path to init binary found but container requested an init")
}
finalCommand = append([]string{define.ContainerInitPath, "--"}, finalCommand...)
}
@@ -298,8 +298,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
g.AddAnnotation(key, val)
}
- switch {
- case compatibleOptions.InfraResources == nil && s.ResourceLimits != nil:
+ if s.ResourceLimits != nil {
out, err := json.Marshal(s.ResourceLimits)
if err != nil {
return nil, err
@@ -308,43 +307,17 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
if err != nil {
return nil, err
}
- case s.ResourceLimits != nil: // if we have predefined resource limits we need to make sure we keep the infra and container limits
- originalResources, err := json.Marshal(s.ResourceLimits)
- if err != nil {
- return nil, err
- }
- infraResources, err := json.Marshal(compatibleOptions.InfraResources)
- if err != nil {
- return nil, err
- }
- err = json.Unmarshal(infraResources, s.ResourceLimits) // put infra's resource limits in the container
- if err != nil {
- return nil, err
- }
- err = json.Unmarshal(originalResources, s.ResourceLimits) // make sure we did not override anything
- if err != nil {
- return nil, err
- }
g.Config.Linux.Resources = s.ResourceLimits
- default:
- g.Config.Linux.Resources = compatibleOptions.InfraResources
}
// Devices
-
// set the default rule at the beginning of device configuration
if !inUserNS && !s.Privileged {
g.AddLinuxResourcesDevice(false, "", nil, nil, "rwm")
}
var userDevices []spec.LinuxDevice
- if s.Privileged {
- // If privileged, we need to add all the host devices to the
- // spec. We do not add the user provided ones because we are
- // already adding them all.
- if err := addPrivilegedDevices(&g); err != nil {
- return nil, err
- }
- } else {
+
+ if !s.Privileged {
// add default devices from containers.conf
for _, device := range rtc.Containers.Devices {
if err = DevicesFromPath(&g, device); err != nil {
@@ -375,9 +348,9 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
for k, v := range s.WeightDevice {
statT := unix.Stat_t{}
if err := unix.Stat(k, &statT); err != nil {
- return nil, errors.Wrapf(err, "failed to inspect '%s' in --blkio-weight-device", k)
+ return nil, fmt.Errorf("failed to inspect '%s' in --blkio-weight-device: %w", k, err)
}
- g.AddLinuxResourcesBlockIOWeightDevice((int64(unix.Major(uint64(statT.Rdev)))), (int64(unix.Minor(uint64(statT.Rdev)))), *v.Weight) // nolint: unconvert
+ g.AddLinuxResourcesBlockIOWeightDevice((int64(unix.Major(uint64(statT.Rdev)))), (int64(unix.Minor(uint64(statT.Rdev)))), *v.Weight) //nolint: unconvert
}
BlockAccessToKernelFilesystems(s.Privileged, s.PidNS.IsHost(), s.Mask, s.Unmask, &g)
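The stat-based lookup above is the usual way to turn a --blkio-weight-device path into the major/minor pair the OCI spec expects. A small stand-alone sketch (helper name invented; assumes fmt and golang.org/x/sys/unix):

func devNumbers(path string) (int64, int64, error) {
	st := unix.Stat_t{}
	if err := unix.Stat(path, &st); err != nil {
		return 0, 0, fmt.Errorf("failed to inspect '%s' in --blkio-weight-device: %w", path, err)
	}
	// Rdev encodes both device numbers; unix.Major / unix.Minor split them apart.
	return int64(unix.Major(uint64(st.Rdev))), int64(unix.Minor(uint64(st.Rdev))), nil
}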
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index 5b7bb2b57..212d613fe 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -2,13 +2,17 @@ package generate
import (
"context"
+ "fmt"
"net"
+ "os"
+ "strconv"
+ "strings"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/specgen"
- "github.com/pkg/errors"
+ "github.com/containers/podman/v4/pkg/specgenutil"
"github.com/sirupsen/logrus"
)
@@ -55,6 +59,7 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) {
if err != nil {
return nil, err
}
+
spec.Pod = pod.ID()
opts = append(opts, rt.WithPod(pod))
spec.CgroupParent = pod.CgroupParent()
@@ -141,30 +146,33 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) {
case specgen.Bridge:
p.InfraContainerSpec.NetNS.NSMode = specgen.Bridge
logrus.Debugf("Pod using bridge network mode")
+ case specgen.Private:
+ p.InfraContainerSpec.NetNS.NSMode = specgen.Private
+ logrus.Debugf("Pod will use default network mode")
case specgen.Host:
logrus.Debugf("Pod will use host networking")
if len(p.InfraContainerSpec.PortMappings) > 0 ||
len(p.InfraContainerSpec.Networks) > 0 ||
p.InfraContainerSpec.NetNS.NSMode == specgen.NoNetwork {
- return nil, errors.Wrapf(define.ErrInvalidArg, "cannot set host network if network-related configuration is specified")
+ return nil, fmt.Errorf("cannot set host network if network-related configuration is specified: %w", define.ErrInvalidArg)
}
p.InfraContainerSpec.NetNS.NSMode = specgen.Host
case specgen.Slirp:
logrus.Debugf("Pod will use slirp4netns")
- if p.InfraContainerSpec.NetNS.NSMode != "host" {
+ if p.InfraContainerSpec.NetNS.NSMode != specgen.Host {
p.InfraContainerSpec.NetworkOptions = p.NetworkOptions
- p.InfraContainerSpec.NetNS.NSMode = specgen.NamespaceMode("slirp4netns")
+ p.InfraContainerSpec.NetNS.NSMode = specgen.Slirp
}
case specgen.NoNetwork:
logrus.Debugf("Pod will not use networking")
if len(p.InfraContainerSpec.PortMappings) > 0 ||
len(p.InfraContainerSpec.Networks) > 0 ||
- p.InfraContainerSpec.NetNS.NSMode == "host" {
- return nil, errors.Wrapf(define.ErrInvalidArg, "cannot disable pod network if network-related configuration is specified")
+ p.InfraContainerSpec.NetNS.NSMode == specgen.Host {
+ return nil, fmt.Errorf("cannot disable pod network if network-related configuration is specified: %w", define.ErrInvalidArg)
}
p.InfraContainerSpec.NetNS.NSMode = specgen.NoNetwork
default:
- return nil, errors.Errorf("pods presently do not support network mode %s", p.NetNS.NSMode)
+ return nil, fmt.Errorf("pods presently do not support network mode %s", p.NetNS.NSMode)
}
if len(p.InfraCommand) > 0 {
@@ -207,3 +215,88 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) {
p.InfraContainerSpec.Image = p.InfraImage
return p.InfraContainerSpec, nil
}
+
+func PodConfigToSpec(rt *libpod.Runtime, spec *specgen.PodSpecGenerator, infraOptions *entities.ContainerCreateOptions, id string) (p *libpod.Pod, err error) {
+ pod, err := rt.LookupPod(id)
+ if err != nil {
+ return nil, err
+ }
+
+ infraSpec := &specgen.SpecGenerator{}
+ if pod.HasInfraContainer() {
+ infraID, err := pod.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+ _, _, err = ConfigToSpec(rt, infraSpec, infraID)
+ if err != nil {
+ return nil, err
+ }
+
+ infraSpec.Hostname = ""
+ infraSpec.CgroupParent = ""
+ infraSpec.Pod = "" // remove old pod...
+ infraOptions.IsClone = true
+ infraOptions.IsInfra = true
+
+ n := infraSpec.Name
+ _, err = rt.LookupContainer(n + "-clone")
+ if err == nil { // if we found a ctr with this name, append the suffix so the switch below can detect the existing clone
+ n += "-clone"
+ }
+
+ switch {
+ case strings.Contains(n, "-clone"):
+ ind := strings.Index(n, "-clone") + 6
+ num, err := strconv.Atoi(n[ind:])
+ if num == 0 && err != nil { // clone1 is hard to get with this logic, just check for it here.
+ _, err = rt.LookupContainer(n + "1")
+ if err != nil {
+ infraSpec.Name = n + "1"
+ break
+ }
+ } else {
+ n = n[0:ind]
+ }
+ err = nil
+ count := num
+ for err == nil {
+ count++
+ tempN := n + strconv.Itoa(count)
+ _, err = rt.LookupContainer(tempN)
+ }
+ n += strconv.Itoa(count)
+ infraSpec.Name = n
+ default:
+ infraSpec.Name = n + "-clone"
+ }
+
+ err = specgenutil.FillOutSpecGen(infraSpec, infraOptions, []string{})
+ if err != nil {
+ return nil, err
+ }
+
+ out, err := CompleteSpec(context.Background(), rt, infraSpec)
+ if err != nil {
+ return nil, err
+ }
+
+ // Print warnings
+ if len(out) > 0 {
+ for _, w := range out {
+ fmt.Println("Could not properly complete the spec as expected:")
+ fmt.Fprintf(os.Stderr, "%s\n", w)
+ }
+ }
+
+ spec.InfraContainerSpec = infraSpec
+ }
+
+ // need to reset hostname, name etc of both pod and infra
+ spec.Hostname = ""
+
+ if len(spec.InfraContainerSpec.Image) > 0 {
+ spec.InfraImage = spec.InfraContainerSpec.Image
+ }
+ return pod, nil
+}
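The naming loop above walks existing containers until it finds a free suffix. A condensed, illustrative equivalent (helper and predicate are invented for the sketch, assumes fmt):

func nextCloneName(base string, taken func(string) bool) string {
	name := base + "-clone"
	if !taken(name) {
		return name // the first clone gets the bare "-clone" suffix
	}
	for i := 1; ; i++ {
		candidate := fmt.Sprintf("%s-clone%d", base, i)
		if !taken(candidate) {
			return candidate // e.g. infra -> infra-clone -> infra-clone1 -> infra-clone2 ...
		}
	}
}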
diff --git a/pkg/specgen/generate/ports.go b/pkg/specgen/generate/ports.go
index bec548d3b..572f256c1 100644
--- a/pkg/specgen/generate/ports.go
+++ b/pkg/specgen/generate/ports.go
@@ -10,10 +10,9 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/utils"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/specgenutil"
- "github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -46,7 +45,7 @@ func joinTwoPortsToRangePortIfPossible(ports *[]types.PortMapping, allHostPorts,
// if both host port ranges overlap and the container port range did not match
// we have to error because we cannot assign the same host port to more than one container port
if previousPort.HostPort+previousPort.Range-1 > port.HostPort {
- return nil, errors.Errorf("conflicting port mappings for host port %d (protocol %s)", port.HostPort, port.Protocol)
+ return nil, fmt.Errorf("conflicting port mappings for host port %d (protocol %s)", port.HostPort, port.Protocol)
}
}
// we could not join the ports so we append the old one to the list
@@ -127,7 +126,7 @@ outer:
rangePort = fmt.Sprintf("with range %d ", port.Range)
}
- return port, errors.Errorf("failed to find an open port to expose container port %d %son the host", port.ContainerPort, rangePort)
+ return port, fmt.Errorf("failed to find an open port to expose container port %d %son the host", port.ContainerPort, rangePort)
}
// Parse port maps to port mappings.
@@ -163,7 +162,7 @@ func ParsePortMapping(portMappings []types.PortMapping, exposePorts map[uint16][
}
if port.HostIP != "" {
if ip := net.ParseIP(port.HostIP); ip == nil {
- return nil, errors.Errorf("invalid IP address %q in port mapping", port.HostIP)
+ return nil, fmt.Errorf("invalid IP address %q in port mapping", port.HostIP)
}
}
@@ -174,14 +173,14 @@ func ParsePortMapping(portMappings []types.PortMapping, exposePorts map[uint16][
}
containerPort := port.ContainerPort
if containerPort == 0 {
- return nil, errors.Errorf("container port number must be non-0")
+ return nil, fmt.Errorf("container port number must be non-0")
}
hostPort := port.HostPort
if uint32(portRange-1)+uint32(containerPort) > 65535 {
- return nil, errors.Errorf("container port range exceeds maximum allowable port number")
+ return nil, fmt.Errorf("container port range exceeds maximum allowable port number")
}
if uint32(portRange-1)+uint32(hostPort) > 65535 {
- return nil, errors.Errorf("host port range exceeds maximum allowable port number")
+ return nil, fmt.Errorf("host port range exceeds maximum allowable port number")
}
hostProtoMap, ok := portMap[port.HostIP]
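The two range checks above guard 16-bit port arithmetic; restated as a tiny helper with a worked example (illustrative, not part of the patch):

// rangeFits reports whether a mapping of `length` consecutive ports starting at
// `start` stays within the 16-bit port space (length is assumed to be >= 1).
func rangeFits(start uint16, length uint16) bool {
	return uint32(length-1)+uint32(start) <= 65535
}

// rangeFits(60000, 1000) == true   (60000..60999 fits)
// rangeFits(65000, 1000) == false  (65000..65999 overflows, so the mapping is rejected)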
@@ -351,11 +350,11 @@ func createPortMappings(s *specgen.SpecGenerator, imageData *libimage.ImageData)
for _, expose := range []map[uint16]string{expose, s.Expose} {
for port, proto := range expose {
if port == 0 {
- return nil, nil, errors.Errorf("cannot expose 0 as it is not a valid port number")
+ return nil, nil, fmt.Errorf("cannot expose 0 as it is not a valid port number")
}
protocols, err := checkProtocol(proto, false)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error validating protocols for exposed port %d", port)
+ return nil, nil, fmt.Errorf("error validating protocols for exposed port %d: %w", port, err)
}
toExpose[port] = appendProtocolsNoDuplicates(toExpose[port], protocols)
}
@@ -387,11 +386,11 @@ func checkProtocol(protocol string, allowSCTP bool) ([]string, error) {
protocols[protoUDP] = struct{}{}
case protoSCTP:
if !allowSCTP {
- return nil, errors.Errorf("protocol SCTP is not allowed for exposed ports")
+ return nil, fmt.Errorf("protocol SCTP is not allowed for exposed ports")
}
protocols[protoSCTP] = struct{}{}
default:
- return nil, errors.Errorf("unrecognized protocol %q in port mapping", p)
+ return nil, fmt.Errorf("unrecognized protocol %q in port mapping", p)
}
}
@@ -402,7 +401,7 @@ func checkProtocol(protocol string, allowSCTP bool) ([]string, error) {
// This shouldn't be possible, but check anyways
if len(finalProto) == 0 {
- return nil, errors.Errorf("no valid protocols specified for port mapping")
+ return nil, fmt.Errorf("no valid protocols specified for port mapping")
}
return finalProto, nil
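The protocol validation above splits comma-separated values, de-duplicates them, and only admits sctp for published (not merely exposed) ports. Roughly, as a simplified sketch rather than the real helper (assumes fmt and strings; the empty-string default is omitted):

func normalizeProtocols(protocol string, allowSCTP bool) ([]string, error) {
	seen := map[string]struct{}{}
	out := []string{}
	for _, p := range strings.Split(protocol, ",") {
		switch p {
		case "tcp", "udp":
			// always allowed
		case "sctp":
			if !allowSCTP {
				return nil, fmt.Errorf("protocol SCTP is not allowed for exposed ports")
			}
		default:
			return nil, fmt.Errorf("unrecognized protocol %q in port mapping", p)
		}
		if _, ok := seen[p]; !ok {
			seen[p] = struct{}{}
			out = append(out, p)
		}
	}
	return out, nil
}
// normalizeProtocols("tcp,udp,tcp", false) -> ["tcp" "udp"], nil
// normalizeProtocols("sctp", false)        -> nil, error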
@@ -415,7 +414,7 @@ func GenExposedPorts(exposedPorts map[string]struct{}) (map[uint16]string, error
}
toReturn, err := specgenutil.CreateExpose(expose)
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert image EXPOSE")
+ return nil, fmt.Errorf("unable to convert image EXPOSE: %w", err)
}
return toReturn, nil
}
diff --git a/pkg/specgen/generate/security.go b/pkg/specgen/generate/security.go
index ec52164ab..aacefcbac 100644
--- a/pkg/specgen/generate/security.go
+++ b/pkg/specgen/generate/security.go
@@ -1,19 +1,20 @@
package generate
import (
+ "fmt"
"strings"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/apparmor"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/util"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -31,11 +32,11 @@ func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig s
} else if pidConfig.IsContainer() {
ctr, err := runtime.LookupContainer(pidConfig.Value)
if err != nil {
- return errors.Wrapf(err, "container %q not found", pidConfig.Value)
+ return fmt.Errorf("container %q not found: %w", pidConfig.Value, err)
}
secopts, err := label.DupSecOpt(ctr.ProcessLabel())
if err != nil {
- return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
+ return fmt.Errorf("failed to duplicate label %q : %w", ctr.ProcessLabel(), err)
}
labelOpts = append(labelOpts, secopts...)
}
@@ -45,11 +46,11 @@ func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig s
} else if ipcConfig.IsContainer() {
ctr, err := runtime.LookupContainer(ipcConfig.Value)
if err != nil {
- return errors.Wrapf(err, "container %q not found", ipcConfig.Value)
+ return fmt.Errorf("container %q not found: %w", ipcConfig.Value, err)
}
secopts, err := label.DupSecOpt(ctr.ProcessLabel())
if err != nil {
- return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
+ return fmt.Errorf("failed to duplicate label %q : %w", ctr.ProcessLabel(), err)
}
labelOpts = append(labelOpts, secopts...)
}
@@ -62,7 +63,7 @@ func setupApparmor(s *specgen.SpecGenerator, rtc *config.Config, g *generate.Gen
hasProfile := len(s.ApparmorProfile) > 0
if !apparmor.IsEnabled() {
if hasProfile && s.ApparmorProfile != "unconfined" {
- return errors.Errorf("Apparmor profile %q specified, but Apparmor is not enabled on this system", s.ApparmorProfile)
+ return fmt.Errorf("apparmor profile %q specified, but Apparmor is not enabled on this system", s.ApparmorProfile)
}
return nil
}
@@ -120,7 +121,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
// capabilities, required to run the container.
var capsRequiredRequested []string
for key, val := range s.Labels {
- if util.StringInSlice(key, capabilities.ContainerImageLabels) {
+ if cutil.StringInSlice(key, capabilities.ContainerImageLabels) {
capsRequiredRequested = strings.Split(val, ",")
}
}
@@ -128,11 +129,11 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
// Pass capRequiredRequested in CapAdd field to normalize capabilities names
capsRequired, err := capabilities.MergeCapabilities(nil, capsRequiredRequested, nil)
if err != nil {
- return errors.Wrapf(err, "capabilities requested by user or image are not valid: %q", strings.Join(capsRequired, ","))
+ return fmt.Errorf("capabilities requested by user or image are not valid: %q: %w", strings.Join(capsRequired, ","), err)
}
// Verify all capRequired are in the capList
for _, cap := range capsRequired {
- if !util.StringInSlice(cap, caplist) {
+ if !cutil.StringInSlice(cap, caplist) {
privCapsRequired = append(privCapsRequired, cap)
}
}
@@ -160,7 +161,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
} else {
mergedCaps, err := capabilities.MergeCapabilities(nil, s.CapAdd, nil)
if err != nil {
- return errors.Wrapf(err, "capabilities requested by user are not valid: %q", strings.Join(s.CapAdd, ","))
+ return fmt.Errorf("capabilities requested by user are not valid: %q: %w", strings.Join(s.CapAdd, ","), err)
}
boundingSet, err := capabilities.BoundingSet()
if err != nil {
@@ -244,17 +245,17 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
for sysctlKey, sysctlVal := range s.Sysctl {
if s.IpcNS.IsHost() && strings.HasPrefix(sysctlKey, "fs.mqueue.") {
- return errors.Wrapf(define.ErrInvalidArg, "sysctl %s=%s can't be set since IPC Namespace set to host", sysctlKey, sysctlVal)
+ return fmt.Errorf("sysctl %s=%s can't be set since IPC Namespace set to host: %w", sysctlKey, sysctlVal, define.ErrInvalidArg)
}
// Ignore net sysctls if --net=host
if s.NetNS.IsHost() && strings.HasPrefix(sysctlKey, "net.") {
- return errors.Wrapf(define.ErrInvalidArg, "sysctl %s=%s can't be set since Network Namespace set to host", sysctlKey, sysctlVal)
+ return fmt.Errorf("sysctl %s=%s can't be set since Network Namespace set to host: %w", sysctlKey, sysctlVal, define.ErrInvalidArg)
}
// Ignore uts sysctls if --uts=host
if s.UtsNS.IsHost() && (strings.HasPrefix(sysctlKey, "kernel.domainname") || strings.HasPrefix(sysctlKey, "kernel.hostname")) {
- return errors.Wrapf(define.ErrInvalidArg, "sysctl %s=%s can't be set since UTS Namespace set to host", sysctlKey, sysctlVal)
+ return fmt.Errorf("sysctl %s=%s can't be set since UTS Namespace set to host: %w", sysctlKey, sysctlVal, define.ErrInvalidArg)
}
g.AddLinuxSysctl(sysctlKey, sysctlVal)
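The three rejections above encode the same rule: a sysctl that targets a kernel namespace is only allowed when that namespace is private to the container. Condensed into one illustrative predicate (not the real code, assumes strings):

func sysctlAllowed(key string, ipcHost, netHost, utsHost bool) bool {
	switch {
	case ipcHost && strings.HasPrefix(key, "fs.mqueue."):
		return false // message-queue sysctls need a private IPC namespace
	case netHost && strings.HasPrefix(key, "net."):
		return false // net.* sysctls need a private network namespace
	case utsHost && (strings.HasPrefix(key, "kernel.domainname") || strings.HasPrefix(key, "kernel.hostname")):
		return false // hostname/domainname sysctls need a private UTS namespace
	}
	return true
}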
diff --git a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go
index 0a4d03780..867bb4b79 100644
--- a/pkg/specgen/generate/storage.go
+++ b/pkg/specgen/generate/storage.go
@@ -2,6 +2,7 @@ package generate
import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -16,11 +17,10 @@ import (
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-var errDuplicateDest = errors.Errorf("duplicate mount destination")
+var errDuplicateDest = errors.New("duplicate mount destination")
// Produce final mounts and named volumes for a container
func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, rtc *config.Config, img *libimage.Image) ([]spec.Mount, []*specgen.NamedVolume, []*specgen.OverlayVolume, error) {
@@ -63,7 +63,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru
}
cleanDestination := filepath.Clean(m.Destination)
if _, ok := unifiedMounts[cleanDestination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, "conflict in specified mounts - multiple mounts at %q", cleanDestination)
+ return nil, nil, nil, fmt.Errorf("conflict in specified mounts - multiple mounts at %q: %w", cleanDestination, errDuplicateDest)
}
unifiedMounts[cleanDestination] = m
}
@@ -84,7 +84,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru
}
cleanDestination := filepath.Clean(v.Dest)
if _, ok := unifiedVolumes[cleanDestination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, "conflict in specified volumes - multiple volumes at %q", cleanDestination)
+ return nil, nil, nil, fmt.Errorf("conflict in specified volumes - multiple volumes at %q: %w", cleanDestination, errDuplicateDest)
}
unifiedVolumes[cleanDestination] = v
}
@@ -105,7 +105,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru
}
cleanDestination := filepath.Clean(v.Destination)
if _, ok := unifiedOverlays[cleanDestination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, "conflict in specified volumes - multiple volumes at %q", cleanDestination)
+ return nil, nil, nil, fmt.Errorf("conflict in specified volumes - multiple volumes at %q: %w", cleanDestination, errDuplicateDest)
}
unifiedOverlays[cleanDestination] = v
}
@@ -131,7 +131,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru
return nil, nil, nil, err
}
if _, ok := unifiedMounts[initMount.Destination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, "conflict with mount added by --init to %q", initMount.Destination)
+ return nil, nil, nil, fmt.Errorf("conflict with mount added by --init to %q: %w", initMount.Destination, errDuplicateDest)
}
unifiedMounts[initMount.Destination] = initMount
}
@@ -161,12 +161,12 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru
// Check for conflicts between named volumes and mounts
for dest := range baseMounts {
if _, ok := baseVolumes[dest]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, "conflict at mount destination %v", dest)
+ return nil, nil, nil, fmt.Errorf("conflict at mount destination %v: %w", dest, errDuplicateDest)
}
}
for dest := range baseVolumes {
if _, ok := baseMounts[dest]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, "conflict at mount destination %v", dest)
+ return nil, nil, nil, fmt.Errorf("conflict at mount destination %v: %w", dest, errDuplicateDest)
}
}
// Final step: maps to arrays
@@ -175,7 +175,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru
if mount.Type == define.TypeBind {
absSrc, err := filepath.Abs(mount.Source)
if err != nil {
- return nil, nil, nil, errors.Wrapf(err, "error getting absolute path of %s", mount.Source)
+ return nil, nil, nil, fmt.Errorf("error getting absolute path of %s: %w", mount.Source, err)
}
mount.Source = absSrc
}
@@ -208,7 +208,7 @@ func getImageVolumes(ctx context.Context, img *libimage.Image, s *specgen.SpecGe
inspect, err := img.Inspect(ctx, nil)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error inspecting image to get image volumes")
+ return nil, nil, fmt.Errorf("error inspecting image to get image volumes: %w", err)
}
for volume := range inspect.Config.Volumes {
logrus.Debugf("Image has volume at %q", volume)
@@ -252,16 +252,16 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s
switch opt {
case "z":
if setZ {
- return nil, nil, errors.Errorf("cannot set :z more than once in mount options")
+ return nil, nil, errors.New("cannot set :z more than once in mount options")
}
setZ = true
case "ro", "rw":
if setRORW {
- return nil, nil, errors.Errorf("cannot set ro or rw options more than once")
+ return nil, nil, errors.New("cannot set ro or rw options more than once")
}
setRORW = true
default:
- return nil, nil, errors.Errorf("invalid option %q specified - volumes from another container can only use z,ro,rw options", opt)
+ return nil, nil, fmt.Errorf("invalid option %q specified - volumes from another container can only use z,ro,rw options", opt)
}
}
options = splitOpts
@@ -269,7 +269,7 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s
ctr, err := runtime.LookupContainer(splitVol[0])
if err != nil {
- return nil, nil, errors.Wrapf(err, "error looking up container %q for volumes-from", splitVol[0])
+ return nil, nil, fmt.Errorf("error looking up container %q for volumes-from: %w", splitVol[0], err)
}
logrus.Debugf("Adding volumes from container %s", ctr.ID())
@@ -290,7 +290,7 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s
// and append them in if we can find them.
spec := ctr.Spec()
if spec == nil {
- return nil, nil, errors.Errorf("retrieving container %s spec for volumes-from", ctr.ID())
+ return nil, nil, fmt.Errorf("retrieving container %s spec for volumes-from", ctr.ID())
}
for _, mnt := range spec.Mounts {
if mnt.Type != define.TypeBind {
@@ -364,16 +364,16 @@ func addContainerInitBinary(s *specgen.SpecGenerator, path string) (spec.Mount,
}
if path == "" {
- return mount, fmt.Errorf("please specify a path to the container-init binary")
+ return mount, errors.New("please specify a path to the container-init binary")
}
if !s.PidNS.IsPrivate() {
- return mount, fmt.Errorf("cannot add init binary as PID 1 (PID namespace isn't private)")
+ return mount, errors.New("cannot add init binary as PID 1 (PID namespace isn't private)")
}
if s.Systemd == "always" {
- return mount, fmt.Errorf("cannot use container-init binary with systemd=always")
+ return mount, errors.New("cannot use container-init binary with systemd=always")
}
if _, err := os.Stat(path); os.IsNotExist(err) {
- return mount, errors.Wrap(err, "container-init binary not found on the host")
+ return mount, fmt.Errorf("container-init binary not found on the host: %w", err)
}
return mount, nil
}
diff --git a/pkg/specgen/generate/validate.go b/pkg/specgen/generate/validate.go
index 44c7818e7..9c933d747 100644
--- a/pkg/specgen/generate/validate.go
+++ b/pkg/specgen/generate/validate.go
@@ -1,6 +1,9 @@
package generate
import (
+ "errors"
+ "fmt"
+ "io/ioutil"
"os"
"path/filepath"
@@ -8,7 +11,6 @@ import (
"github.com/containers/common/pkg/sysinfo"
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/utils"
- "github.com/pkg/errors"
)
// Verify resource limits are sanely set when running on cgroup v1.
@@ -22,7 +24,7 @@ func verifyContainerResourcesCgroupV1(s *specgen.SpecGenerator) ([]string, error
}
if s.ResourceLimits.Unified != nil {
- return nil, errors.New("Cannot use --cgroup-conf without cgroup v2")
+ return nil, errors.New("cannot use --cgroup-conf without cgroup v2")
}
// Memory checks
@@ -48,7 +50,7 @@ func verifyContainerResourcesCgroupV1(s *specgen.SpecGenerator) ([]string, error
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.")
memory.Swappiness = nil
} else if *memory.Swappiness > 100 {
- return warnings, errors.Errorf("invalid value: %v, valid memory swappiness range is 0-100", *memory.Swappiness)
+ return warnings, fmt.Errorf("invalid value: %v, valid memory swappiness range is 0-100", *memory.Swappiness)
}
}
if memory.Reservation != nil && !sysInfo.MemoryReservation {
@@ -103,18 +105,18 @@ func verifyContainerResourcesCgroupV1(s *specgen.SpecGenerator) ([]string, error
cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(cpu.Cpus)
if err != nil {
- return warnings, errors.Errorf("invalid value %s for cpuset cpus", cpu.Cpus)
+ return warnings, fmt.Errorf("invalid value %s for cpuset cpus", cpu.Cpus)
}
if !cpusAvailable {
- return warnings, errors.Errorf("requested CPUs are not available - requested %s, available: %s", cpu.Cpus, sysInfo.Cpus)
+ return warnings, fmt.Errorf("requested CPUs are not available - requested %s, available: %s", cpu.Cpus, sysInfo.Cpus)
}
memsAvailable, err := sysInfo.IsCpusetMemsAvailable(cpu.Mems)
if err != nil {
- return warnings, errors.Errorf("invalid value %s for cpuset mems", cpu.Mems)
+ return warnings, fmt.Errorf("invalid value %s for cpuset mems", cpu.Mems)
}
if !memsAvailable {
- return warnings, errors.Errorf("requested memory nodes are not available - requested %s, available: %s", cpu.Mems, sysInfo.Mems)
+ return warnings, fmt.Errorf("requested memory nodes are not available - requested %s, available: %s", cpu.Mems, sysInfo.Mems)
}
}
@@ -166,6 +168,14 @@ func verifyContainerResourcesCgroupV2(s *specgen.SpecGenerator) ([]string, error
if err != nil {
return warnings, err
}
+
+ if own == "/" {
+ // If running under the root cgroup, try to create or reuse a "probe" cgroup to read memory values
+ own = "podman_probe"
+ _ = os.MkdirAll(filepath.Join("/sys/fs/cgroup", own), 0o755)
+ _ = ioutil.WriteFile("/sys/fs/cgroup/cgroup.subtree_control", []byte("+memory"), 0o644)
+ }
+
memoryMax := filepath.Join("/sys/fs/cgroup", own, "memory.max")
memorySwapMax := filepath.Join("/sys/fs/cgroup", own, "memory.swap.max")
_, errMemoryMax := os.Stat(memoryMax)
diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go
index 7a7ca2706..03a2049f6 100644
--- a/pkg/specgen/namespaces.go
+++ b/pkg/specgen/namespaces.go
@@ -1,6 +1,7 @@
package specgen
import (
+ "errors"
"fmt"
"net"
"os"
@@ -8,13 +9,12 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/cgroups"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
)
type NamespaceMode string
@@ -158,18 +158,18 @@ func validateNetNS(n *Namespace) error {
case "", Default, Host, Path, FromContainer, FromPod, Private, NoNetwork, Bridge:
break
default:
- return errors.Errorf("invalid network %q", n.NSMode)
+ return fmt.Errorf("invalid network %q", n.NSMode)
}
// Path and From Container MUST have a string value set
if n.NSMode == Path || n.NSMode == FromContainer {
if len(n.Value) < 1 {
- return errors.Errorf("namespace mode %s requires a value", n.NSMode)
+ return fmt.Errorf("namespace mode %s requires a value", n.NSMode)
}
} else if n.NSMode != Slirp {
// All others except must NOT set a string value
if len(n.Value) > 0 {
- return errors.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode)
+ return fmt.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode)
}
}
@@ -197,20 +197,20 @@ func (n *Namespace) validate() error {
case "", Default, Host, Path, FromContainer, FromPod, Private:
// Valid, do nothing
case NoNetwork, Bridge, Slirp:
- return errors.Errorf("cannot use network modes with non-network namespace")
+ return errors.New("cannot use network modes with non-network namespace")
default:
- return errors.Errorf("invalid namespace type %s specified", n.NSMode)
+ return fmt.Errorf("invalid namespace type %s specified", n.NSMode)
}
// Path and From Container MUST have a string value set
if n.NSMode == Path || n.NSMode == FromContainer {
if len(n.Value) < 1 {
- return errors.Errorf("namespace mode %s requires a value", n.NSMode)
+ return fmt.Errorf("namespace mode %s requires a value", n.NSMode)
}
} else {
// All others must NOT set a string value
if len(n.Value) > 0 {
- return errors.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode)
+ return fmt.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode)
}
}
return nil
@@ -231,19 +231,19 @@ func ParseNamespace(ns string) (Namespace, error) {
case strings.HasPrefix(ns, "ns:"):
split := strings.SplitN(ns, ":", 2)
if len(split) != 2 {
- return toReturn, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"")
+ return toReturn, fmt.Errorf("must provide a path to a namespace when specifying \"ns:\"")
}
toReturn.NSMode = Path
toReturn.Value = split[1]
case strings.HasPrefix(ns, "container:"):
split := strings.SplitN(ns, ":", 2)
if len(split) != 2 {
- return toReturn, errors.Errorf("must provide name or ID or a container when specifying \"container:\"")
+ return toReturn, fmt.Errorf("must provide name or ID or a container when specifying \"container:\"")
}
toReturn.NSMode = FromContainer
toReturn.Value = split[1]
default:
- return toReturn, errors.Errorf("unrecognized namespace mode %s passed", ns)
+ return toReturn, fmt.Errorf("unrecognized namespace mode %s passed", ns)
}
return toReturn, nil
@@ -266,7 +266,7 @@ func ParseCgroupNamespace(ns string) (Namespace, error) {
case "private", "":
toReturn.NSMode = Private
default:
- return toReturn, errors.Errorf("unrecognized cgroup namespace mode %s passed", ns)
+ return toReturn, fmt.Errorf("unrecognized cgroup namespace mode %s passed", ns)
}
} else {
toReturn.NSMode = Host
@@ -300,7 +300,7 @@ func ParseUserNamespace(ns string) (Namespace, error) {
case strings.HasPrefix(ns, "auto:"):
split := strings.SplitN(ns, ":", 2)
if len(split) != 2 {
- return toReturn, errors.Errorf("invalid setting for auto: mode")
+ return toReturn, errors.New("invalid setting for auto: mode")
}
toReturn.NSMode = Auto
toReturn.Value = split[1]
@@ -318,62 +318,6 @@ func ParseUserNamespace(ns string) (Namespace, error) {
return ParseNamespace(ns)
}
-// ParseNetworkNamespace parses a network namespace specification in string
-// form.
-// Returns a namespace and (optionally) a list of CNI networks to join.
-func ParseNetworkNamespace(ns string, rootlessDefaultCNI bool) (Namespace, map[string]types.PerNetworkOptions, error) {
- toReturn := Namespace{}
- networks := make(map[string]types.PerNetworkOptions)
- // Net defaults to Slirp on rootless
- switch {
- case ns == string(Slirp), strings.HasPrefix(ns, string(Slirp)+":"):
- toReturn.NSMode = Slirp
- case ns == string(FromPod):
- toReturn.NSMode = FromPod
- case ns == "" || ns == string(Default) || ns == string(Private):
- if rootless.IsRootless() {
- if rootlessDefaultCNI {
- toReturn.NSMode = Bridge
- } else {
- toReturn.NSMode = Slirp
- }
- } else {
- toReturn.NSMode = Bridge
- }
- case ns == string(Bridge):
- toReturn.NSMode = Bridge
- case ns == string(NoNetwork):
- toReturn.NSMode = NoNetwork
- case ns == string(Host):
- toReturn.NSMode = Host
- case strings.HasPrefix(ns, "ns:"):
- split := strings.SplitN(ns, ":", 2)
- if len(split) != 2 {
- return toReturn, nil, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"")
- }
- toReturn.NSMode = Path
- toReturn.Value = split[1]
- case strings.HasPrefix(ns, string(FromContainer)+":"):
- split := strings.SplitN(ns, ":", 2)
- if len(split) != 2 {
- return toReturn, nil, errors.Errorf("must provide name or ID or a container when specifying \"container:\"")
- }
- toReturn.NSMode = FromContainer
- toReturn.Value = split[1]
- default:
- // Assume we have been given a list of CNI networks.
- // Which only works in bridge mode, so set that.
- networkList := strings.Split(ns, ",")
- for _, net := range networkList {
- networks[net] = types.PerNetworkOptions{}
- }
-
- toReturn.NSMode = Bridge
- }
-
- return toReturn, networks, nil
-}
-
// ParseNetworkFlag parses a network string slice into the network options
// If the input is nil or empty it will use the default setting from containers.conf
func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetworkOptions, map[string][]string, error) {
@@ -399,13 +343,7 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork
case ns == string(FromPod):
toReturn.NSMode = FromPod
case ns == "" || ns == string(Default) || ns == string(Private):
- // Net defaults to Slirp on rootless
- if rootless.IsRootless() {
- toReturn.NSMode = Slirp
- break
- }
- // if root we use bridge
- fallthrough
+ toReturn.NSMode = Private
case ns == string(Bridge), strings.HasPrefix(ns, string(Bridge)+":"):
toReturn.NSMode = Bridge
parts := strings.SplitN(ns, ":", 2)
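With the rootless fallthrough removed, parsing no longer depends on the caller's UID. An illustrative expectation (it mirrors the test changes further down in this patch; not an additional test):

ns, networks, _, err := ParseNetworkFlag(nil)
// err == nil, ns.NSMode == Private and networks is empty for root and rootless
// alike; the rootless slirp4netns default is applied later, in namespaceOptions.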
@@ -427,14 +365,14 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork
case strings.HasPrefix(ns, "ns:"):
split := strings.SplitN(ns, ":", 2)
if len(split) != 2 {
- return toReturn, nil, nil, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"")
+ return toReturn, nil, nil, errors.New("must provide a path to a namespace when specifying \"ns:\"")
}
toReturn.NSMode = Path
toReturn.Value = split[1]
case strings.HasPrefix(ns, string(FromContainer)+":"):
split := strings.SplitN(ns, ":", 2)
if len(split) != 2 {
- return toReturn, nil, nil, errors.Errorf("must provide name or ID or a container when specifying \"container:\"")
+ return toReturn, nil, nil, errors.New("must provide name or ID or a container when specifying \"container:\"")
}
toReturn.NSMode = FromContainer
toReturn.Value = split[1]
@@ -453,7 +391,7 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork
}
netOpts, err := parseBridgeNetworkOptions(parts[1])
if err != nil {
- return toReturn, nil, nil, errors.Wrapf(err, "invalid option for network %s", parts[0])
+ return toReturn, nil, nil, fmt.Errorf("invalid option for network %s: %w", parts[0], err)
}
podmanNetworks[parts[0]] = netOpts
}
@@ -464,24 +402,24 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork
if len(networks) > 1 {
if !toReturn.IsBridge() {
- return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "cannot set multiple networks without bridge network mode, selected mode %s", toReturn.NSMode)
+ return toReturn, nil, nil, fmt.Errorf("cannot set multiple networks without bridge network mode, selected mode %s: %w", toReturn.NSMode, define.ErrInvalidArg)
}
for _, network := range networks[1:] {
parts := strings.SplitN(network, ":", 2)
if parts[0] == "" {
- return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "network name cannot be empty")
+ return toReturn, nil, nil, fmt.Errorf("network name cannot be empty: %w", define.ErrInvalidArg)
}
- if util.StringInSlice(parts[0], []string{string(Bridge), string(Slirp), string(FromPod), string(NoNetwork),
+ if cutil.StringInSlice(parts[0], []string{string(Bridge), string(Slirp), string(FromPod), string(NoNetwork),
string(Default), string(Private), string(Path), string(FromContainer), string(Host)}) {
- return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "can only set extra network names, selected mode %s conflicts with bridge", parts[0])
+ return toReturn, nil, nil, fmt.Errorf("can only set extra network names, selected mode %s conflicts with bridge: %w", parts[0], define.ErrInvalidArg)
}
netOpts := types.PerNetworkOptions{}
if len(parts) > 1 {
var err error
netOpts, err = parseBridgeNetworkOptions(parts[1])
if err != nil {
- return toReturn, nil, nil, errors.Wrapf(err, "invalid option for network %s", parts[0])
+ return toReturn, nil, nil, fmt.Errorf("invalid option for network %s: %w", parts[0], err)
}
}
podmanNetworks[parts[0]] = netOpts
@@ -503,7 +441,7 @@ func parseBridgeNetworkOptions(opts string) (types.PerNetworkOptions, error) {
case "ip", "ip6":
ip := net.ParseIP(split[1])
if ip == nil {
- return netOpts, errors.Errorf("invalid ip address %q", split[1])
+ return netOpts, fmt.Errorf("invalid ip address %q", split[1])
}
netOpts.StaticIPs = append(netOpts.StaticIPs, ip)
@@ -527,7 +465,7 @@ func parseBridgeNetworkOptions(opts string) (types.PerNetworkOptions, error) {
netOpts.InterfaceName = split[1]
default:
- return netOpts, errors.Errorf("unknown bridge network option: %s", split[0])
+ return netOpts, fmt.Errorf("unknown bridge network option: %s", split[0])
}
}
return netOpts, nil
@@ -539,7 +477,7 @@ func SetupUserNS(idmappings *storage.IDMappingOptions, userns Namespace, g *gene
switch userns.NSMode {
case Path:
if _, err := os.Stat(userns.Value); err != nil {
- return user, errors.Wrap(err, "cannot find specified user namespace path")
+ return user, fmt.Errorf("cannot find specified user namespace path: %w", err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), userns.Value); err != nil {
return user, err
@@ -588,7 +526,7 @@ func privateUserNamespace(idmappings *storage.IDMappingOptions, g *generate.Gene
return err
}
if idmappings == nil || (len(idmappings.UIDMap) == 0 && len(idmappings.GIDMap) == 0) {
- return errors.Errorf("must provide at least one UID or GID mapping to configure a user namespace")
+ return errors.New("must provide at least one UID or GID mapping to configure a user namespace")
}
for _, uidmap := range idmappings.UIDMap {
g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
diff --git a/pkg/specgen/namespaces_test.go b/pkg/specgen/namespaces_test.go
index 368c92bd5..d03a6d032 100644
--- a/pkg/specgen/namespaces_test.go
+++ b/pkg/specgen/namespaces_test.go
@@ -5,7 +5,6 @@ import (
"testing"
"github.com/containers/common/libnetwork/types"
- "github.com/containers/podman/v4/pkg/rootless"
"github.com/stretchr/testify/assert"
)
@@ -17,14 +16,6 @@ func parsMacNoErr(mac string) types.HardwareAddr {
func TestParseNetworkFlag(t *testing.T) {
// root and rootless have different defaults
defaultNetName := "default"
- defaultNetworks := map[string]types.PerNetworkOptions{
- defaultNetName: {},
- }
- defaultNsMode := Namespace{NSMode: Bridge}
- if rootless.IsRootless() {
- defaultNsMode = Namespace{NSMode: Slirp}
- defaultNetworks = map[string]types.PerNetworkOptions{}
- }
tests := []struct {
name string
@@ -37,26 +28,26 @@ func TestParseNetworkFlag(t *testing.T) {
{
name: "empty input",
args: nil,
- nsmode: defaultNsMode,
- networks: defaultNetworks,
+ nsmode: Namespace{NSMode: Private},
+ networks: map[string]types.PerNetworkOptions{},
},
{
name: "empty string as input",
args: []string{},
- nsmode: defaultNsMode,
- networks: defaultNetworks,
+ nsmode: Namespace{NSMode: Private},
+ networks: map[string]types.PerNetworkOptions{},
},
{
name: "default mode",
args: []string{"default"},
- nsmode: defaultNsMode,
- networks: defaultNetworks,
+ nsmode: Namespace{NSMode: Private},
+ networks: map[string]types.PerNetworkOptions{},
},
{
name: "private mode",
args: []string{"private"},
- nsmode: defaultNsMode,
- networks: defaultNetworks,
+ nsmode: Namespace{NSMode: Private},
+ networks: map[string]types.PerNetworkOptions{},
},
{
name: "bridge mode",
diff --git a/pkg/specgen/pod_validate.go b/pkg/specgen/pod_validate.go
index 8d971a25e..724be213f 100644
--- a/pkg/specgen/pod_validate.go
+++ b/pkg/specgen/pod_validate.go
@@ -1,8 +1,10 @@
package specgen
import (
+ "errors"
+ "fmt"
+
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
var (
@@ -13,7 +15,7 @@ var (
)
func exclusivePodOptions(opt1, opt2 string) error {
- return errors.Wrapf(ErrInvalidPodSpecConfig, "%s and %s are mutually exclusive pod options", opt1, opt2)
+ return fmt.Errorf("%s and %s are mutually exclusive pod options: %w", opt1, opt2, ErrInvalidPodSpecConfig)
}
// Validate verifies the input is valid
diff --git a/pkg/specgen/podspecgen.go b/pkg/specgen/podspecgen.go
index 603506241..64a79f4ee 100644
--- a/pkg/specgen/podspecgen.go
+++ b/pkg/specgen/podspecgen.go
@@ -4,6 +4,7 @@ import (
"net"
"github.com/containers/common/libnetwork/types"
+ storageTypes "github.com/containers/storage/types"
spec "github.com/opencontainers/runtime-spec/specs-go"
)
@@ -76,6 +77,8 @@ type PodBasicConfig struct {
// Any containers created within the pod will inherit the pod's userns settings.
// Optional
Userns Namespace `json:"userns,omitempty"`
+ // UtsNs is used to indicate the UTS mode the pod is in
+ UtsNs Namespace `json:"utsns,omitempty"`
// Devices contains user specified Devices to be added to the Pod
Devices []string `json:"pod_devices,omitempty"`
// Sysctl sets kernel parameters for the pod
@@ -182,6 +185,10 @@ type PodStorageConfig struct {
// comma-separated options. Valid options are 'ro', 'rw', and 'z'.
// Options will be used for all volumes sourced from the container.
VolumesFrom []string `json:"volumes_from,omitempty"`
+ // ShmSize is the size of the tmpfs to mount in at /dev/shm, in bytes.
+ // Conflicts with a non-private IpcNS.
+ // Optional.
+ ShmSize *int64 `json:"shm_size,omitempty"`
}
// PodCgroupConfig contains configuration options about a pod's cgroups.
@@ -222,6 +229,10 @@ type PodResourceConfig struct {
type PodSecurityConfig struct {
SecurityOpt []string `json:"security_opt,omitempty"`
+ // IDMappings are UID and GID mappings that will be used by user
+ // namespaces.
+ // Required if UserNS is private.
+ IDMappings *storageTypes.IDMappingOptions `json:"idmappings,omitempty"`
}
// NewPodSpecGenerator creates a new pod spec
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index 79e20667b..c31c3f035 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -1,6 +1,7 @@
package specgen
import (
+ "errors"
"net"
"strings"
"syscall"
@@ -10,7 +11,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/storage/types"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// LogConfig describes the logging characteristics for a container
@@ -103,6 +103,12 @@ type ContainerBasicConfig struct {
// RawImageName is the user-specified and unprocessed input referring
// to a local or a remote image.
RawImageName string `json:"raw_image_name,omitempty"`
+ // ImageOS is the user-specified image OS
+ ImageOS string `json:"image_os,omitempty"`
+ // ImageArch is the user-specified image architecture
+ ImageArch string `json:"image_arch,omitempty"`
+ // ImageVariant is the user-specified image variant
+ ImageVariant string `json:"image_variant,omitempty"`
// RestartPolicy is the container's restart policy - an action which
// will be taken when the container exits.
// If not given, the default policy, which does nothing, will be used.
@@ -565,10 +571,10 @@ type Secret struct {
var (
// ErrNoStaticIPRootless is used when a rootless user requests to assign a static IP address
// to a pod or container
- ErrNoStaticIPRootless error = errors.New("rootless containers and pods cannot be assigned static IP addresses")
+ ErrNoStaticIPRootless = errors.New("rootless containers and pods cannot be assigned static IP addresses")
// ErrNoStaticMACRootless is used when a rootless user requests to assign a static MAC address
// to a pod or container
- ErrNoStaticMACRootless error = errors.New("rootless containers and pods cannot be assigned static MAC addresses")
+ ErrNoStaticMACRootless = errors.New("rootless containers and pods cannot be assigned static MAC addresses")
)
// NewSpecGenerator returns a SpecGenerator struct given one of two mandatory inputs
diff --git a/pkg/specgen/volumes.go b/pkg/specgen/volumes.go
index b26666df3..84de4fdd1 100644
--- a/pkg/specgen/volumes.go
+++ b/pkg/specgen/volumes.go
@@ -1,11 +1,13 @@
package specgen
import (
+ "errors"
+ "fmt"
+ "path/filepath"
"strings"
"github.com/containers/common/pkg/parse"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -36,7 +38,7 @@ type OverlayVolume struct {
// ImageVolume is a volume based on a container image. The container image is
// first mounted on the host and is then bind-mounted into the container. An
-// ImageVolume is always mounted read only.
+// ImageVolume is always mounted read-only.
type ImageVolume struct {
// Source is the source of the image volume. The image can be referred
// to by name and by ID.
@@ -49,14 +51,13 @@ type ImageVolume struct {
// GenVolumeMounts parses user input into mounts, volumes and overlay volumes
func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*NamedVolume, map[string]*OverlayVolume, error) {
- errDuplicateDest := errors.Errorf("duplicate mount destination")
+ errDuplicateDest := errors.New("duplicate mount destination")
mounts := make(map[string]spec.Mount)
volumes := make(map[string]*NamedVolume)
overlayVolumes := make(map[string]*OverlayVolume)
- volumeFormatErr := errors.Errorf("incorrect volume format, should be [host-dir:]ctr-dir[:option]")
-
+ volumeFormatErr := errors.New("incorrect volume format, should be [host-dir:]ctr-dir[:option]")
for _, vol := range volumeFlag {
var (
options []string
@@ -67,10 +68,24 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na
splitVol := SplitVolumeString(vol)
if len(splitVol) > 3 {
- return nil, nil, nil, errors.Wrapf(volumeFormatErr, vol)
+ return nil, nil, nil, fmt.Errorf("%v: %w", vol, volumeFormatErr)
}
src = splitVol[0]
+
+ // Support relative paths beginning with ./
+ if strings.HasPrefix(src, "./") {
+ path, err := filepath.EvalSymlinks(src)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ src, err = filepath.Abs(path)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ splitVol[0] = src
+ }
+
if len(splitVol) == 1 {
// This is an anonymous named volume. Only thing given
// is destination.
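The hunk above adds support for volume sources given as relative paths starting with "./": the path is resolved through filepath.EvalSymlinks and then made absolute before being stored back into splitVol. A self-contained sketch of that resolution step with simplified error handling (resolveVolumeSource is a hypothetical helper, not part of this diff):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolveVolumeSource resolves "./"-prefixed sources the same way the hunk
// above does: evaluate symlinks first, then convert to an absolute path so
// later code never sees a relative source. Other sources pass through unchanged.
func resolveVolumeSource(src string) (string, error) {
	if !strings.HasPrefix(src, "./") {
		return src, nil
	}
	path, err := filepath.EvalSymlinks(src)
	if err != nil {
		return "", err
	}
	return filepath.Abs(path)
}

func main() {
	// EvalSymlinks requires the path to exist, just like the code above.
	src, err := resolveVolumeSource("./data")
	fmt.Println(src, err)
}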
@@ -97,6 +112,8 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na
// This is not a named volume
overlayFlag := false
chownFlag := false
+ upperDirFlag := false
+ workDirFlag := false
for _, o := range options {
if o == "O" {
overlayFlag = true
@@ -105,8 +122,16 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na
if strings.Contains(joinedOpts, "U") {
chownFlag = true
}
-
- if len(options) > 2 || (len(options) == 2 && !chownFlag) {
+ if strings.Contains(joinedOpts, "upperdir") {
+ upperDirFlag = true
+ }
+ if strings.Contains(joinedOpts, "workdir") {
+ workDirFlag = true
+ }
+ if (workDirFlag && !upperDirFlag) || (!workDirFlag && upperDirFlag) {
+ return nil, nil, nil, errors.New("must set both `upperdir` and `workdir`")
+ }
+ if len(options) > 2 && !(len(options) == 3 && upperDirFlag && workDirFlag) || (len(options) == 2 && !chownFlag) {
return nil, nil, nil, errors.New("can't use 'O' with other options")
}
}
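The options check above now allows an overlay ('O') volume to carry upperdir= and workdir=, but only as a pair. A small sketch of that pairing rule in isolation (checkOverlayDirs is an illustrative helper):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// checkOverlayDirs enforces the rule from the hunk above: upperdir and
// workdir must be given together or not at all.
func checkOverlayDirs(options []string) error {
	joined := strings.Join(options, ",")
	hasUpper := strings.Contains(joined, "upperdir")
	hasWork := strings.Contains(joined, "workdir")
	if hasUpper != hasWork {
		return errors.New("must set both `upperdir` and `workdir`")
	}
	return nil
}

func main() {
	fmt.Println(checkOverlayDirs([]string{"O", "upperdir=/tmp/upper"}))                      // error
	fmt.Println(checkOverlayDirs([]string{"O", "upperdir=/tmp/upper", "workdir=/tmp/work"})) // <nil>
}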
@@ -115,11 +140,17 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na
// This is a overlay volume
newOverlayVol := new(OverlayVolume)
newOverlayVol.Destination = dest
- newOverlayVol.Source = src
+ // convert src to absolute path so we don't end up passing
+ // relative values as lowerdir for overlay mounts
+ source, err := filepath.Abs(src)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed while resolving absolute path for source %v for overlay mount: %w", src, err)
+ }
+ newOverlayVol.Source = source
newOverlayVol.Options = options
if _, ok := overlayVolumes[newOverlayVol.Destination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, newOverlayVol.Destination)
+ return nil, nil, nil, fmt.Errorf("%v: %w", newOverlayVol.Destination, errDuplicateDest)
}
overlayVolumes[newOverlayVol.Destination] = newOverlayVol
} else {
@@ -130,7 +161,7 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na
Options: options,
}
if _, ok := mounts[newMount.Destination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, newMount.Destination)
+ return nil, nil, nil, fmt.Errorf("%v: %w", newMount.Destination, errDuplicateDest)
}
mounts[newMount.Destination] = newMount
}
@@ -142,7 +173,7 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na
newNamedVol.Options = options
if _, ok := volumes[newNamedVol.Dest]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, newNamedVol.Dest)
+ return nil, nil, nil, fmt.Errorf("%v: %w", newNamedVol.Dest, errDuplicateDest)
}
volumes[newNamedVol.Dest] = newNamedVol
}
diff --git a/pkg/specgen/winpath.go b/pkg/specgen/winpath.go
index 0df4ebdd7..5c19aeb4b 100644
--- a/pkg/specgen/winpath.go
+++ b/pkg/specgen/winpath.go
@@ -1,11 +1,10 @@
package specgen
import (
+ "errors"
"fmt"
"strings"
"unicode"
-
- "github.com/pkg/errors"
)
func isHostWinPath(path string) bool {
diff --git a/pkg/specgenutil/createparse.go b/pkg/specgenutil/createparse.go
index fb5f9c351..373fb6faa 100644
--- a/pkg/specgenutil/createparse.go
+++ b/pkg/specgenutil/createparse.go
@@ -1,9 +1,10 @@
package specgenutil
import (
+ "errors"
+
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/pkg/errors"
)
// validate determines if the flags and values given by the user are valid. things checked
@@ -11,27 +12,12 @@ import (
func validate(c *entities.ContainerCreateOptions) error {
var ()
if c.Rm && (c.Restart != "" && c.Restart != "no" && c.Restart != "on-failure") {
- return errors.Errorf(`the --rm option conflicts with --restart, when the restartPolicy is not "" and "no"`)
+ return errors.New(`the --rm option conflicts with --restart, when the restartPolicy is not "" and "no"`)
}
if _, err := config.ParsePullPolicy(c.Pull); err != nil {
return err
}
- var imageVolType = map[string]string{
- "bind": "",
- "tmpfs": "",
- "ignore": "",
- }
- if _, ok := imageVolType[c.ImageVolume]; !ok {
- switch {
- case c.IsInfra:
- c.ImageVolume = "bind"
- case c.IsClone: // the image volume type will be deduced later from the container we are cloning
- return nil
- default:
- return errors.Errorf("invalid image-volume type %q. Pick one of bind, tmpfs, or ignore", c.ImageVolume)
- }
- }
- return nil
+ return config.ValidateImageVolumeMode(c.ImageVolume)
}
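validate now delegates the image-volume check to config.ValidateImageVolumeMode from containers/common instead of the inline map. A hypothetical stand-in that reproduces the check being removed here (the real helper may accept additional modes):

package main

import "fmt"

// validateImageVolumeMode mirrors the inline validation deleted above:
// empty, "bind", "tmpfs", and "ignore" are accepted, everything else errors.
func validateImageVolumeMode(mode string) error {
	switch mode {
	case "", "bind", "tmpfs", "ignore":
		return nil
	default:
		return fmt.Errorf("invalid image-volume type %q. Pick one of bind, tmpfs, or ignore", mode)
	}
}

func main() {
	fmt.Println(validateImageVolumeMode("tmpfs")) // <nil>
	fmt.Println(validateImageVolumeMode("copy"))  // invalid image-volume type "copy". ...
}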
diff --git a/pkg/specgenutil/ports.go b/pkg/specgenutil/ports.go
index 6cc4de1ed..23ee9b4d3 100644
--- a/pkg/specgenutil/ports.go
+++ b/pkg/specgenutil/ports.go
@@ -1,8 +1,9 @@
package specgenutil
import (
+ "fmt"
+
"github.com/docker/go-connections/nat"
- "github.com/pkg/errors"
)
func verifyExpose(expose []string) error {
@@ -15,7 +16,7 @@ func verifyExpose(expose []string) error {
// if expose a port, the start and end port are the same
_, _, err := nat.ParsePortRange(port)
if err != nil {
- return errors.Wrapf(err, "invalid range format for --expose: %s", expose)
+ return fmt.Errorf("invalid range format for --expose: %s: %w", expose, err)
}
}
return nil
diff --git a/pkg/specgenutil/specgen.go b/pkg/specgenutil/specgen.go
index 9cb2f200b..9a7d50947 100644
--- a/pkg/specgenutil/specgen.go
+++ b/pkg/specgenutil/specgen.go
@@ -2,6 +2,7 @@ package specgenutil
import (
"encoding/json"
+ "errors"
"fmt"
"os"
"strconv"
@@ -22,7 +23,6 @@ import (
"github.com/docker/docker/opts"
"github.com/docker/go-units"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
func getCPULimits(c *entities.ContainerCreateOptions) *specs.LinuxCPU {
@@ -78,7 +78,7 @@ func getIOLimits(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions) (
if b := c.BlkIOWeight; len(b) > 0 {
u, err := strconv.ParseUint(b, 10, 16)
if err != nil {
- return nil, errors.Wrapf(err, "invalid value for blkio-weight")
+ return nil, fmt.Errorf("invalid value for blkio-weight: %w", err)
}
nu := uint16(u)
io.Weight = &nu
@@ -143,7 +143,7 @@ func getMemoryLimits(c *entities.ContainerCreateOptions) (*specs.LinuxMemory, er
if m := c.Memory; len(m) > 0 {
ml, err := units.RAMInBytes(m)
if err != nil {
- return nil, errors.Wrapf(err, "invalid value for memory")
+ return nil, fmt.Errorf("invalid value for memory: %w", err)
}
LimitToSwap(memory, c.MemorySwap, ml)
hasLimits = true
@@ -151,7 +151,7 @@ func getMemoryLimits(c *entities.ContainerCreateOptions) (*specs.LinuxMemory, er
if m := c.MemoryReservation; len(m) > 0 {
mr, err := units.RAMInBytes(m)
if err != nil {
- return nil, errors.Wrapf(err, "invalid value for memory")
+ return nil, fmt.Errorf("invalid value for memory: %w", err)
}
memory.Reservation = &mr
hasLimits = true
@@ -164,7 +164,7 @@ func getMemoryLimits(c *entities.ContainerCreateOptions) (*specs.LinuxMemory, er
ms, err = units.RAMInBytes(m)
memory.Swap = &ms
if err != nil {
- return nil, errors.Wrapf(err, "invalid value for memory")
+ return nil, fmt.Errorf("invalid value for memory: %w", err)
}
hasLimits = true
}
@@ -229,9 +229,11 @@ func setNamespaces(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions)
}
func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions, args []string) error {
- var (
- err error
- )
+ rtc, err := config.Default()
+ if err != nil {
+ return err
+ }
+
// validate flags as needed
if err := validate(c); err != nil {
return err
@@ -246,7 +248,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if len(c.HealthCmd) > 0 {
if c.NoHealthCheck {
- return errors.New("Cannot specify both --no-healthcheck and --health-cmd")
+ return errors.New("cannot specify both --no-healthcheck and --health-cmd")
}
s.HealthConfig, err = makeHealthCheckFromCli(c.HealthCmd, c.HealthInterval, c.HealthRetries, c.HealthTimeout, c.HealthStartPeriod)
if err != nil {
@@ -310,13 +312,13 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
s.PublishExposedPorts = c.PublishAll
}
- if len(s.Pod) == 0 {
+ if len(s.Pod) == 0 || len(c.Pod) > 0 {
s.Pod = c.Pod
}
if len(c.PodIDFile) > 0 {
if len(s.Pod) > 0 {
- return errors.New("Cannot specify both --pod and --pod-id-file")
+ return errors.New("cannot specify both --pod and --pod-id-file")
}
podID, err := ReadPodIDFile(c.PodIDFile)
if err != nil {
@@ -355,7 +357,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
// any case.
osEnv, err := envLib.ParseSlice(os.Environ())
if err != nil {
- return errors.Wrap(err, "error parsing host environment variables")
+ return fmt.Errorf("error parsing host environment variables: %w", err)
}
if !s.EnvHost {
@@ -388,7 +390,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
// LABEL VARIABLES
labels, err := parse.GetAllLabels(c.LabelFile, c.Label)
if err != nil {
- return errors.Wrapf(err, "unable to process labels")
+ return fmt.Errorf("unable to process labels: %w", err)
}
if systemdUnit, exists := osEnv[systemdDefine.EnvVariable]; exists {
@@ -412,7 +414,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
for _, annotation := range c.Annotation {
splitAnnotation := strings.SplitN(annotation, "=", 2)
if len(splitAnnotation) < 2 {
- return errors.Errorf("Annotations must be formatted KEY=VALUE")
+ return errors.New("annotations must be formatted KEY=VALUE")
}
annotations[splitAnnotation[0]] = splitAnnotation[1]
}
@@ -425,7 +427,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
for _, opt := range c.StorageOpts {
split := strings.SplitN(opt, "=", 2)
if len(split) != 2 {
- return errors.Errorf("storage-opt must be formatted KEY=VALUE")
+ return errors.New("storage-opt must be formatted KEY=VALUE")
}
opts[split[0]] = split[1]
}
@@ -457,7 +459,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if c.ShmSize != "" {
var m opts.MemBytes
if err := m.Set(c.ShmSize); err != nil {
- return errors.Wrapf(err, "unable to translate --shm-size")
+ return fmt.Errorf("unable to translate --shm-size: %w", err)
}
val := m.Value()
s.ShmSize = &val
@@ -479,8 +481,13 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if len(s.HostUsers) == 0 || len(c.HostUsers) != 0 {
s.HostUsers = c.HostUsers
}
- if len(s.ImageVolumeMode) == 0 || len(c.ImageVolume) != 0 {
- s.ImageVolumeMode = c.ImageVolume
+ if len(c.ImageVolume) != 0 {
+ if len(s.ImageVolumeMode) == 0 {
+ s.ImageVolumeMode = c.ImageVolume
+ }
+ }
+ if len(s.ImageVolumeMode) == 0 {
+ s.ImageVolumeMode = rtc.Engine.ImageVolumeMode
}
if s.ImageVolumeMode == "bind" {
s.ImageVolumeMode = "anonymous"
@@ -524,7 +531,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
for _, unified := range c.CgroupConf {
splitUnified := strings.SplitN(unified, "=", 2)
if len(splitUnified) < 2 {
- return errors.Errorf("--cgroup-conf must be formatted KEY=VALUE")
+ return errors.New("--cgroup-conf must be formatted KEY=VALUE")
}
unifieds[splitUnified[0]] = splitUnified[1]
}
@@ -550,11 +557,6 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
s.CgroupsMode = c.CgroupsMode
}
if s.CgroupsMode == "" {
- rtc, err := config.Default()
- if err != nil {
- return err
- }
-
s.CgroupsMode = rtc.Cgroups()
}
@@ -606,7 +608,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
for _, ctl := range c.Sysctl {
splitCtl := strings.SplitN(ctl, "=", 2)
if len(splitCtl) < 2 {
- return errors.Errorf("invalid sysctl value %q", ctl)
+ return fmt.Errorf("invalid sysctl value %q", ctl)
}
sysmap[splitCtl[0]] = splitCtl[1]
}
@@ -622,7 +624,14 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if opt == "no-new-privileges" {
s.ContainerSecurityConfig.NoNewPrivileges = true
} else {
- con := strings.SplitN(opt, "=", 2)
+ // Docker deprecated the ":" syntax but still supports it,
+ // so we need to support it as well
+ var con []string
+ if strings.Contains(opt, "=") {
+ con = strings.SplitN(opt, "=", 2)
+ } else {
+ con = strings.SplitN(opt, ":", 2)
+ }
if len(con) != 2 {
return fmt.Errorf("invalid --security-opt 1: %q", opt)
}
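The hunk above accepts Docker's older --security-opt key:value syntax alongside key=value by choosing the separator before splitting. The same fallback in isolation (splitSecurityOpt is an illustrative name):

package main

import (
	"fmt"
	"strings"
)

// splitSecurityOpt prefers the "key=value" form but falls back to the
// deprecated "key:value" form that Docker still accepts.
func splitSecurityOpt(opt string) []string {
	if strings.Contains(opt, "=") {
		return strings.SplitN(opt, "=", 2)
	}
	return strings.SplitN(opt, ":", 2)
}

func main() {
	fmt.Println(splitSecurityOpt("label=disable"))      // [label disable]
	fmt.Println(splitSecurityOpt("seccomp:unconfined")) // [seccomp unconfined]
}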
@@ -650,6 +659,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
}
case "unmask":
s.ContainerSecurityConfig.Unmask = append(s.ContainerSecurityConfig.Unmask, con[1:]...)
+ case "no-new-privileges":
+ noNewPrivileges, err := strconv.ParseBool(con[1])
+ if err != nil {
+ return fmt.Errorf("invalid --security-opt 2: %q", opt)
+ }
+ s.ContainerSecurityConfig.NoNewPrivileges = noNewPrivileges
default:
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
@@ -716,7 +731,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
}
ul, err := units.ParseUlimit(u)
if err != nil {
- return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u)
+ return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
}
rl := specs.POSIXRlimit{
Type: ul.Name,
@@ -730,7 +745,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
for _, o := range c.LogOptions {
split := strings.SplitN(o, "=", 2)
if len(split) < 2 {
- return errors.Errorf("invalid log option %q", o)
+ return fmt.Errorf("invalid log option %q", o)
}
switch strings.ToLower(split[0]) {
case "driver":
@@ -767,19 +782,19 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
// No retries specified
case 2:
if strings.ToLower(splitRestart[0]) != "on-failure" {
- return errors.Errorf("restart policy retries can only be specified with on-failure restart policy")
+ return errors.New("restart policy retries can only be specified with on-failure restart policy")
}
retries, err := strconv.Atoi(splitRestart[1])
if err != nil {
- return errors.Wrapf(err, "error parsing restart policy retry count")
+ return fmt.Errorf("error parsing restart policy retry count: %w", err)
}
if retries < 0 {
- return errors.Errorf("must specify restart policy retry count as a number greater than 0")
+ return errors.New("must specify restart policy retry count as a number greater than 0")
}
var retriesUint = uint(retries)
s.RestartRetries = &retriesUint
default:
- return errors.Errorf("invalid restart policy: may specify retries at most once")
+ return errors.New("invalid restart policy: may specify retries at most once")
}
s.RestartPolicy = splitRestart[0]
}
@@ -854,27 +869,27 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
}
// Every healthcheck requires a command
if len(cmdArr) == 0 {
- return nil, errors.New("Must define a healthcheck command for all healthchecks")
+ return nil, errors.New("must define a healthcheck command for all healthchecks")
}
var concat string
- if cmdArr[0] == "CMD" || cmdArr[0] == "none" { // this is for compat, we are already split properly for most compat cases
+ if strings.ToUpper(cmdArr[0]) == define.HealthConfigTestCmd || strings.ToUpper(cmdArr[0]) == define.HealthConfigTestNone { // this is for compat, we are already split properly for most compat cases
cmdArr = strings.Fields(inCmd)
- } else if cmdArr[0] != "CMD-SHELL" { // this is for podman side of things, won't contain the keywords
+ } else if strings.ToUpper(cmdArr[0]) != define.HealthConfigTestCmdShell { // this is for podman side of things, won't contain the keywords
if isArr && len(cmdArr) > 1 { // an array of consecutive commands
- cmdArr = append([]string{"CMD"}, cmdArr...)
+ cmdArr = append([]string{define.HealthConfigTestCmd}, cmdArr...)
} else { // one singular command
if len(cmdArr) == 1 {
concat = cmdArr[0]
} else {
concat = strings.Join(cmdArr[0:], " ")
}
- cmdArr = append([]string{"CMD-SHELL"}, concat)
+ cmdArr = append([]string{define.HealthConfigTestCmdShell}, concat)
}
}
- if cmdArr[0] == "none" { // if specified to remove healtcheck
- cmdArr = []string{"NONE"}
+ if strings.ToUpper(cmdArr[0]) == define.HealthConfigTestNone { // if specified to remove healthcheck
+ cmdArr = []string{define.HealthConfigTestNone}
}
// healthcheck is by default an array, so we simply pass the user input
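The healthcheck hunk above swaps the hard-coded "CMD", "CMD-SHELL", and "none" literals for the define.HealthConfigTest* constants and compares them case-insensitively. A simplified sketch of the normalization (it drops the isArr handling, and the constant values "CMD", "CMD-SHELL", and "NONE" are assumed here for illustration):

package main

import (
	"fmt"
	"strings"
)

// Assumed values standing in for define.HealthConfigTestCmd,
// define.HealthConfigTestCmdShell, and define.HealthConfigTestNone.
const (
	testCmd      = "CMD"
	testCmdShell = "CMD-SHELL"
	testNone     = "NONE"
)

// normalizeHealthCmd matches the keywords case-insensitively, prefixes bare
// commands with CMD-SHELL, and turns "none" into a disabled healthcheck.
func normalizeHealthCmd(cmdArr []string) []string {
	switch strings.ToUpper(cmdArr[0]) {
	case testNone:
		return []string{testNone}
	case testCmd, testCmdShell:
		return cmdArr
	default:
		return append([]string{testCmdShell}, strings.Join(cmdArr, " "))
	}
}

func main() {
	fmt.Println(normalizeHealthCmd([]string{"none"}))              // [NONE]
	fmt.Println(normalizeHealthCmd([]string{"curl", "localhost"})) // [CMD-SHELL curl localhost]
}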
@@ -887,7 +902,7 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
}
intervalDuration, err := time.ParseDuration(interval)
if err != nil {
- return nil, errors.Wrapf(err, "invalid healthcheck-interval")
+ return nil, fmt.Errorf("invalid healthcheck-interval: %w", err)
}
hc.Interval = intervalDuration
@@ -898,7 +913,7 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
hc.Retries = int(retries)
timeoutDuration, err := time.ParseDuration(timeout)
if err != nil {
- return nil, errors.Wrapf(err, "invalid healthcheck-timeout")
+ return nil, fmt.Errorf("invalid healthcheck-timeout: %w", err)
}
if timeoutDuration < time.Duration(1) {
return nil, errors.New("healthcheck-timeout must be at least 1 second")
@@ -907,7 +922,7 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
startPeriodDuration, err := time.ParseDuration(startPeriod)
if err != nil {
- return nil, errors.Wrapf(err, "invalid healthcheck-start-period")
+ return nil, fmt.Errorf("invalid healthcheck-start-period: %w", err)
}
if startPeriodDuration < time.Duration(0) {
return nil, errors.New("healthcheck-start-period must be 0 seconds or greater")
@@ -1020,17 +1035,17 @@ func parseSecrets(secrets []string) ([]specgen.Secret, map[string]string, error)
for _, val := range split {
kv := strings.SplitN(val, "=", 2)
if len(kv) < 2 {
- return nil, nil, errors.Wrapf(secretParseError, "option %s must be in form option=value", val)
+ return nil, nil, fmt.Errorf("option %s must be in form option=value: %w", val, secretParseError)
}
switch kv[0] {
case "source":
source = kv[1]
case "type":
if secretType != "" {
- return nil, nil, errors.Wrap(secretParseError, "cannot set more than one secret type")
+ return nil, nil, fmt.Errorf("cannot set more than one secret type: %w", secretParseError)
}
if kv[1] != "mount" && kv[1] != "env" {
- return nil, nil, errors.Wrapf(secretParseError, "type %s is invalid", kv[1])
+ return nil, nil, fmt.Errorf("type %s is invalid: %w", kv[1], secretParseError)
}
secretType = kv[1]
case "target":
@@ -1039,26 +1054,26 @@ func parseSecrets(secrets []string) ([]specgen.Secret, map[string]string, error)
mountOnly = true
mode64, err := strconv.ParseUint(kv[1], 8, 32)
if err != nil {
- return nil, nil, errors.Wrapf(secretParseError, "mode %s invalid", kv[1])
+ return nil, nil, fmt.Errorf("mode %s invalid: %w", kv[1], secretParseError)
}
mode = uint32(mode64)
case "uid", "UID":
mountOnly = true
uid64, err := strconv.ParseUint(kv[1], 10, 32)
if err != nil {
- return nil, nil, errors.Wrapf(secretParseError, "UID %s invalid", kv[1])
+ return nil, nil, fmt.Errorf("UID %s invalid: %w", kv[1], secretParseError)
}
uid = uint32(uid64)
case "gid", "GID":
mountOnly = true
gid64, err := strconv.ParseUint(kv[1], 10, 32)
if err != nil {
- return nil, nil, errors.Wrapf(secretParseError, "GID %s invalid", kv[1])
+ return nil, nil, fmt.Errorf("GID %s invalid: %w", kv[1], secretParseError)
}
gid = uint32(gid64)
default:
- return nil, nil, errors.Wrapf(secretParseError, "option %s invalid", val)
+ return nil, nil, fmt.Errorf("option %s invalid: %w", val, secretParseError)
}
}
@@ -1066,7 +1081,7 @@ func parseSecrets(secrets []string) ([]specgen.Secret, map[string]string, error)
secretType = "mount"
}
if source == "" {
- return nil, nil, errors.Wrapf(secretParseError, "no source found %s", val)
+ return nil, nil, fmt.Errorf("no source found %s: %w", val, secretParseError)
}
if secretType == "mount" {
mountSecret := specgen.Secret{
@@ -1080,7 +1095,7 @@ func parseSecrets(secrets []string) ([]specgen.Secret, map[string]string, error)
}
if secretType == "env" {
if mountOnly {
- return nil, nil, errors.Wrap(secretParseError, "UID, GID, Mode options cannot be set with secret type env")
+ return nil, nil, fmt.Errorf("UID, GID, Mode options cannot be set with secret type env: %w", secretParseError)
}
if target == "" {
target = source
@@ -1119,17 +1134,21 @@ func parseLinuxResourcesDeviceAccess(device string) (specs.LinuxDeviceCgroup, er
}
number := strings.SplitN(value[1], ":", 2)
- i, err := strconv.ParseInt(number[0], 10, 64)
- if err != nil {
- return specs.LinuxDeviceCgroup{}, err
+ if number[0] != "*" {
+ i, err := strconv.ParseUint(number[0], 10, 64)
+ if err != nil {
+ return specs.LinuxDeviceCgroup{}, err
+ }
+ m := int64(i)
+ major = &m
}
- major = &i
if len(number) == 2 && number[1] != "*" {
- i, err := strconv.ParseInt(number[1], 10, 64)
+ i, err := strconv.ParseUint(number[1], 10, 64)
if err != nil {
return specs.LinuxDeviceCgroup{}, err
}
- minor = &i
+ m := int64(i)
+ minor = &m
}
access = value[2]
for _, c := range strings.Split(access, "") {
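The device-access hunk above treats "*" as an unrestricted major or minor number and switches to strconv.ParseUint so that negative values such as "-2" are rejected, which is what the new tests below exercise. The core of that change as a standalone sketch (parseDeviceNumber is an illustrative helper):

package main

import (
	"fmt"
	"strconv"
)

// parseDeviceNumber returns nil for "*" (no restriction); anything else must
// parse as an unsigned decimal integer before being stored as *int64.
func parseDeviceNumber(s string) (*int64, error) {
	if s == "*" {
		return nil, nil
	}
	u, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		return nil, err
	}
	n := int64(u)
	return &n, nil
}

func main() {
	major, _ := parseDeviceNumber("3")
	fmt.Println(*major) // 3
	_, err := parseDeviceNumber("-2")
	fmt.Println(err != nil) // true: ParseUint rejects negative numbers
}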
diff --git a/pkg/specgenutil/specgenutil_test.go b/pkg/specgenutil/specgenutil_test.go
index 5867b0ae0..fb2743f17 100644
--- a/pkg/specgenutil/specgenutil_test.go
+++ b/pkg/specgenutil/specgenutil_test.go
@@ -75,3 +75,82 @@ func TestWinPath(t *testing.T) {
}
}
}
+
+func TestParseLinuxResourcesDeviceAccess(t *testing.T) {
+ d, err := parseLinuxResourcesDeviceAccess("a *:* rwm")
+ assert.Nil(t, err, "err is nil")
+ assert.True(t, d.Allow, "allow is true")
+ assert.Equal(t, d.Type, "a", "type is 'a'")
+ assert.Nil(t, d.Minor, "minor is nil")
+ assert.Nil(t, d.Major, "major is nil")
+
+ d, err = parseLinuxResourcesDeviceAccess("b 3:* rwm")
+ assert.Nil(t, err, "err is nil")
+ assert.True(t, d.Allow, "allow is true")
+ assert.Equal(t, d.Type, "b", "type is 'b'")
+ assert.Nil(t, d.Minor, "minor is nil")
+ assert.NotNil(t, d.Major, "major is not nil")
+ assert.Equal(t, *d.Major, int64(3), "major is 3")
+
+ d, err = parseLinuxResourcesDeviceAccess("a *:3 rwm")
+ assert.Nil(t, err, "err is nil")
+ assert.True(t, d.Allow, "allow is true")
+ assert.Equal(t, d.Type, "a", "type is 'a'")
+ assert.Nil(t, d.Major, "major is nil")
+ assert.NotNil(t, d.Minor, "minor is not nil")
+ assert.Equal(t, *d.Minor, int64(3), "minor is 3")
+
+ d, err = parseLinuxResourcesDeviceAccess("c 1:2 rwm")
+ assert.Nil(t, err, "err is nil")
+ assert.True(t, d.Allow, "allow is true")
+ assert.Equal(t, d.Type, "c", "type is 'c'")
+ assert.NotNil(t, d.Major, "major is not nil")
+ assert.Equal(t, *d.Major, int64(1), "major is 1")
+ assert.NotNil(t, d.Minor, "minor is not nil")
+ assert.Equal(t, *d.Minor, int64(2), "minor is 2")
+
+ _, err = parseLinuxResourcesDeviceAccess("q *:* rwm")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a a:* rwm")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a *:a rwm")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a *:* abc")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("* *:* *")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("* *:a2 *")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("*")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("*:*")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a *:*")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a *:*")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a 12a:* r")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a a12:* r")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a 0x1:* r")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a -2:* r")
+ assert.NotNil(t, err, "err is not nil")
+
+ _, err = parseLinuxResourcesDeviceAccess("a *:-3 r")
+ assert.NotNil(t, err, "err is not nil")
+}
diff --git a/pkg/specgenutil/util.go b/pkg/specgenutil/util.go
index fa2e90457..66d148672 100644
--- a/pkg/specgenutil/util.go
+++ b/pkg/specgenutil/util.go
@@ -1,6 +1,8 @@
package specgenutil
import (
+ "errors"
+ "fmt"
"io/ioutil"
"net"
"os"
@@ -10,7 +12,6 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
storageTypes "github.com/containers/storage/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -19,7 +20,7 @@ import (
func ReadPodIDFile(path string) (string, error) {
content, err := ioutil.ReadFile(path)
if err != nil {
- return "", errors.Wrap(err, "error reading pod ID file")
+ return "", fmt.Errorf("error reading pod ID file: %w", err)
}
return strings.Split(string(content), "\n")[0], nil
}
@@ -50,7 +51,7 @@ func CreateExpose(expose []string) (map[uint16]string, error) {
proto := "tcp"
splitProto := strings.Split(e, "/")
if len(splitProto) > 2 {
- return nil, errors.Errorf("invalid expose format - protocol can only be specified once")
+ return nil, errors.New("invalid expose format - protocol can only be specified once")
} else if len(splitProto) == 2 {
proto = splitProto[1]
}
@@ -96,7 +97,7 @@ func CreatePortBindings(ports []string) ([]types.PortMapping, error) {
case 2:
proto = &(splitProto[1])
default:
- return nil, errors.Errorf("invalid port format - protocol can only be specified once")
+ return nil, errors.New("invalid port format - protocol can only be specified once")
}
remainder := splitProto[0]
@@ -111,23 +112,23 @@ func CreatePortBindings(ports []string) ([]types.PortMapping, error) {
// We potentially have an IPv6 address
haveV6 = true
if !strings.HasPrefix(splitV6[0], "[") {
- return nil, errors.Errorf("invalid port format - IPv6 addresses must be enclosed by []")
+ return nil, errors.New("invalid port format - IPv6 addresses must be enclosed by []")
}
if !strings.HasPrefix(splitV6[1], ":") {
- return nil, errors.Errorf("invalid port format - IPv6 address must be followed by a colon (':')")
+ return nil, errors.New("invalid port format - IPv6 address must be followed by a colon (':')")
}
ipNoPrefix := strings.TrimPrefix(splitV6[0], "[")
hostIP = &ipNoPrefix
remainder = strings.TrimPrefix(splitV6[1], ":")
default:
- return nil, errors.Errorf("invalid port format - at most one IPv6 address can be specified in a --publish")
+ return nil, errors.New("invalid port format - at most one IPv6 address can be specified in a --publish")
}
splitPort := strings.Split(remainder, ":")
switch len(splitPort) {
case 1:
if haveV6 {
- return nil, errors.Errorf("invalid port format - must provide host and destination port if specifying an IP")
+ return nil, errors.New("invalid port format - must provide host and destination port if specifying an IP")
}
ctrPort = splitPort[0]
case 2:
@@ -135,13 +136,13 @@ func CreatePortBindings(ports []string) ([]types.PortMapping, error) {
ctrPort = splitPort[1]
case 3:
if haveV6 {
- return nil, errors.Errorf("invalid port format - when v6 address specified, must be [ipv6]:hostPort:ctrPort")
+ return nil, errors.New("invalid port format - when v6 address specified, must be [ipv6]:hostPort:ctrPort")
}
hostIP = &(splitPort[0])
hostPort = &(splitPort[1])
ctrPort = splitPort[2]
default:
- return nil, errors.Errorf("invalid port format - format is [[hostIP:]hostPort:]containerPort")
+ return nil, errors.New("invalid port format - format is [[hostIP:]hostPort:]containerPort")
}
newPort, err := parseSplitPort(hostIP, hostPort, ctrPort, proto)
@@ -160,30 +161,30 @@ func CreatePortBindings(ports []string) ([]types.PortMapping, error) {
func parseSplitPort(hostIP, hostPort *string, ctrPort string, protocol *string) (types.PortMapping, error) {
newPort := types.PortMapping{}
if ctrPort == "" {
- return newPort, errors.Errorf("must provide a non-empty container port to publish")
+ return newPort, errors.New("must provide a non-empty container port to publish")
}
ctrStart, ctrLen, err := parseAndValidateRange(ctrPort)
if err != nil {
- return newPort, errors.Wrapf(err, "error parsing container port")
+ return newPort, fmt.Errorf("error parsing container port: %w", err)
}
newPort.ContainerPort = ctrStart
newPort.Range = ctrLen
if protocol != nil {
if *protocol == "" {
- return newPort, errors.Errorf("must provide a non-empty protocol to publish")
+ return newPort, errors.New("must provide a non-empty protocol to publish")
}
newPort.Protocol = *protocol
}
if hostIP != nil {
if *hostIP == "" {
- return newPort, errors.Errorf("must provide a non-empty container host IP to publish")
+ return newPort, errors.New("must provide a non-empty container host IP to publish")
} else if *hostIP != "0.0.0.0" {
// If hostIP is 0.0.0.0, leave it unset - CNI treats
// 0.0.0.0 and empty differently, Docker does not.
testIP := net.ParseIP(*hostIP)
if testIP == nil {
- return newPort, errors.Errorf("cannot parse %q as an IP address", *hostIP)
+ return newPort, fmt.Errorf("cannot parse %q as an IP address", *hostIP)
}
newPort.HostIP = testIP.String()
}
@@ -196,10 +197,10 @@ func parseSplitPort(hostIP, hostPort *string, ctrPort string, protocol *string)
} else {
hostStart, hostLen, err := parseAndValidateRange(*hostPort)
if err != nil {
- return newPort, errors.Wrapf(err, "error parsing host port")
+ return newPort, fmt.Errorf("error parsing host port: %w", err)
}
if hostLen != ctrLen {
- return newPort, errors.Errorf("host and container port ranges have different lengths: %d vs %d", hostLen, ctrLen)
+ return newPort, fmt.Errorf("host and container port ranges have different lengths: %d vs %d", hostLen, ctrLen)
}
newPort.HostPort = hostStart
}
@@ -216,11 +217,11 @@ func parseSplitPort(hostIP, hostPort *string, ctrPort string, protocol *string)
func parseAndValidateRange(portRange string) (uint16, uint16, error) {
splitRange := strings.Split(portRange, "-")
if len(splitRange) > 2 {
- return 0, 0, errors.Errorf("invalid port format - port ranges are formatted as startPort-stopPort")
+ return 0, 0, errors.New("invalid port format - port ranges are formatted as startPort-stopPort")
}
if splitRange[0] == "" {
- return 0, 0, errors.Errorf("port numbers cannot be negative")
+ return 0, 0, errors.New("port numbers cannot be negative")
}
startPort, err := parseAndValidatePort(splitRange[0])
@@ -231,14 +232,14 @@ func parseAndValidateRange(portRange string) (uint16, uint16, error) {
var rangeLen uint16 = 1
if len(splitRange) == 2 {
if splitRange[1] == "" {
- return 0, 0, errors.Errorf("must provide ending number for port range")
+ return 0, 0, errors.New("must provide ending number for port range")
}
endPort, err := parseAndValidatePort(splitRange[1])
if err != nil {
return 0, 0, err
}
if endPort <= startPort {
- return 0, 0, errors.Errorf("the end port of a range must be higher than the start port - %d is not higher than %d", endPort, startPort)
+ return 0, 0, fmt.Errorf("the end port of a range must be higher than the start port - %d is not higher than %d", endPort, startPort)
}
// Our range is the total number of ports
// involved, so we need to add 1 (8080:8081 is
@@ -253,10 +254,10 @@ func parseAndValidateRange(portRange string) (uint16, uint16, error) {
func parseAndValidatePort(port string) (uint16, error) {
num, err := strconv.Atoi(port)
if err != nil {
- return 0, errors.Wrapf(err, "invalid port number")
+ return 0, fmt.Errorf("invalid port number: %w", err)
}
if num < 1 || num > 65535 {
- return 0, errors.Errorf("port numbers must be between 1 and 65535 (inclusive), got %d", num)
+ return 0, fmt.Errorf("port numbers must be between 1 and 65535 (inclusive), got %d", num)
}
return uint16(num), nil
}
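parseAndValidateRange and parseAndValidatePort above translate "startPort-stopPort" strings into a start port plus an inclusive range length. A condensed, stdlib-only sketch of the same idea (it skips the extra empty-component and port-0 checks the real helpers perform):

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// parsePortRange turns "8080-8082" into (8080, 3): the range length counts
// both endpoints, matching the "add 1" comment in the hunk above.
func parsePortRange(portRange string) (uint16, uint16, error) {
	parts := strings.Split(portRange, "-")
	if len(parts) > 2 {
		return 0, 0, errors.New("port ranges are formatted as startPort-stopPort")
	}
	start, err := strconv.ParseUint(parts[0], 10, 16)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid port number: %w", err)
	}
	length := uint64(1)
	if len(parts) == 2 {
		end, err := strconv.ParseUint(parts[1], 10, 16)
		if err != nil {
			return 0, 0, fmt.Errorf("invalid port number: %w", err)
		}
		if end <= start {
			return 0, 0, fmt.Errorf("end port %d is not higher than start port %d", end, start)
		}
		length = end - start + 1
	}
	return uint16(start), uint16(length), nil
}

func main() {
	fmt.Println(parsePortRange("8080-8082")) // 8080 3 <nil>
}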
diff --git a/pkg/specgenutil/volumes.go b/pkg/specgenutil/volumes.go
index 50d745380..e9fffdca9 100644
--- a/pkg/specgenutil/volumes.go
+++ b/pkg/specgenutil/volumes.go
@@ -2,6 +2,7 @@ package specgenutil
import (
"encoding/csv"
+ "errors"
"fmt"
"path"
"strings"
@@ -11,14 +12,13 @@ import (
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
var (
- errDuplicateDest = errors.Errorf("duplicate mount destination")
- optionArgError = errors.Errorf("must provide an argument for option")
- noDestError = errors.Errorf("must set volume destination")
- errInvalidSyntax = errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs|volume>,[src=<host-dir|volume-name>,]target=<ctr-dir>[,options]")
+ errDuplicateDest = errors.New("duplicate mount destination")
+ errOptionArg = errors.New("must provide an argument for option")
+ errNoDest = errors.New("must set volume destination")
+ errInvalidSyntax = errors.New("incorrect mount format: should be --mount type=<bind|tmpfs|volume>,[src=<host-dir|volume-name>,]target=<ctr-dir>[,options]")
)
// Parse all volume-related options in the create config into a set of mounts
@@ -50,20 +50,20 @@ func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string, addReadOnlyTmpfs bo
// Start with --volume.
for dest, mount := range volumeMounts {
if _, ok := unifiedMounts[dest]; ok {
- return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, dest)
+ return nil, nil, nil, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
}
unifiedMounts[dest] = mount
}
for dest, volume := range volumeVolumes {
if _, ok := unifiedVolumes[dest]; ok {
- return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, dest)
+ return nil, nil, nil, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
}
unifiedVolumes[dest] = volume
}
// Now --tmpfs
for dest, tmpfs := range tmpfsMounts {
if _, ok := unifiedMounts[dest]; ok {
- return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, dest)
+ return nil, nil, nil, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
}
unifiedMounts[dest] = tmpfs
}
@@ -93,7 +93,7 @@ func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string, addReadOnlyTmpfs bo
allMounts := make(map[string]bool)
testAndSet := func(dest string) error {
if _, ok := allMounts[dest]; ok {
- return errors.Wrapf(errDuplicateDest, "conflict at mount destination %v", dest)
+ return fmt.Errorf("conflict at mount destination %v: %w", dest, errDuplicateDest)
}
allMounts[dest] = true
return nil
@@ -125,7 +125,7 @@ func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string, addReadOnlyTmpfs bo
if mount.Type == define.TypeBind {
absSrc, err := specgen.ConvertWinMountPath(mount.Source)
if err != nil {
- return nil, nil, nil, nil, errors.Wrapf(err, "error getting absolute path of %s", mount.Source)
+ return nil, nil, nil, nil, fmt.Errorf("error getting absolute path of %s: %w", mount.Source, err)
}
mount.Source = absSrc
}
@@ -199,7 +199,7 @@ func Mounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.Name
return nil, nil, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
+ return nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
case define.TypeTmpfs:
@@ -208,7 +208,7 @@ func Mounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.Name
return nil, nil, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
+ return nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
case define.TypeDevpts:
@@ -217,7 +217,7 @@ func Mounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.Name
return nil, nil, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
+ return nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
case "image":
@@ -226,7 +226,7 @@ func Mounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.Name
return nil, nil, nil, err
}
if _, ok := finalImageVolumes[volume.Destination]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, volume.Destination)
+ return nil, nil, nil, fmt.Errorf("%v: %w", volume.Destination, errDuplicateDest)
}
finalImageVolumes[volume.Destination] = volume
case "volume":
@@ -235,11 +235,11 @@ func Mounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.Name
return nil, nil, nil, err
}
if _, ok := finalNamedVolumes[volume.Dest]; ok {
- return nil, nil, nil, errors.Wrapf(errDuplicateDest, volume.Dest)
+ return nil, nil, nil, fmt.Errorf("%v: %w", volume.Dest, errDuplicateDest)
}
finalNamedVolumes[volume.Dest] = volume
default:
- return nil, nil, nil, errors.Errorf("invalid filesystem type %q", mountType)
+ return nil, nil, nil, fmt.Errorf("invalid filesystem type %q", mountType)
}
}
@@ -261,7 +261,7 @@ func getBindMount(args []string) (spec.Mount, error) {
newMount.Options = append(newMount.Options, "bind")
case "readonly", "ro", "rw":
if setRORW {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'readonly', 'ro', or 'rw' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'readonly', 'ro', or 'rw' options more than once: %w", errOptionArg)
}
setRORW = true
// Can be formatted as one of:
@@ -288,26 +288,26 @@ func getBindMount(args []string) (spec.Mount, error) {
newMount.Options = append(newMount.Options, "ro")
}
default:
- return newMount, errors.Wrapf(optionArgError, "'readonly', 'ro', or 'rw' must be set to true or false, instead received %q", kv[1])
+ return newMount, fmt.Errorf("'readonly', 'ro', or 'rw' must be set to true or false, instead received %q: %w", kv[1], errOptionArg)
}
default:
- return newMount, errors.Wrapf(optionArgError, "badly formatted option %q", val)
+ return newMount, fmt.Errorf("badly formatted option %q: %w", val, errOptionArg)
}
case "nosuid", "suid":
if setSuid {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'nosuid' and 'suid' options more than once: %w", errOptionArg)
}
setSuid = true
newMount.Options = append(newMount.Options, kv[0])
case "nodev", "dev":
if setDev {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'nodev' and 'dev' options more than once: %w", errOptionArg)
}
setDev = true
newMount.Options = append(newMount.Options, kv[0])
case "noexec", "exec":
if setExec {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'noexec' and 'exec' options more than once: %w", errOptionArg)
}
setExec = true
newMount.Options = append(newMount.Options, kv[0])
@@ -315,21 +315,21 @@ func getBindMount(args []string) (spec.Mount, error) {
newMount.Options = append(newMount.Options, kv[0])
case "bind-propagation":
if len(kv) == 1 {
- return newMount, errors.Wrapf(optionArgError, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
case "src", "source":
if len(kv) == 1 {
- return newMount, errors.Wrapf(optionArgError, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
if len(kv[1]) == 0 {
- return newMount, errors.Wrapf(optionArgError, "host directory cannot be empty")
+ return newMount, fmt.Errorf("host directory cannot be empty: %w", errOptionArg)
}
newMount.Source = kv[1]
setSource = true
case "target", "dst", "destination":
if len(kv) == 1 {
- return newMount, errors.Wrapf(optionArgError, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
@@ -338,11 +338,11 @@ func getBindMount(args []string) (spec.Mount, error) {
setDest = true
case "relabel":
if setRelabel {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'relabel' option more than once")
+ return newMount, fmt.Errorf("cannot pass 'relabel' option more than once: %w", errOptionArg)
}
setRelabel = true
if len(kv) != 2 {
- return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0])
+ return newMount, fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], util.ErrBadMntOption)
}
switch kv[1] {
case "private":
@@ -350,11 +350,11 @@ func getBindMount(args []string) (spec.Mount, error) {
case "shared":
newMount.Options = append(newMount.Options, "z")
default:
- return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0])
+ return newMount, fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], util.ErrBadMntOption)
}
case "U", "chown":
if setOwnership {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'U' or 'chown' option more than once")
+ return newMount, fmt.Errorf("cannot pass 'U' or 'chown' option more than once: %w", errOptionArg)
}
ok, err := validChownFlag(val)
if err != nil {
@@ -375,12 +375,12 @@ func getBindMount(args []string) (spec.Mount, error) {
// Since Docker ignores this option so shall we.
continue
default:
- return newMount, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0])
+ return newMount, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption)
}
}
if !setDest {
- return newMount, noDestError
+ return newMount, errNoDest
}
if !setSource {
@@ -409,49 +409,49 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
switch kv[0] {
case "tmpcopyup", "notmpcopyup":
if setTmpcopyup {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'tmpcopyup' and 'notmpcopyup' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'tmpcopyup' and 'notmpcopyup' options more than once: %w", errOptionArg)
}
setTmpcopyup = true
newMount.Options = append(newMount.Options, kv[0])
case "ro", "rw":
if setRORW {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'ro' and 'rw' options more than once: %w", errOptionArg)
}
setRORW = true
newMount.Options = append(newMount.Options, kv[0])
case "nosuid", "suid":
if setSuid {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'nosuid' and 'suid' options more than once: %w", errOptionArg)
}
setSuid = true
newMount.Options = append(newMount.Options, kv[0])
case "nodev", "dev":
if setDev {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'nodev' and 'dev' options more than once: %w", errOptionArg)
}
setDev = true
newMount.Options = append(newMount.Options, kv[0])
case "noexec", "exec":
if setExec {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
+ return newMount, fmt.Errorf("cannot pass 'noexec' and 'exec' options more than once: %w", errOptionArg)
}
setExec = true
newMount.Options = append(newMount.Options, kv[0])
case "tmpfs-mode":
if len(kv) == 1 {
- return newMount, errors.Wrapf(optionArgError, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
case "tmpfs-size":
if len(kv) == 1 {
- return newMount, errors.Wrapf(optionArgError, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
case "src", "source":
- return newMount, errors.Errorf("source is not supported with tmpfs mounts")
+ return newMount, fmt.Errorf("source is not supported with tmpfs mounts")
case "target", "dst", "destination":
if len(kv) == 1 {
- return newMount, errors.Wrapf(optionArgError, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
@@ -460,7 +460,7 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
setDest = true
case "U", "chown":
if setOwnership {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'U' or 'chown' option more than once")
+ return newMount, fmt.Errorf("cannot pass 'U' or 'chown' option more than once: %w", errOptionArg)
}
ok, err := validChownFlag(val)
if err != nil {
@@ -475,12 +475,12 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
// Since Docker ignores this option so shall we.
continue
default:
- return newMount, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0])
+ return newMount, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption)
}
}
if !setDest {
- return newMount, noDestError
+ return newMount, errNoDest
}
return newMount, nil
@@ -502,7 +502,7 @@ func getDevptsMount(args []string) (spec.Mount, error) {
newMount.Options = append(newMount.Options, val)
case "target", "dst", "destination":
if len(kv) == 1 {
- return newMount, errors.Wrapf(optionArgError, kv[0])
+ return newMount, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
@@ -510,12 +510,12 @@ func getDevptsMount(args []string) (spec.Mount, error) {
newMount.Destination = unixPathClean(kv[1])
setDest = true
default:
- return newMount, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0])
+ return newMount, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption)
}
}
if !setDest {
- return newMount, noDestError
+ return newMount, errNoDest
}
return newMount, nil
@@ -536,38 +536,38 @@ func getNamedVolume(args []string) (*specgen.NamedVolume, error) {
newVolume.Options = append(newVolume.Options, val)
case "ro", "rw":
if setRORW {
- return nil, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once")
+ return nil, fmt.Errorf("cannot pass 'ro' and 'rw' options more than once: %w", errOptionArg)
}
setRORW = true
newVolume.Options = append(newVolume.Options, kv[0])
case "nosuid", "suid":
if setSuid {
- return nil, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
+ return nil, fmt.Errorf("cannot pass 'nosuid' and 'suid' options more than once: %w", errOptionArg)
}
setSuid = true
newVolume.Options = append(newVolume.Options, kv[0])
case "nodev", "dev":
if setDev {
- return nil, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
+ return nil, fmt.Errorf("cannot pass 'nodev' and 'dev' options more than once: %w", errOptionArg)
}
setDev = true
newVolume.Options = append(newVolume.Options, kv[0])
case "noexec", "exec":
if setExec {
- return nil, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
+ return nil, fmt.Errorf("cannot pass 'noexec' and 'exec' options more than once: %w", errOptionArg)
}
setExec = true
newVolume.Options = append(newVolume.Options, kv[0])
case "volume-label":
- return nil, errors.Errorf("the --volume-label option is not presently implemented")
+ return nil, fmt.Errorf("the --volume-label option is not presently implemented")
case "src", "source":
if len(kv) == 1 {
- return nil, errors.Wrapf(optionArgError, kv[0])
+ return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
newVolume.Name = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
- return nil, errors.Wrapf(optionArgError, kv[0])
+ return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return nil, err
@@ -576,7 +576,7 @@ func getNamedVolume(args []string) (*specgen.NamedVolume, error) {
setDest = true
case "U", "chown":
if setOwnership {
- return newVolume, errors.Wrapf(optionArgError, "cannot pass 'U' or 'chown' option more than once")
+ return newVolume, fmt.Errorf("cannot pass 'U' or 'chown' option more than once: %w", errOptionArg)
}
ok, err := validChownFlag(val)
if err != nil {
@@ -591,12 +591,12 @@ func getNamedVolume(args []string) (*specgen.NamedVolume, error) {
// Since Docker ignores this option so shall we.
continue
default:
- return nil, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0])
+ return nil, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption)
}
}
if !setDest {
- return nil, noDestError
+ return nil, errNoDest
}
return newVolume, nil
@@ -605,7 +605,7 @@ func getNamedVolume(args []string) (*specgen.NamedVolume, error) {
// Parse the arguments into an image volume. An image volume is a volume based
// on a container image. The container image is first mounted on the host and
// is then bind-mounted into the container. An ImageVolume is always mounted
-// read only.
+// read-only.
func getImageVolume(args []string) (*specgen.ImageVolume, error) {
newVolume := new(specgen.ImageVolume)
@@ -614,12 +614,12 @@ func getImageVolume(args []string) (*specgen.ImageVolume, error) {
switch kv[0] {
case "src", "source":
if len(kv) == 1 {
- return nil, errors.Wrapf(optionArgError, kv[0])
+ return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
newVolume.Source = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
- return nil, errors.Wrapf(optionArgError, kv[0])
+ return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return nil, err
@@ -632,19 +632,19 @@ func getImageVolume(args []string) (*specgen.ImageVolume, error) {
case "false":
// Nothing to do. RO is default.
default:
- return nil, errors.Wrapf(util.ErrBadMntOption, "invalid rw value %q", kv[1])
+ return nil, fmt.Errorf("invalid rw value %q: %w", kv[1], util.ErrBadMntOption)
}
case "consistency":
// Often used on MACs and mistakenly on Linux platforms.
// Since Docker ignores this option so shall we.
continue
default:
- return nil, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0])
+ return nil, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption)
}
}
if len(newVolume.Source)*len(newVolume.Destination) == 0 {
- return nil, errors.Errorf("must set source and destination for image volume")
+ return nil, errors.New("must set source and destination for image volume")
}
return newVolume, nil
@@ -666,7 +666,7 @@ func getTmpfsMounts(tmpfsFlag []string) (map[string]spec.Mount, error) {
}
if _, ok := m[destPath]; ok {
- return nil, errors.Wrapf(errDuplicateDest, destPath)
+ return nil, fmt.Errorf("%v: %w", destPath, errDuplicateDest)
}
mount := spec.Mount{
@@ -692,10 +692,10 @@ func validChownFlag(flag string) (bool, error) {
case "false":
return false, nil
default:
- return false, errors.Wrapf(optionArgError, "'U' or 'chown' must be set to true or false, instead received %q", kv[1])
+ return false, fmt.Errorf("'U' or 'chown' must be set to true or false, instead received %q: %w", kv[1], errOptionArg)
}
default:
- return false, errors.Wrapf(optionArgError, "badly formatted option %q", flag)
+ return false, fmt.Errorf("badly formatted option %q: %w", flag, errOptionArg)
}
return true, nil
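This file splits each --mount flag into key=value tokens before the option handling rewritten above; the encoding/csv import in its header suggests a csv.Reader does that splitting. A hedged sketch of such a splitter (splitMountFlag is an illustrative name, not the function used here):

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// splitMountFlag reads one comma-separated --mount argument as a single CSV
// record, which also copes with quoted values containing commas.
func splitMountFlag(flag string) ([]string, error) {
	r := csv.NewReader(strings.NewReader(flag))
	return r.Read()
}

func main() {
	fields, err := splitMountFlag("type=bind,src=/data,target=/mnt,ro=true")
	fmt.Println(fields, err) // [type=bind src=/data target=/mnt ro=true] <nil>
}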
diff --git a/pkg/systemd/generate/common.go b/pkg/systemd/generate/common.go
index e53d37897..60b0c4b52 100644
--- a/pkg/systemd/generate/common.go
+++ b/pkg/systemd/generate/common.go
@@ -1,11 +1,11 @@
package generate
import (
+ "fmt"
"strconv"
"strings"
"github.com/containers/podman/v4/pkg/systemd/define"
- "github.com/pkg/errors"
)
// minTimeoutStopSec is the minimal stop timeout for generated systemd units.
@@ -20,7 +20,7 @@ func validateRestartPolicy(restart string) error {
return nil
}
}
- return errors.Errorf("%s is not a valid restart policy", restart)
+ return fmt.Errorf("%s is not a valid restart policy", restart)
}
const headerTemplate = `# {{{{.ServiceName}}}}{{{{- if (eq .IdentifySpecifier true) }}}}@{{{{- end}}}}.service
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go
index d552e21ed..6596ef73b 100644
--- a/pkg/systemd/generate/containers.go
+++ b/pkg/systemd/generate/containers.go
@@ -2,6 +2,7 @@ package generate
import (
"bytes"
+ "errors"
"fmt"
"os"
"sort"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/systemd/define"
"github.com/containers/podman/v4/version"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
@@ -186,14 +186,14 @@ func generateContainerInfo(ctr *libpod.Container, options entities.GenerateSyste
config := ctr.Config()
conmonPidFile := config.ConmonPidFile
if conmonPidFile == "" {
- return nil, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
+ return nil, errors.New("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
}
createCommand := []string{}
if config.CreateCommand != nil {
createCommand = config.CreateCommand
} else if options.New {
- return nil, errors.Errorf("cannot use --new on container %q: no create command found: only works on containers created directly with podman but not via REST API", ctr.ID())
+ return nil, fmt.Errorf("cannot use --new on container %q: no create command found: only works on containers created directly with podman but not via REST API", ctr.ID())
}
nameOrID, serviceName := containerServiceName(ctr, options)
@@ -204,7 +204,7 @@ func generateContainerInfo(ctr *libpod.Container, options entities.GenerateSyste
} else {
runRoot = ctr.Runtime().RunRoot()
if runRoot == "" {
- return nil, errors.Errorf("could not lookup container's runroot: got empty string")
+ return nil, errors.New("could not look up container's runroot: got empty string")
}
}
@@ -350,7 +350,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
}
}
if index == 0 {
- return "", errors.Errorf("container's create command is too short or invalid: %v", info.CreateCommand)
+ return "", fmt.Errorf("container's create command is too short or invalid: %v", info.CreateCommand)
}
// We're hard-coding the first five arguments and append the
// CreateCommand with a stripped command and subcommand.
@@ -520,7 +520,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
// template execution.
templ, err := template.New("container_template").Delims("{{{{", "}}}}").Parse(containerTemplate)
if err != nil {
- return "", errors.Wrap(err, "error parsing systemd service template")
+ return "", fmt.Errorf("error parsing systemd service template: %w", err)
}
var buf bytes.Buffer
@@ -531,7 +531,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
// Now parse the generated template (i.e., buf) and execute it.
templ, err = template.New("container_template").Delims("{{{{", "}}}}").Parse(buf.String())
if err != nil {
- return "", errors.Wrap(err, "error parsing systemd service template")
+ return "", fmt.Errorf("error parsing systemd service template: %w", err)
}
buf = bytes.Buffer{}
diff --git a/pkg/systemd/generate/pods.go b/pkg/systemd/generate/pods.go
index 4043fbfcb..8640418a7 100644
--- a/pkg/systemd/generate/pods.go
+++ b/pkg/systemd/generate/pods.go
@@ -2,6 +2,7 @@ package generate
import (
"bytes"
+ "errors"
"fmt"
"os"
"sort"
@@ -13,7 +14,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/systemd/define"
"github.com/containers/podman/v4/version"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
@@ -141,7 +141,7 @@ func PodUnits(pod *libpod.Pod, options entities.GenerateSystemdOptions) (map[str
// Error out if the pod has no infra container, which we require to be the
// main service.
if !pod.HasInfraContainer() {
- return nil, errors.Errorf("generating systemd unit files: Pod %q has no infra container", pod.Name())
+ return nil, fmt.Errorf("generating systemd unit files: Pod %q has no infra container", pod.Name())
}
podInfo, err := generatePodInfo(pod, options)
@@ -160,7 +160,7 @@ func PodUnits(pod *libpod.Pod, options entities.GenerateSystemdOptions) (map[str
return nil, err
}
if len(containers) == 0 {
- return nil, errors.Errorf("generating systemd unit files: Pod %q has no containers", pod.Name())
+ return nil, fmt.Errorf("generating systemd unit files: Pod %q has no containers", pod.Name())
}
graph, err := libpod.BuildContainerGraph(containers)
if err != nil {
@@ -217,7 +217,7 @@ func generatePodInfo(pod *libpod.Pod, options entities.GenerateSystemdOptions) (
// containerInfo acts as the main service of the pod.
infraCtr, err := pod.InfraContainer()
if err != nil {
- return nil, errors.Wrap(err, "could not find infra container")
+ return nil, fmt.Errorf("could not find infra container: %w", err)
}
stopTimeout := infraCtr.StopTimeout()
@@ -228,12 +228,12 @@ func generatePodInfo(pod *libpod.Pod, options entities.GenerateSystemdOptions) (
config := infraCtr.Config()
conmonPidFile := config.ConmonPidFile
if conmonPidFile == "" {
- return nil, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
+ return nil, errors.New("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
}
createCommand := pod.CreateCommand()
if options.New && len(createCommand) == 0 {
- return nil, errors.Errorf("cannot use --new on pod %q: no create command found", pod.ID())
+ return nil, fmt.Errorf("cannot use --new on pod %q: no create command found", pod.ID())
}
nameOrID := pod.ID()
@@ -312,7 +312,7 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions)
var podRootArgs, podCreateArgs []string
switch len(info.CreateCommand) {
case 0, 1, 2:
- return "", errors.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand)
+ return "", fmt.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand)
default:
// Make sure that pod was created with `pod create` and
// not something else, such as `run --pod new`.
@@ -323,7 +323,7 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions)
}
}
if podCreateIndex == 0 {
- return "", errors.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand)
+ return "", fmt.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand)
}
podRootArgs = info.CreateCommand[1 : podCreateIndex-1]
info.RootFlags = strings.Join(escapeSystemdArguments(podRootArgs), " ")
@@ -402,7 +402,7 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions)
// template execution.
templ, err := template.New("pod_template").Delims("{{{{", "}}}}").Parse(podTemplate)
if err != nil {
- return "", errors.Wrap(err, "error parsing systemd service template")
+ return "", fmt.Errorf("error parsing systemd service template: %w", err)
}
var buf bytes.Buffer
@@ -413,7 +413,7 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions)
// Now parse the generated template (i.e., buf) and execute it.
templ, err = template.New("pod_template").Delims("{{{{", "}}}}").Parse(buf.String())
if err != nil {
- return "", errors.Wrap(err, "error parsing systemd service template")
+ return "", fmt.Errorf("error parsing systemd service template: %w", err)
}
buf = bytes.Buffer{}
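
(Illustrative, not part of the patch.) Both generators parse the unit template twice with the non-default `{{{{`/`}}}}` delimiters, so the text produced by the first pass can itself contain actions expanded on the second pass. A simplified, self-contained sketch of the delimiter behaviour (the field name and template text are made up for the example):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Only "{{{{ }}}}" is treated as a template action; a literal "{{ }}"
	// elsewhere in the unit text passes through untouched.
	const unit = "[Service]\nExecStart=/usr/bin/podman start {{{{.ServiceName}}}}\n"

	templ, err := template.New("pod_template").Delims("{{{{", "}}}}").Parse(unit)
	if err != nil {
		panic(err) // the real code wraps this with fmt.Errorf("...: %w", err)
	}
	if err := templ.Execute(os.Stdout, struct{ ServiceName string }{"my-pod"}); err != nil {
		panic(err)
	}
}
```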
diff --git a/pkg/trust/trust.go b/pkg/trust/trust.go
index 1d0cc61ba..663a1b5e2 100644
--- a/pkg/trust/trust.go
+++ b/pkg/trust/trust.go
@@ -5,6 +5,7 @@ import (
"bytes"
"encoding/base64"
"encoding/json"
+ "fmt"
"io/ioutil"
"os"
"os/exec"
@@ -14,7 +15,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/docker/docker/pkg/homedir"
"github.com/ghodss/yaml"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -126,11 +126,11 @@ func LoadAndMergeConfig(dirPath string) (*RegistryConfiguration, error) {
var config RegistryConfiguration
err = yaml.Unmarshal(configBytes, &config)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing %s", configPath)
+ return nil, fmt.Errorf("error parsing %s: %w", configPath, err)
}
if config.DefaultDocker != nil {
if mergedConfig.DefaultDocker != nil {
- return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+ return nil, fmt.Errorf(`error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
dockerDefaultMergedFrom, configPath)
}
mergedConfig.DefaultDocker = config.DefaultDocker
@@ -138,7 +138,7 @@ func LoadAndMergeConfig(dirPath string) (*RegistryConfiguration, error) {
}
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
if _, ok := mergedConfig.Docker[nsName]; ok {
- return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+ return nil, fmt.Errorf(`error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
nsName, nsMergedFrom[nsName], configPath)
}
mergedConfig.Docker[nsName] = nsConfig
@@ -234,10 +234,10 @@ func GetPolicy(policyPath string) (PolicyContent, error) {
var policyContentStruct PolicyContent
policyContent, err := ioutil.ReadFile(policyPath)
if err != nil {
- return policyContentStruct, errors.Wrap(err, "unable to read policy file")
+ return policyContentStruct, fmt.Errorf("unable to read policy file: %w", err)
}
if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil {
- return policyContentStruct, errors.Wrapf(err, "could not parse trust policies from %s", policyPath)
+ return policyContentStruct, fmt.Errorf("could not parse trust policies from %s: %w", policyPath, err)
}
return policyContentStruct, nil
}
diff --git a/pkg/util/filters.go b/pkg/util/filters.go
index 05ba4f82c..08148806f 100644
--- a/pkg/util/filters.go
+++ b/pkg/util/filters.go
@@ -2,6 +2,7 @@ package util
import (
"encoding/json"
+ "errors"
"fmt"
"net/http"
"path/filepath"
@@ -9,14 +10,13 @@ import (
"time"
"github.com/containers/podman/v4/pkg/timetype"
- "github.com/pkg/errors"
)
// ComputeUntilTimestamp extracts until timestamp from filters
func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
invalid := time.Time{}
if len(filterValues) != 1 {
- return invalid, errors.Errorf("specify exactly one timestamp for until")
+ return invalid, errors.New("specify exactly one timestamp for until")
}
ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
if err != nil {
diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go
index e37394619..49fd5b467 100644
--- a/pkg/util/mountOpts.go
+++ b/pkg/util/mountOpts.go
@@ -1,16 +1,16 @@
package util
import (
+ "errors"
+ "fmt"
"strings"
-
- "github.com/pkg/errors"
)
var (
// ErrBadMntOption indicates that an invalid mount option was passed.
- ErrBadMntOption = errors.Errorf("invalid mount option")
+ ErrBadMntOption = errors.New("invalid mount option")
// ErrDupeMntOption indicates that a duplicate mount option was passed.
- ErrDupeMntOption = errors.Errorf("duplicate mount option passed")
+ ErrDupeMntOption = errors.New("duplicate mount option passed")
)
type defaultMountOptions struct {
@@ -25,7 +25,7 @@ type defaultMountOptions struct {
// The sourcePath variable, if not empty, contains a bind mount source.
func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) {
var (
- foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap bool
+ foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap, foundCopy bool
)
newOptions := make([]string, 0, len(options))
@@ -47,7 +47,7 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string
if strings.HasPrefix(splitOpt[0], "idmap") {
if foundIdmap {
- return nil, errors.Wrapf(ErrDupeMntOption, "the 'idmap' option can only be set once")
+ return nil, fmt.Errorf("the 'idmap' option can only be set once: %w", ErrDupeMntOption)
}
foundIdmap = true
newOptions = append(newOptions, opt)
@@ -55,6 +55,11 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string
}
switch splitOpt[0] {
+ case "copy", "nocopy":
+ if foundCopy {
+ return nil, fmt.Errorf("only one of 'nocopy' and 'copy' can be used: %w", ErrDupeMntOption)
+ }
+ foundCopy = true
case "O":
foundOverlay = true
case "volume-opt":
@@ -62,51 +67,51 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string
newOptions = append(newOptions, opt)
case "exec", "noexec":
if foundExec {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'noexec' and 'exec' can be used")
+ return nil, fmt.Errorf("only one of 'noexec' and 'exec' can be used: %w", ErrDupeMntOption)
}
foundExec = true
case "suid", "nosuid":
if foundSuid {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nosuid' and 'suid' can be used")
+ return nil, fmt.Errorf("only one of 'nosuid' and 'suid' can be used: %w", ErrDupeMntOption)
}
foundSuid = true
case "nodev", "dev":
if foundDev {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nodev' and 'dev' can be used")
+ return nil, fmt.Errorf("only one of 'nodev' and 'dev' can be used: %w", ErrDupeMntOption)
}
foundDev = true
case "rw", "ro":
if foundWrite {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rw' and 'ro' can be used")
+ return nil, fmt.Errorf("only one of 'rw' and 'ro' can be used: %w", ErrDupeMntOption)
}
foundWrite = true
case "private", "rprivate", "slave", "rslave", "shared", "rshared", "unbindable", "runbindable":
if foundProp {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one root propagation mode can be used")
+ return nil, fmt.Errorf("only one root propagation mode can be used: %w", ErrDupeMntOption)
}
foundProp = true
case "size":
if !isTmpfs {
- return nil, errors.Wrapf(ErrBadMntOption, "the 'size' option is only allowed with tmpfs mounts")
+ return nil, fmt.Errorf("the 'size' option is only allowed with tmpfs mounts: %w", ErrBadMntOption)
}
if foundSize {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs size can be specified")
+ return nil, fmt.Errorf("only one tmpfs size can be specified: %w", ErrDupeMntOption)
}
foundSize = true
case "mode":
if !isTmpfs {
- return nil, errors.Wrapf(ErrBadMntOption, "the 'mode' option is only allowed with tmpfs mounts")
+ return nil, fmt.Errorf("the 'mode' option is only allowed with tmpfs mounts: %w", ErrBadMntOption)
}
if foundMode {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs mode can be specified")
+ return nil, fmt.Errorf("only one tmpfs mode can be specified: %w", ErrDupeMntOption)
}
foundMode = true
case "tmpcopyup":
if !isTmpfs {
- return nil, errors.Wrapf(ErrBadMntOption, "the 'tmpcopyup' option is only allowed with tmpfs mounts")
+ return nil, fmt.Errorf("the 'tmpcopyup' option is only allowed with tmpfs mounts: %w", ErrBadMntOption)
}
if foundCopyUp {
- return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once")
+ return nil, fmt.Errorf("the 'tmpcopyup' or 'notmpcopyup' option can only be set once: %w", ErrDupeMntOption)
}
foundCopyUp = true
case "consistency":
@@ -115,37 +120,37 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string
continue
case "notmpcopyup":
if !isTmpfs {
- return nil, errors.Wrapf(ErrBadMntOption, "the 'notmpcopyup' option is only allowed with tmpfs mounts")
+ return nil, fmt.Errorf("the 'notmpcopyup' option is only allowed with tmpfs mounts: %w", ErrBadMntOption)
}
if foundCopyUp {
- return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once")
+ return nil, fmt.Errorf("the 'tmpcopyup' or 'notmpcopyup' option can only be set once: %w", ErrDupeMntOption)
}
foundCopyUp = true
// do not propagate notmpcopyup to the OCI runtime
continue
case "bind", "rbind":
if isTmpfs {
- return nil, errors.Wrapf(ErrBadMntOption, "the 'bind' and 'rbind' options are not allowed with tmpfs mounts")
+ return nil, fmt.Errorf("the 'bind' and 'rbind' options are not allowed with tmpfs mounts: %w", ErrBadMntOption)
}
if foundBind {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rbind' and 'bind' can be used")
+ return nil, fmt.Errorf("only one of 'rbind' and 'bind' can be used: %w", ErrDupeMntOption)
}
foundBind = true
case "z", "Z":
if isTmpfs {
- return nil, errors.Wrapf(ErrBadMntOption, "the 'z' and 'Z' options are not allowed with tmpfs mounts")
+ return nil, fmt.Errorf("the 'z' and 'Z' options are not allowed with tmpfs mounts: %w", ErrBadMntOption)
}
if foundZ {
- return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'z' and 'Z' can be used")
+ return nil, fmt.Errorf("only one of 'z' and 'Z' can be used: %w", ErrDupeMntOption)
}
foundZ = true
case "U":
if foundU {
- return nil, errors.Wrapf(ErrDupeMntOption, "the 'U' option can only be set once")
+ return nil, fmt.Errorf("the 'U' option can only be set once: %w", ErrDupeMntOption)
}
foundU = true
default:
- return nil, errors.Wrapf(ErrBadMntOption, "unknown mount option %q", opt)
+ return nil, fmt.Errorf("unknown mount option %q: %w", opt, ErrBadMntOption)
}
newOptions = append(newOptions, opt)
}
@@ -182,11 +187,11 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string
func ParseDriverOpts(option string) (string, string, error) {
token := strings.SplitN(option, "=", 2)
if len(token) != 2 {
- return "", "", errors.Wrapf(ErrBadMntOption, "cannot parse driver opts")
+ return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption)
}
opt := strings.SplitN(token[1], "=", 2)
if len(opt) != 2 {
- return "", "", errors.Wrapf(ErrBadMntOption, "cannot parse driver opts")
+ return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption)
}
return opt[0], opt[1], nil
}
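
(Caller-side sketch, not part of the patch.) Because the duplicate/invalid-option messages now wrap the exported sentinels with `%w` instead of `errors.Wrapf`, code that matches on `ErrDupeMntOption` or `ErrBadMntOption` keeps working unchanged:

```go
package main

import (
	"errors"
	"log"

	"github.com/containers/podman/v4/pkg/util"
)

func main() {
	// "ro" and "rw" conflict, so ProcessOptions returns a wrapped ErrDupeMntOption.
	_, err := util.ProcessOptions([]string{"ro", "rw"}, false, "")
	if errors.Is(err, util.ErrDupeMntOption) {
		log.Println("conflicting mount options:", err)
	}
}
```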
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index a0bf8b50d..33c11d611 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -2,6 +2,7 @@ package util
import (
"encoding/json"
+ "errors"
"fmt"
"io/fs"
"math"
@@ -17,6 +18,7 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/util"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/namespaces"
@@ -26,7 +28,6 @@ import (
stypes "github.com/containers/storage/types"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/term"
)
@@ -67,7 +68,7 @@ func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) {
fmt.Print("Password: ")
termPassword, err := term.ReadPassword(0)
if err != nil {
- return nil, errors.Wrapf(err, "could not read password from terminal")
+ return nil, fmt.Errorf("could not read password from terminal: %w", err)
}
password = string(termPassword)
}
@@ -78,14 +79,9 @@ func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) {
}, nil
}
-// StringInSlice determines if a string is in a string slice, returns bool
+// StringInSlice is deprecated, use containers/common/pkg/util/StringInSlice
func StringInSlice(s string, sl []string) bool {
- for _, i := range sl {
- if i == s {
- return true
- }
- }
- return false
+ return util.StringInSlice(s, sl)
}
// StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool
@@ -133,7 +129,7 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
if len(split) != 2 {
split = strings.SplitN(change, "=", 2)
if len(split) != 2 {
- return ImageConfig{}, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
}
}
@@ -143,7 +139,7 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
case "USER":
// Assume literal contents are the user.
if value == "" {
- return ImageConfig{}, errors.Errorf("invalid change %q - must provide a value to USER", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - must provide a value to USER", change)
}
config.User = value
case "EXPOSE":
@@ -152,14 +148,14 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
// Protocol must be "tcp" or "udp"
splitPort := strings.Split(value, "/")
if len(splitPort) > 2 {
- return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
}
portNum, err := strconv.Atoi(splitPort[0])
if err != nil {
- return ImageConfig{}, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - EXPOSE port must be an integer: %w", change, err)
}
if portNum > 65535 || portNum <= 0 {
- return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
}
proto := "tcp"
if len(splitPort) > 1 {
@@ -168,7 +164,7 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
case "tcp", "udp":
proto = testProto
default:
- return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
}
}
if config.ExposedPorts == nil {
@@ -192,7 +188,7 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
key = splitEnv[0]
// We do need a key
if key == "" {
- return ImageConfig{}, errors.Errorf("invalid change %q - ENV must have at least one argument", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - ENV must have at least one argument", change)
}
// Perfectly valid to not have a value
if len(splitEnv) == 2 {
@@ -254,11 +250,11 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
testUnmarshal = strings.Split(value, " ")
}
if len(testUnmarshal) == 0 {
- return ImageConfig{}, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
}
for _, vol := range testUnmarshal {
if vol == "" {
- return ImageConfig{}, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - VOLUME paths must not be empty", change)
}
if config.Volumes == nil {
config.Volumes = make(map[string]struct{})
@@ -272,7 +268,7 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
// WORKDIR c results in /A/b/c
// Just need to check it's not empty...
if value == "" {
- return ImageConfig{}, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
}
config.WorkingDir = filepath.Join(config.WorkingDir, value)
case "LABEL":
@@ -289,7 +285,7 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
splitLabel := strings.SplitN(value, "=", 2)
// Unlike ENV, LABEL must have a value
if len(splitLabel) != 2 {
- return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - LABEL must be formatted key=value", change)
}
key = splitLabel[0]
val = splitLabel[1]
@@ -302,7 +298,7 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
}
// Check key after we strip quotations
if key == "" {
- return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - LABEL must have a non-empty key", change)
}
if config.Labels == nil {
config.Labels = make(map[string]string)
@@ -312,17 +308,17 @@ func GetImageConfig(changes []string) (ImageConfig, error) {
// Check the provided signal for validity.
killSignal, err := ParseSignal(value)
if err != nil {
- return ImageConfig{}, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - KILLSIGNAL must be given a valid signal: %w", change, err)
}
config.StopSignal = fmt.Sprintf("%d", killSignal)
case "ONBUILD":
// Onbuild always appends.
if value == "" {
- return ImageConfig{}, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - ONBUILD must be given an argument", change)
}
config.OnBuild = append(config.OnBuild, value)
default:
- return ImageConfig{}, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
+ return ImageConfig{}, fmt.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
}
}
@@ -340,7 +336,7 @@ func ParseSignal(rawSignal string) (syscall.Signal, error) {
}
// 64 is SIGRTMAX; wish we could get this from a standard Go library
if sig < 1 || sig > 64 {
- return -1, errors.Errorf("valid signals are 1 through 64")
+ return -1, errors.New("valid signals are 1 through 64")
}
return sig, nil
}
@@ -366,10 +362,10 @@ func GetKeepIDMapping() (*stypes.IDMappingOptions, int, int, error) {
uids, gids, err := rootless.GetConfiguredMappings()
if err != nil {
- return nil, -1, -1, errors.Wrapf(err, "cannot read mappings")
+ return nil, -1, -1, fmt.Errorf("cannot read mappings: %w", err)
}
if len(uids) == 0 || len(gids) == 0 {
- return nil, -1, -1, errors.Wrapf(err, "keep-id requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly")
+ return nil, -1, -1, fmt.Errorf("keep-id requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly: %w", err)
}
maxUID, maxGID := 0, 0
for _, u := range uids {
@@ -407,10 +403,10 @@ func GetNoMapMapping() (*stypes.IDMappingOptions, int, int, error) {
}
uids, gids, err := rootless.GetConfiguredMappings()
if err != nil {
- return nil, -1, -1, errors.Wrapf(err, "cannot read mappings")
+ return nil, -1, -1, fmt.Errorf("cannot read mappings: %w", err)
}
if len(uids) == 0 || len(gids) == 0 {
- return nil, -1, -1, errors.Wrapf(err, "nomap requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly")
+ return nil, -1, -1, fmt.Errorf("nomap requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly: %w", err)
}
options.UIDMap, options.GIDMap = nil, nil
uid, gid := 0, 0
@@ -570,7 +566,7 @@ func ParseInputTime(inputTime string, since bool) (time.Time, error) {
// input might be a duration
duration, err := time.ParseDuration(inputTime)
if err != nil {
- return time.Time{}, errors.Errorf("unable to interpret time value")
+ return time.Time{}, errors.New("unable to interpret time value")
}
if since {
return time.Now().Add(-duration), nil
@@ -611,7 +607,7 @@ func HomeDir() (string, error) {
if home == "" {
usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID()))
if err != nil {
- return "", errors.Wrapf(err, "unable to resolve HOME directory")
+ return "", fmt.Errorf("unable to resolve HOME directory: %w", err)
}
home = usr.HomeDir
}
@@ -649,12 +645,12 @@ func ValidateSysctls(strSlice []string) (map[string]string, error) {
foundMatch := false
arr := strings.Split(val, "=")
if len(arr) < 2 {
- return nil, errors.Errorf("%s is invalid, sysctl values must be in the form of KEY=VALUE", val)
+ return nil, fmt.Errorf("%s is invalid, sysctl values must be in the form of KEY=VALUE", val)
}
trimmed := fmt.Sprintf("%s=%s", strings.TrimSpace(arr[0]), strings.TrimSpace(arr[1]))
if trimmed != val {
- return nil, errors.Errorf("'%s' is invalid, extra spaces found", val)
+ return nil, fmt.Errorf("'%s' is invalid, extra spaces found", val)
}
if validSysctlMap[arr[0]] {
@@ -670,7 +666,7 @@ func ValidateSysctls(strSlice []string) (map[string]string, error) {
}
}
if !foundMatch {
- return nil, errors.Errorf("sysctl '%s' is not allowed", arr[0])
+ return nil, fmt.Errorf("sysctl '%s' is not allowed", arr[0])
}
}
return sysctl, nil
@@ -684,9 +680,9 @@ func CreateCidFile(cidfile string, id string) error {
cidFile, err := OpenExclusiveFile(cidfile)
if err != nil {
if os.IsExist(err) {
- return errors.Errorf("container id file exists. Ensure another container is not using it or delete %s", cidfile)
+ return fmt.Errorf("container id file exists. Ensure another container is not using it or delete %s", cidfile)
}
- return errors.Errorf("opening cidfile %s", cidfile)
+ return fmt.Errorf("opening cidfile %s", cidfile)
}
if _, err = cidFile.WriteString(id); err != nil {
logrus.Error(err)
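
(Usage sketch, not part of the patch.) `GetImageConfig` parses `--change`-style instructions of the form `INSTRUCTION VALUE` (or `INSTRUCTION=VALUE`) and now reports violations with plain `fmt.Errorf` messages; for example:

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/util"
)

func main() {
	cfg, err := util.GetImageConfig([]string{
		"USER nobody",
		"EXPOSE 8080/tcp",
		"WORKDIR /srv",
	})
	if err != nil {
		fmt.Println("invalid change:", err) // e.g. an unknown instruction or a bad EXPOSE port
		return
	}
	fmt.Println(cfg.User, cfg.WorkingDir, cfg.ExposedPorts)
}
```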
diff --git a/pkg/util/utils_darwin.go b/pkg/util/utils_darwin.go
index 66ae85e9c..3a2e587df 100644
--- a/pkg/util/utils_darwin.go
+++ b/pkg/util/utils_darwin.go
@@ -4,7 +4,7 @@
package util
import (
- "github.com/pkg/errors"
+ "errors"
)
func GetContainerPidInformationDescriptors() ([]string, error) {
diff --git a/pkg/util/utils_linux.go b/pkg/util/utils_linux.go
index 0b21bf3c5..e2d9e3e89 100644
--- a/pkg/util/utils_linux.go
+++ b/pkg/util/utils_linux.go
@@ -1,15 +1,26 @@
package util
import (
+ "errors"
"fmt"
"io/fs"
+ "io/ioutil"
"os"
"path/filepath"
+ "strings"
"syscall"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/psgo"
- "github.com/pkg/errors"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
"github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ errNotADevice = errors.New("not a device node")
)
// GetContainerPidInformationDescriptors returns a string slice of all supported
@@ -42,7 +53,7 @@ func FindDeviceNodes() (map[string]string, error) {
// We are a device node. Get major/minor.
sysstat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
- return errors.Errorf("Could not convert stat output for use")
+ return errors.New("could not convert stat output for use")
}
// We must typeconvert sysstat.Rdev from uint64->int to avoid constant overflow
rdev := int(sysstat.Rdev)
@@ -59,3 +70,134 @@ func FindDeviceNodes() (map[string]string, error) {
return nodes, nil
}
+
+func AddPrivilegedDevices(g *generate.Generator) error {
+ hostDevices, err := getDevices("/dev")
+ if err != nil {
+ return err
+ }
+ g.ClearLinuxDevices()
+
+ if rootless.IsRootless() {
+ mounts := make(map[string]interface{})
+ for _, m := range g.Mounts() {
+ mounts[m.Destination] = true
+ }
+ newMounts := []spec.Mount{}
+ for _, d := range hostDevices {
+ devMnt := spec.Mount{
+ Destination: d.Path,
+ Type: define.TypeBind,
+ Source: d.Path,
+ Options: []string{"slave", "nosuid", "noexec", "rw", "rbind"},
+ }
+ if d.Path == "/dev/ptmx" || strings.HasPrefix(d.Path, "/dev/tty") {
+ continue
+ }
+ if _, found := mounts[d.Path]; found {
+ continue
+ }
+ newMounts = append(newMounts, devMnt)
+ }
+ g.Config.Mounts = append(newMounts, g.Config.Mounts...)
+ if g.Config.Linux.Resources != nil {
+ g.Config.Linux.Resources.Devices = nil
+ }
+ } else {
+ for _, d := range hostDevices {
+ g.AddDevice(d)
+ }
+ // Add resources device - need to clear the existing one first.
+ if g.Config.Linux.Resources != nil {
+ g.Config.Linux.Resources.Devices = nil
+ }
+ g.AddLinuxResourcesDevice(true, "", nil, nil, "rwm")
+ }
+
+ return nil
+}
+
+// based on getDevices from runc (libcontainer/devices/devices.go)
+func getDevices(path string) ([]spec.LinuxDevice, error) {
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ if rootless.IsRootless() && os.IsPermission(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ out := []spec.LinuxDevice{}
+ for _, f := range files {
+ switch {
+ case f.IsDir():
+ switch f.Name() {
+ // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825
+ case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts":
+ continue
+ default:
+ sub, err := getDevices(filepath.Join(path, f.Name()))
+ if err != nil {
+ return nil, err
+ }
+ if sub != nil {
+ out = append(out, sub...)
+ }
+ continue
+ }
+ case f.Name() == "console":
+ continue
+ case f.Mode()&os.ModeSymlink != 0:
+ continue
+ }
+
+ device, err := DeviceFromPath(filepath.Join(path, f.Name()))
+ if err != nil {
+ if err == errNotADevice {
+ continue
+ }
+ if os.IsNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ out = append(out, *device)
+ }
+ return out, nil
+}
+
+// Copied from github.com/opencontainers/runc/libcontainer/devices
+// Given the path to a device look up the information about a linux device
+func DeviceFromPath(path string) (*spec.LinuxDevice, error) {
+ var stat unix.Stat_t
+ err := unix.Lstat(path, &stat)
+ if err != nil {
+ return nil, err
+ }
+ var (
+ devType string
+ mode = stat.Mode
+ devNumber = uint64(stat.Rdev) //nolint: unconvert
+ m = os.FileMode(mode)
+ )
+
+ switch {
+ case mode&unix.S_IFBLK == unix.S_IFBLK:
+ devType = "b"
+ case mode&unix.S_IFCHR == unix.S_IFCHR:
+ devType = "c"
+ case mode&unix.S_IFIFO == unix.S_IFIFO:
+ devType = "p"
+ default:
+ return nil, errNotADevice
+ }
+
+ return &spec.LinuxDevice{
+ Type: devType,
+ Path: path,
+ FileMode: &m,
+ UID: &stat.Uid,
+ GID: &stat.Gid,
+ Major: int64(unix.Major(devNumber)),
+ Minor: int64(unix.Minor(devNumber)),
+ }, nil
+}
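
(Usage sketch, Linux only, not part of the patch.) `DeviceFromPath` stats a path and reports its device type and major/minor numbers; this is how `getDevices` builds the device list that `AddPrivilegedDevices` injects for privileged containers:

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/util"
)

func main() {
	// Look up /dev/null the same way getDevices does for each node under /dev.
	dev, err := util.DeviceFromPath("/dev/null")
	if err != nil {
		fmt.Println("not usable as a device:", err)
		return
	}
	// On a typical system this prints: c 1:3 /dev/null
	fmt.Printf("%s %d:%d %s\n", dev.Type, dev.Major, dev.Minor, dev.Path)
}
```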
diff --git a/pkg/util/utils_supported.go b/pkg/util/utils_supported.go
index 50e4b1b7b..b3d690158 100644
--- a/pkg/util/utils_supported.go
+++ b/pkg/util/utils_supported.go
@@ -7,13 +7,13 @@ package util
// should work to take darwin from this
import (
+ "errors"
"fmt"
"os"
"path/filepath"
"syscall"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -51,12 +51,12 @@ func GetRuntimeDir() (string, error) {
if runtimeDir == "" {
home := os.Getenv("HOME")
if home == "" {
- rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
+ rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
return
}
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
- rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home)
+ rootlessRuntimeDirError = fmt.Errorf("cannot resolve %s: %w", home, err)
return
}
runtimeDir = filepath.Join(resolvedHome, "rundir")
@@ -80,7 +80,7 @@ func GetRootlessConfigHomeDir() (string, error) {
home := os.Getenv("HOME")
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
- rootlessConfigHomeDirError = errors.Wrapf(err, "cannot resolve %s", home)
+ rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err)
return
}
tmpDir := filepath.Join(resolvedHome, ".config")
@@ -115,7 +115,7 @@ func GetRootlessPauseProcessPidPath() (string, error) {
// files.
func GetRootlessPauseProcessPidPathGivenDir(libpodTmpDir string) (string, error) {
if libpodTmpDir == "" {
- return "", errors.Errorf("must provide non-empty temporary directory")
+ return "", errors.New("must provide non-empty temporary directory")
}
return filepath.Join(libpodTmpDir, "pause.pid"), nil
}
diff --git a/pkg/util/utils_unsupported.go b/pkg/util/utils_unsupported.go
index 896346493..3a0f8646b 100644
--- a/pkg/util/utils_unsupported.go
+++ b/pkg/util/utils_unsupported.go
@@ -3,11 +3,9 @@
package util
-import (
- "github.com/pkg/errors"
-)
+import "errors"
// FindDeviceNodes is not implemented anywhere except Linux.
func FindDeviceNodes() (map[string]string, error) {
- return nil, errors.Errorf("not supported on non-Linux OSes")
+ return nil, errors.New("not supported on non-Linux OSes")
}
diff --git a/pkg/util/utils_windows.go b/pkg/util/utils_windows.go
index b91680f7a..703e5472a 100644
--- a/pkg/util/utils_windows.go
+++ b/pkg/util/utils_windows.go
@@ -4,35 +4,36 @@
package util
import (
+ "errors"
+ "fmt"
"path/filepath"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
)
var errNotImplemented = errors.New("not yet implemented")
// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 unified mode.
func IsCgroup2UnifiedMode() (bool, error) {
- return false, errors.Wrap(errNotImplemented, "IsCgroup2Unified")
+ return false, fmt.Errorf("IsCgroup2Unified: %w", errNotImplemented)
}
// GetContainerPidInformationDescriptors returns a string slice of all supported
// format descriptors of GetContainerPidInformation.
func GetContainerPidInformationDescriptors() ([]string, error) {
- return nil, errors.Wrap(errNotImplemented, "GetContainerPidInformationDescriptors")
+ return nil, fmt.Errorf("GetContainerPidInformationDescriptors: %w", errNotImplemented)
}
// GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for
// the pause process
func GetRootlessPauseProcessPidPath() (string, error) {
- return "", errors.Wrap(errNotImplemented, "GetRootlessPauseProcessPidPath")
+ return "", fmt.Errorf("GetRootlessPauseProcessPidPath: %w", errNotImplemented)
}
// GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for
// the pause process
func GetRootlessPauseProcessPidPathGivenDir(unused string) (string, error) {
- return "", errors.Wrap(errNotImplemented, "GetRootlessPauseProcessPidPath")
+ return "", fmt.Errorf("GetRootlessPauseProcessPidPath: %w", errNotImplemented)
}
// GetRuntimeDir returns the runtime directory
diff --git a/podman.spec.rpkg b/podman.spec.rpkg
index c9127c2d9..30653b658 100644
--- a/podman.spec.rpkg
+++ b/podman.spec.rpkg
@@ -226,13 +226,13 @@ done
%{_unitdir}/%{name}.service
%{_unitdir}/%{name}.socket
%{_unitdir}/%{name}-restart.service
-%{_unitdir}/%{name}-play-kube@.service
+%{_unitdir}/%{name}-kube@.service
%{_userunitdir}/%{name}-auto-update.service
%{_userunitdir}/%{name}-auto-update.timer
%{_userunitdir}/%{name}.service
%{_userunitdir}/%{name}.socket
%{_userunitdir}/%{name}-restart.service
-%{_userunitdir}/%{name}-play-kube@.service
+%{_userunitdir}/%{name}-kube@.service
%{_tmpfilesdir}/%{name}.conf
%if 0%{?fedora} >= 36
%{_modulesloaddir}/%{name}-iptables.conf
diff --git a/rootless.md b/rootless.md
index 39c961d2a..f5d78b80b 100644
--- a/rootless.md
+++ b/rootless.md
@@ -8,7 +8,7 @@ Contributors are more than welcomed to help with this work. If you decide to ca
* The kernel does not allow processes without CAP_NET_BIND_SERVICE to bind to low ports.
* You can modify the `net.ipv4.ip_unprivileged_port_start` sysctl to change the lowest port. For example `sysctl net.ipv4.ip_unprivileged_port_start=443` allows rootless Podman containers to bind to ports >= 443.
* “How To” documentation is patchy at best.
-* If /etc/subuid and /etc/subgid are not setup for a user, then podman commands
+* If /etc/subuid and /etc/subgid are not set up for a user, then podman commands
can easily fail
* This can be a big issue on machines using Network Based Password information (FreeIPA, Active Directory, LDAP)
* We are working to get support for NSSWITCH on the /etc/subuid and /etc/subgid files.
@@ -24,7 +24,7 @@ can easily fail
* NFS and parallel filesystems enforce file creation on different UIDs on the server side and does not understand User Namespace.
* When a container root process like YUM attempts to create a file owned by a different UID, NFS Server/GPFS denies the creation.
* Does not work with homedirs mounted with noexec/nodev
- * User can setup storage to point to other directories they can write to that are not mounted noexec/nodev
+ * User can set up storage to point to other directories they can write to that are not mounted noexec/nodev
* Support for using native overlayfs as an unprivileged user is only available for Podman version >= 3.1 on a Linux kernel version >= 5.12, otherwise the slower _fuse-overlayfs_ may be used.
* A few Linux distributions (e.g. Ubuntu) have supported even older Podman and Linux kernel versions by modifying the normal Linux kernel behaviour.
* Only other supported driver is VFS.
diff --git a/test/apiv2/10-images.at b/test/apiv2/10-images.at
index fd04e3f1b..f03b95786 100644
--- a/test/apiv2/10-images.at
+++ b/test/apiv2/10-images.at
@@ -53,7 +53,7 @@ t POST "images/create?fromImage=alpine" 200 .error~null .status~".*Download comp
t POST "images/create?fromImage=alpine&tag=latest" 200
# 10977 - handle platform parameter correctly
-t POST "images/create?fromImage=testimage:20210610&platform=linux/arm64" 200
+t POST "images/create?fromImage=quay.io/libpod/testimage:20210610&platform=linux/arm64" 200
t GET "images/testimage:20210610/json" 200 \
.Architecture=arm64
@@ -225,6 +225,18 @@ t POST "images/load" ${TMPD}/test.tar 200 \
t GET libpod/images/quay.io/libpod/alpine:latest/exists 204
t GET libpod/images/quay.io/libpod/busybox:latest/exists 204
+CONTAINERFILE_WITH_ERR_TAR="${TMPD}/containerfile.tar"
+cat > $TMPD/containerfile << EOF
+FROM quay.io/fedora/fedora
+RUN echo 'some error' >&2
+EOF
+tar --format=posix -C $TMPD -cvf ${CONTAINERFILE_WITH_ERR_TAR} containerfile &> /dev/null
+t POST "build?q=1&dockerfile=containerfile" $CONTAINERFILE_WITH_ERR_TAR 200
+response_output=$(cat "$WORKDIR/curl.result.out")
+if [[ ${response_output} == *"some error"* ]];then
+ _show_ok 0 "compat quiet build" "~ $response_output" "found output from stderr in API"
+fi
+
cleanBuildTest
# vim: filetype=sh
diff --git a/test/apiv2/12-imagesMore.at b/test/apiv2/12-imagesMore.at
index 67b4f1c79..fc18dd2d7 100644
--- a/test/apiv2/12-imagesMore.at
+++ b/test/apiv2/12-imagesMore.at
@@ -6,6 +6,8 @@
red='\e[31m'
nc='\e[0m'
+start_registry
+
podman pull -q $IMAGE
t GET libpod/images/json 200 \
@@ -20,48 +22,51 @@ t GET libpod/images/$IMAGE/tree 200 \
t POST "libpod/images/nonesuch/tag?repo=myrepo&tag=mytag" 404
# Tag the image
-t POST "libpod/images/$IMAGE/tag?repo=localhost:5000/myrepo&tag=mytag" 201
+t POST "libpod/images/$IMAGE/tag?repo=localhost:$REGISTRY_PORT/myrepo&tag=mytag" 201
t GET libpod/images/$IMAGE/json 200 \
- .RepoTags[1]=localhost:5000/myrepo:mytag
-
-# Run registry container
-# FIXME this fails if python tests have been run first...
-podman run -d --name registry -p 5000:5000 quay.io/libpod/registry:2.7 /entrypoint.sh /etc/docker/registry/config.yml
-wait_for_port localhost 5000
-
-# Push to local registry and check output
-while read -r LINE
-do
- if echo "${LINE}" | jq --exit-status 'select( .status != null) | select ( .status | contains("digest: sha256:"))' &>/dev/null; then
- GOT_DIGEST="1"
- fi
-done < <(curl -sL "http://$HOST:$PORT/images/localhost:5000/myrepo/push?tlsVerify=false&tag=mytag" -XPOST)
-if [ -z "${GOT_DIGEST}" ] ; then
- echo -e "${red}not ok: did not found digest in output${nc}" 1>&2;
-fi
-
-# Push to local registry
-t POST "images/localhost:5000/myrepo/push?tlsVerify=false&tag=mytag" 200
+ .RepoTags[1]=localhost:$REGISTRY_PORT/myrepo:mytag
+
+# Push to local registry...
+t POST "images/localhost:$REGISTRY_PORT/myrepo/push?tlsVerify=false&tag=mytag" 200
+
+# ...and check output. We can't use our built-in checks because this output
+# is a sequence of JSON objects, i.e., individual ones, not in a JSON array.
+# The lines themselves are valid JSON, but taken together they are not.
+readarray lines <<<"$output"
+s0=$(jq -r .status <<<"${lines[0]}")
+is "$s0" "The push refers to repository [localhost:$REGISTRY_PORT/myrepo:mytag]" \
+ "Push to local registry: first status line"
+
+# FIXME: is there a way to test the actual digest?
+s1=$(jq -r .status <<<"${lines[1]}")
+like "$s1" "mytag: digest: sha256:[0-9a-f]\{64\} size: [0-9]\+" \
+ "Push to local registry: second status line"
# Untag the image
-t POST "libpod/images/$iid/untag?repo=localhost:5000/myrepo&tag=mytag" 201
+t POST "libpod/images/$iid/untag?repo=localhost:$REGISTRY_PORT/myrepo&tag=mytag" 201
# Try to push non-existing image
-t POST "images/localhost:5000/idonotexist/push?tlsVerify=false" 404
+t POST "images/localhost:$REGISTRY_PORT/idonotexist/push?tlsVerify=false" 404
t GET libpod/images/$IMAGE/json 200 \
.RepoTags[-1]=$IMAGE
-# Remove the registry container
-t DELETE libpod/containers/registry?force=true 200
-
-# Remove images
+# Remove image
t DELETE libpod/images/$IMAGE 200 \
.ExitCode=0
-t DELETE libpod/images/quay.io/libpod/registry:2.7 200 \
+
+podman pull -q $IMAGE
+
+# test podman image SCP
+# ssh needs to work so we can validate that the failure is past argument parsing
+podman system connection add --default test ssh://$USER@localhost/run/user/$UID/podman/podman.sock
+# should fail but need to check the output...
+# status 125 here means that the save/load fails due to
+# Cirrus weirdness with exec.Command. All of the args have been parsed successfully.
+t POST "libpod/images/scp/$IMAGE?destination=QA::" 500 \
+ .cause="exit status 125"
+t DELETE libpod/images/$IMAGE 200 \
.ExitCode=0
-if [ -z "${GOT_DIGEST}" ] ; then
- exit 1;
-fi
+stop_registry
diff --git a/test/apiv2/15-manifest.at b/test/apiv2/15-manifest.at
index 0dd7026fa..970bed5a8 100644
--- a/test/apiv2/15-manifest.at
+++ b/test/apiv2/15-manifest.at
@@ -2,18 +2,40 @@
#
# Tests for manifest list endpoints
+start_registry
+
t POST /v3.4.0/libpod/manifests/create?name=abc 200 \
.Id~[0-9a-f]\\{64\\}
id_abc=$(jq -r '.Id' <<<"$output")
t POST /v4.0.0/libpod/manifests/xyz 201 \
.Id~[0-9a-f]\\{64\\}
-echo xyz $output
id_xyz=$(jq -r '.Id' <<<"$output")
t GET /v3.4.0/libpod/manifests/$id_abc/exists 204
t GET /v4.0.0/libpod/manifests/$id_xyz/exists 204
+id_abc_image=$($PODMAN_BIN --root $WORKDIR/server_root image build -q --format=docker -<<EOF
+FROM alpine
+RUN >file1
+EOF
+)
+
+id_xyz_image=$($PODMAN_BIN --root $WORKDIR/server_root image build -q --format=docker -<<EOF
+FROM alpine
+RUN >file2
+EOF
+)
+
+t POST /v3.4.0/libpod/manifests/$id_abc/add images="[\"containers-storage:$id_abc_image\"]" 200
+t PUT /v4.0.0/libpod/manifests/$id_xyz operation='update' images="[\"containers-storage:$id_xyz_image\"]" 200
+
+t POST "/v3.4.0/libpod/manifests/abc:latest/push?destination=localhost:$REGISTRY_PORT%2Fabc:latest&tlsVerify=false&all=true" 200
+t POST "/v4.0.0/libpod/manifests/xyz:latest/registry/localhost:$REGISTRY_PORT%2Fxyz:latest?tlsVerify=false&all=true" 200
+
# /v3.x cannot delete a manifest list
t DELETE /v4.0.0/libpod/manifests/$id_abc 200
t DELETE /v4.0.0/libpod/manifests/$id_xyz 200
+
+podman rmi -a
+stop_registry
diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at
index 4d32a1031..6ef4ef917 100644
--- a/test/apiv2/20-containers.at
+++ b/test/apiv2/20-containers.at
@@ -16,7 +16,12 @@ podman pull $ENV_WORKDIR_IMG &>/dev/null
# Ensure clean slate
podman rm -a -f &>/dev/null
-t GET "libpod/containers/json (at start: clean slate)" 200 length=0
+t GET "libpod/containers/json (at start: clean slate)" 200 \
+ "[]" \
+ length=0
+# check content type: https://github.com/containers/podman/issues/14647
+response_headers=$(cat "$WORKDIR/curl.headers.out")
+like "$response_headers" ".*Content-Type: application/json.*" "header does not contain application/json"
# Regression test for #12904 (race condition in logging code)
mytext="hi-there-$(random_string 15)"
@@ -45,16 +50,16 @@ t GET libpod/containers/json?all=true 200 \
.[0].IsInfra=false
# Test compat API for Network Settings (.Network is N/A when rootless)
-network_expect=
+network_expect="Networks=null"
if root; then
- network_expect='.[0].NetworkSettings.Networks.podman.NetworkID=podman'
+ network_expect="Networks.podman.NetworkID=podman"
fi
t GET /containers/json?all=true 200 \
length=1 \
.[0].Id~[0-9a-f]\\{64\\} \
.[0].Image=$IMAGE \
.[0].Mounts~.*/tmp \
- $network_expect
+ .[0].NetworkSettings.$network_expect
# compat API imageid with sha256: prefix
t GET containers/json?limit=1 200 \
@@ -95,6 +100,17 @@ fi
t DELETE libpod/containers/$cid 200 .[0].Id=$cid
+# Issue #14676: make sure the stats show the memory limit specified for the container
+if root; then
+ CTRNAME=ctr-with-limit
+ podman run --name $CTRNAME -d -m 512m -v /tmp:/tmp $IMAGE top
+
+ t GET libpod/containers/$CTRNAME/stats?stream=false 200 \
+ .memory_stats.limit=536870912
+
+ podman rm -f $CTRNAME
+fi
+
# Issue #6799: it should be possible to start a container, even w/o args.
t POST libpod/containers/create?name=test_noargs Image=${IMAGE} 201 \
.Id~[0-9a-f]\\{64\\}
@@ -239,6 +255,7 @@ t GET containers/$cid/json 200 \
t POST containers/create Image=$IMAGE Entrypoint='["top"]' 201 \
.Id~[0-9a-f]\\{64\\}
cid_top=$(jq -r '.Id' <<<"$output")
+
t GET containers/${cid_top}/json 200 \
.Config.Entrypoint[0]="top" \
.Config.Cmd='[]' \
@@ -477,10 +494,35 @@ for endpoint in containers/create libpod/containers/create; do
t POST libpod/containers/$cid/init 204
- t GET libpod/containers/$cid/json 200
+ t GET libpod/containers/$cid/json 200 \
+ .HostsPath=""
t DELETE containers/$cid 204
done
stop_service
start_service
+
+# Our states are different from Docker's.
+# Regression test for #14700 (Docker compat returning unknown "initialized" for status.status) to ensure we stay compatible
+podman create --name status-test $IMAGE sh -c "sleep 3"
+t GET containers/status-test/json 200 .State.Status="created"
+
+podman init status-test
+t GET containers/status-test/json 200 .State.Status="created"
+
+podman start status-test
+t GET containers/status-test/json 200 .State.Status="running"
+
+podman pause status-test
+t GET containers/status-test/json 200 .State.Status="paused"
+
+podman unpause status-test
+t GET containers/status-test/json 200 .State.Status="running"
+
+podman stop status-test &
+sleep 1
+t GET containers/status-test/json 200 .State.Status="stopping"
+
+sleep 3
+t GET containers/status-test/json 200 .State.Status="exited"
diff --git a/test/apiv2/27-containersEvents.at b/test/apiv2/27-containersEvents.at
index a86f2e353..e0a66e0ac 100644
--- a/test/apiv2/27-containersEvents.at
+++ b/test/apiv2/27-containersEvents.at
@@ -18,6 +18,10 @@ t GET "libpod/events?stream=false&since=$START" 200 \
'select(.status | contains("died")).Action=died' \
'select(.status | contains("died")).Actor.Attributes.containerExitCode=1'
+t GET "libpod/events?stream=false&since=$START" 200 \
+ 'select(.status | contains("start")).Action=start' \
+ 'select(.status | contains("start")).HealthStatus='\
+
# compat api, uses status=die (#12643)
t GET "events?stream=false&since=$START" 200 \
'select(.status | contains("start")).Action=start' \
diff --git a/test/apiv2/35-networks.at b/test/apiv2/35-networks.at
index 4aad4563d..fcff26521 100644
--- a/test/apiv2/35-networks.at
+++ b/test/apiv2/35-networks.at
@@ -78,8 +78,8 @@ t GET networks?filters="{\"id\":[\"$network1_id\"]}" 200 \
.[0].Name=network1 \
.[0].Id=$network1_id
# invalid filter
-t GET networks?filters='{"dangling":["1"]}' 500 \
- .cause='invalid filter "dangling"'
+t GET networks?filters='{"dangling":["true","0"]}' 500 \
+ .cause="got more than one value for filter key \"dangling\""
# (#9293 with no networks the endpoint should return empty array instead of null)
t GET networks?filters='{"name":["doesnotexists"]}' 200 \
"[]"
diff --git a/test/apiv2/40-pods.at b/test/apiv2/40-pods.at
index 0a5201213..80724a8d9 100644
--- a/test/apiv2/40-pods.at
+++ b/test/apiv2/40-pods.at
@@ -134,4 +134,6 @@ t GET libpod/pods/json?filters='{"label":["testl' 400 \
t DELETE libpod/pods/foo 200
t DELETE "libpod/pods/foo (pod has already been deleted)" 404
+t_timeout 5 GET "libpod/pods/stats?stream=true&delay=1" 200
+
# vim: filetype=sh
diff --git a/test/apiv2/60-auth.at b/test/apiv2/60-auth.at
index 1e087d12b..465b0a96d 100644
--- a/test/apiv2/60-auth.at
+++ b/test/apiv2/60-auth.at
@@ -3,7 +3,7 @@
# registry-related tests
#
-start_registry
+start_registry htpasswd
# Test unreachable
t POST /v1.40/auth username=$REGISTRY_USERNAME password=WrOnGPassWord serveraddress=does.not.exist.io:1234/ \
@@ -26,3 +26,5 @@ t POST /v1.40/auth username=$REGISTRY_USERNAME password=$REGISTRY_PASSWORD serve
200 \
.Status="Login Succeeded" \
.IdentityToken=""
+
+stop_registry
diff --git a/test/apiv2/70-short-names.at b/test/apiv2/70-short-names.at
index a5087c115..bd7f8e7bd 100644
--- a/test/apiv2/70-short-names.at
+++ b/test/apiv2/70-short-names.at
@@ -6,11 +6,16 @@
# Pull the libpod/quay image which is used in all tests below.
t POST "images/create?fromImage=quay.io/libpod/alpine:latest" 200 .error~null .status~".*Download complete.*"
+# 14291 - let a short-name resolve to a *local* non Docker-Hub image.
+t POST containers/create Image=alpine 201 .Id~[0-9a-f]\\{64\\}
+cid=$(jq -r '.Id' <<<"$output")
+t GET containers/$cid/json 200 .Image="quay.io/libpod/alpine:latest"
+podman rm -f $cid
########## TAG
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
-t DELETE "images/foo" 200
+t DELETE "images/docker.io/library/foo" 200
########## BUILD
@@ -52,9 +57,6 @@ t DELETE "images/foo" 200
########## TAG
-# Looking up 'alpine' will fail as it gets normalized to docker.io.
-t POST "images/alpine/tag?repo=foo" 404 .cause="image not known"
-
# The libpod endpoint will resolve to it without issues.
t GET "libpod/images/alpine/exists" 204
@@ -67,22 +69,21 @@ t GET "libpod/images/docker.io/library/foo/exists" 204
########## REMOVE
-t DELETE "images/alpine" 404 .cause="image not known" # fails since docker.io/library/alpine does not exist
t DELETE "images/foo" 200 # removes the previously tagged image
########## GET
# Same procedure as above but with the /get endpoint.
-t GET "images/alpine/get" 404 .cause="image not known"
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t GET "images/foo/get" 200 '[POSIX tar archive]'
t DELETE "images/foo" 200
+t GET "images/alpine/get" 200
########## HISTORY
-t GET "images/alpine/history" 404 .cause="image not known"
+t GET "images/alpine/history" 200
t GET "images/quay.io/libpod/alpine/history" 200
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t GET "libpod/images/foo/history" 200
@@ -91,7 +92,7 @@ t DELETE "images/foo" 200
########## PUSH
-t POST "images/alpine/push?destination=localhost:9999/do/not:exist" 404 .cause="image not known"
+t POST "images/alpine/push?destination=localhost:9999/do:exist" 200
t POST "images/quay.io/libpod/alpine/push?destination=localhost:9999/do/not:exist" 200 # Error is in the response
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t POST "images/foo/push?destination=localhost:9999/do/not:exist" 200 # Error is in the response
@@ -100,7 +101,7 @@ t DELETE "images/foo"
########## CREATE A CONTAINER
-t POST "containers/create" Image=alpine 404 .cause="image not known"
+t POST "containers/create" Image=alpine 201
t POST "containers/create" Image=quay.io/libpod/alpine:latest 201
cid=$(jq -r '.Id' <<<"$output")
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
@@ -113,7 +114,7 @@ t DELETE "containers/$cid"
t POST "containers/create" Image=quay.io/libpod/alpine:latest 201
cid=$(jq -r '.Id' <<<"$output")
-t GET "images/alpine/get" 404 .cause="image not known"
+t GET "images/alpine/get" 200
t POST "commit?container=$cid&repo=foo&tag=tag" 201
t GET "images/foo/get" 404 .cause="image not known"
t GET "images/foo:tag/get" 200
@@ -127,7 +128,7 @@ t DELETE "containers/$cid"
# disable the docker.io enforcement.
stop_service
-CONTAINERS_CONF=$(pwd)/test/apiv2/containers.conf start_service
+CONTAINERS_CONF=$TESTS_DIR/containers.conf start_service
t POST "images/create?fromImage=quay.io/libpod/alpine:latest" 200 .error~null .status~".*Download complete.*"
t POST "images/alpine/tag?repo=foo" 201
diff --git a/test/apiv2/python/rest_api/fixtures/api_testcase.py b/test/apiv2/python/rest_api/fixtures/api_testcase.py
index 155e93928..f47136555 100644
--- a/test/apiv2/python/rest_api/fixtures/api_testcase.py
+++ b/test/apiv2/python/rest_api/fixtures/api_testcase.py
@@ -64,6 +64,10 @@ class APITestCase(unittest.TestCase):
def uri(path):
return APITestCase.PODMAN_URL + "/v2.0.0/libpod" + path
+ @staticmethod
+ def compat_uri(path):
+ return APITestCase.PODMAN_URL + "/v3.0.0/" + path
+
def resolve_container(self, path):
"""Find 'first' container and return 'Id' formatted into given URI path."""
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_container.py b/test/apiv2/python/rest_api/test_v2_0_0_container.py
index a44786c0d..a6cd93a1a 100644
--- a/test/apiv2/python/rest_api/test_v2_0_0_container.py
+++ b/test/apiv2/python/rest_api/test_v2_0_0_container.py
@@ -1,10 +1,12 @@
import multiprocessing
import queue
import random
+import subprocess
import threading
import unittest
import requests
+import os
import time
from dateutil.parser import parse
@@ -358,5 +360,50 @@ class ContainerTestCase(APITestCase):
self.assertEqual(1000, out["HostConfig"]["Memory"])
+
+def execute_process(cmd):
+ return subprocess.run(
+ cmd,
+ shell=True,
+ check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+def create_named_network_ns(network_ns_name):
+ execute_process(f"ip netns add {network_ns_name}")
+ execute_process(f"ip netns exec {network_ns_name} ip link add enp2s0 type veth peer name eth0")
+ execute_process(f"ip netns exec {network_ns_name} ip addr add 10.0.1.0/24 dev eth0")
+ execute_process(f"ip netns exec {network_ns_name} ip link set eth0 up")
+ execute_process(f"ip netns exec {network_ns_name} ip link add enp2s1 type veth peer name eth1")
+ execute_process(f"ip netns exec {network_ns_name} ip addr add 10.0.2.0/24 dev eth1")
+ execute_process(f"ip netns exec {network_ns_name} ip link set eth1 up")
+
+def delete_named_network_ns(network_ns_name):
+ execute_process(f"ip netns delete {network_ns_name}")
+
+class ContainerCompatibleAPITestCase(APITestCase):
+ def test_inspect_network(self):
+ if os.getuid() != 0:
+ self.skipTest("test needs to be executed as root!")
+ try:
+ network_ns_name = "test-compat-api"
+ create_named_network_ns(network_ns_name)
+ self.podman.run("rm", "--all", "--force", check=True)
+ self.podman.run("run", "--net", f"ns:/run/netns/{network_ns_name}", "-d", "alpine", "top", check=True)
+
+ r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
+ self.assertIn(r.status_code, (204, 304), r.text)
+
+ r = requests.get(self.compat_uri(self.resolve_container("/containers/{}/json")))
+ self.assertEqual(r.status_code, 200, r.text)
+ self.assertId(r.content)
+ out = r.json()
+
+ self.assertEqual("10.0.2.0", out["NetworkSettings"]["SecondaryIPAddresses"][0]["Addr"])
+ self.assertEqual(24, out["NetworkSettings"]["SecondaryIPAddresses"][0]["PrefixLen"])
+ finally:
+ delete_named_network_ns(network_ns_name)
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/apiv2/test-apiv2 b/test/apiv2/test-apiv2
index c3545522e..0fd282854 100755
--- a/test/apiv2/test-apiv2
+++ b/test/apiv2/test-apiv2
@@ -23,6 +23,8 @@ REGISTRY_IMAGE="${PODMAN_TEST_IMAGE_REGISTRY}/${PODMAN_TEST_IMAGE_USER}/registry
###############################################################################
# BEGIN setup
+USER=$PODMAN_ROOTLESS_USER
+UID=$PODMAN_ROOTLESS_UID
TMPDIR=${TMPDIR:-/tmp}
WORKDIR=$(mktemp --tmpdir -d $ME.tmp.XXXXXX)
@@ -54,6 +56,9 @@ fi
# Path to podman binary
PODMAN_BIN=${PODMAN:-${CONTAINERS_HELPER_BINARY_DIR}/podman}
+# Timeout for streamed responses
+CURL_TIMEOUT=0
+
# Cleanup handlers
clean_up_server() {
if [ -n "$service_pid" ]; then
@@ -62,7 +67,7 @@ clean_up_server() {
podman rm -a
podman rmi -af
- stop_registry
+ stop_registry --cleanup
stop_service
fi
}
@@ -87,6 +92,7 @@ trap err_handler ERR
#########
function die() {
echo "$ME: $*" >&2
+ clean_up_server
exit 1
}
@@ -214,24 +220,45 @@ function jsonify() {
}
#######
+# t_timeout # Timeout wrapper for test helper
+#######
+function t_timeout() {
+ CURL_TIMEOUT=$1; shift
+ local min_runtime=$((CURL_TIMEOUT - 1))
+ start=`date +%s`
+ t $@
+ local end=`date +%s`
+ local runtime=$((end-start))
+ if ! [[ "$runtime" -ge "$min_runtime" ]]; then
+ die "Error: Streaming time should be greater than or equal to '$min_runtime'"
+ fi
+}
+
+#######
# t # Main test helper
#######
function t() {
local method=$1; shift
local path=$1; shift
- local curl_args
+ local -a curl_args
local content_type="application/json"
local testname="$method $path"
- # POST requests may be followed by one or more key=value pairs.
+
+ if [[ $CURL_TIMEOUT != 0 ]]; then
+ local c_timeout=$CURL_TIMEOUT
+ curl_args+=("-m $CURL_TIMEOUT")
+ CURL_TIMEOUT=0 # 'consume' timeout
+ fi
+ # POST and PUT requests may be followed by one or more key=value pairs.
# Slurp the command line until we see a 3-digit status code.
- if [[ $method = "POST" ]]; then
+ if [[ $method = "POST" || $method == "PUT" ]]; then
local -a post_args
for arg; do
case "$arg" in
*=*) post_args+=("$arg");
shift;;
- *.tar) curl_args="--data-binary @$arg" ;
+ *.tar) curl_args+=(--data-binary @$arg);
content_type="application/x-tar";
shift;;
application/*) content_type="$arg";
@@ -241,8 +268,8 @@ function t() {
esac
done
if [[ -z "$curl_args" ]]; then
- curl_args="-d $(jsonify ${post_args[@]})"
- testname="$testname [$curl_args]"
+ curl_args=(-d $(jsonify ${post_args[@]}))
+ testname="$testname [${curl_args[@]}]"
fi
fi
@@ -269,7 +296,7 @@ function t() {
# curl -X HEAD but without --head seems to wait for output anyway
if [[ $method == "HEAD" ]]; then
- curl_args="--head"
+ curl_args+=("--head")
fi
local expected_code=$1; shift
@@ -281,16 +308,15 @@ function t() {
# -s = silent, but --write-out 'format' gives us important response data
# The hairy "{ ...;rc=$?; } || :" lets us capture curl's exit code and
# give a helpful diagnostic if it fails.
- { response=$(curl -s -X $method ${curl_args} \
+ { response=$(curl -s -X $method "${curl_args[@]}" \
-H "Content-type: $content_type" \
--dump-header $WORKDIR/curl.headers.out \
--write-out '%{http_code}^%{content_type}^%{time_total}' \
-o $WORKDIR/curl.result.out "$url"); rc=$?; } || :
# Any error from curl is instant bad news, from which we can't recover
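+ # ...unless a timeout was requested via t_timeout, in which case curl is
+ # expected to exit nonzero (typically 28) once the timeout expires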
- if [[ $rc -ne 0 ]]; then
- echo "FATAL: curl failure ($rc) on $url - cannot continue" >&2
- exit 1
+ if [[ $rc -ne 0 ]] && [[ $c_timeout -eq 0 ]]; then
+ die "curl failure ($rc) on $url - cannot continue"
fi
# Show returned headers (without trailing ^M or empty lines) in log file.
@@ -380,11 +406,17 @@ function start_service() {
die "Cannot start service on non-localhost ($HOST)"
fi
- echo "rootdir: "$WORKDIR
- # Some tests use shortnames; force registry override to work around
- # docker.io throttling.
-# FIXME esm revisit pulling expected images re: shortnames caused tests to fail
-# env CONTAINERS_REGISTRIES_CONF=$TESTS_DIR/../registries.conf
+ # FIXME: EXPERIMENTAL: 2022-06-13: podman rootless needs a namespace. If
+ # system-service is the first podman command run (as is the case in CI)
+ # this will happen as a fork-exec, where the parent podman creates the
+ # namespace and the child is the server. Then, when stop_service() kills
+ # the parent, the child (server) happily stays alive and ruins subsequent
+ # tests that try to restart service with different settings.
+ # Workaround: run an unshare to get namespaces initialized.
+ if [[ $(id -u) != 0 ]]; then
+ $PODMAN_BIN unshare true
+ fi
+
$PODMAN_BIN \
--root $WORKDIR/server_root --syslog=true \
system service \
@@ -392,6 +424,7 @@ function start_service() {
tcp:127.0.0.1:$PORT \
&> $WORKDIR/server.log &
service_pid=$!
+ echo "# started service, pid $service_pid"
wait_for_port $HOST $PORT
}
@@ -401,7 +434,14 @@ function stop_service() {
if [[ -n $service_pid ]]; then
kill $service_pid || :
wait $service_pid || :
+ echo "# stopped service, pid $service_pid"
fi
+ service_pid=
+
+ if { exec 3<> /dev/tcp/$HOST/$PORT; } &>/dev/null; then
+ echo "# WARNING: stop_service: Service still running on port $PORT"
+ fi
+
}
####################
@@ -411,15 +451,17 @@ REGISTRY_PORT=
REGISTRY_USERNAME=
REGISTRY_PASSWORD=
function start_registry() {
- # We can be invoked multiple times, e.g. from different subtests, but
- # let's assume that once started we only kill it at the end of tests.
+ # We can be called multiple times, but each call should start a new
+ # registry container with (possibly) different configuration, so every
+ # caller is responsible for invoking stop_registry afterward.
if [[ -n "$REGISTRY_PORT" ]]; then
- return
+ die "start_registry invoked twice in succession, without stop_registry"
fi
+ # First arg is auth type (default: "none", but can also be "htpasswd")
+ local auth="${1:-none}"
+
REGISTRY_PORT=$(random_port)
- REGISTRY_USERNAME=u$(random_string 7)
- REGISTRY_PASSWORD=p$(random_string 7)
local REGDIR=$WORKDIR/registry
local AUTHDIR=$REGDIR/auth
@@ -433,42 +475,65 @@ function start_registry() {
podman ${PODMAN_REGISTRY_ARGS} pull $REGISTRY_IMAGE ||
podman ${PODMAN_REGISTRY_ARGS} pull $REGISTRY_IMAGE
- # Create a local cert and credentials
- # FIXME: is there a hidden "--quiet" flag? This is too noisy.
- openssl req -newkey rsa:4096 -nodes -sha256 \
- -keyout $AUTHDIR/domain.key -x509 -days 2 \
- -out $AUTHDIR/domain.crt \
- -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=registry host certificate" \
- -addext subjectAltName=DNS:localhost
- htpasswd -Bbn ${REGISTRY_USERNAME} ${REGISTRY_PASSWORD} \
- > $AUTHDIR/htpasswd
+ # Create a local cert (no need to do this more than once)
+ if [[ ! -e $AUTHDIR/domain.key ]]; then
+ # FIXME: is there a hidden "--quiet" flag? This is too noisy.
+ openssl req -newkey rsa:4096 -nodes -sha256 \
+ -keyout $AUTHDIR/domain.key -x509 -days 2 \
+ -out $AUTHDIR/domain.crt \
+ -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=registry host certificate" \
+ -addext subjectAltName=DNS:localhost
+ fi
+
+ # If invoked with auth=htpasswd, create credentials
+ REGISTRY_USERNAME=
+ REGISTRY_PASSWORD=
+ declare -a registry_auth_params=(-e "REGISTRY_AUTH=$auth")
+ if [[ "$auth" = "htpasswd" ]]; then
+ REGISTRY_USERNAME=u$(random_string 7)
+ REGISTRY_PASSWORD=p$(random_string 7)
+
+ htpasswd -Bbn ${REGISTRY_USERNAME} ${REGISTRY_PASSWORD} \
+ > $AUTHDIR/htpasswd
+
+ registry_auth_params+=(
+ -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm"
+ -e "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd"
+ )
+ fi
# Run the registry, and wait for it to come up
podman ${PODMAN_REGISTRY_ARGS} run -d \
-p ${REGISTRY_PORT}:5000 \
--name registry \
-v $AUTHDIR:/auth:Z \
- -e "REGISTRY_AUTH=htpasswd" \
- -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
- -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
+ "${registry_auth_params[@]}" \
-e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt \
-e REGISTRY_HTTP_TLS_KEY=/auth/domain.key \
${REGISTRY_IMAGE}
wait_for_port localhost $REGISTRY_PORT 10
+ echo "# started registry (auth=$auth) on port $PORT"
}
function stop_registry() {
local REGDIR=${WORKDIR}/registry
if [[ -d $REGDIR ]]; then
local OPTS="--root ${REGDIR}/root --runroot ${REGDIR}/runroot"
- podman $OPTS stop -f -t 0 -a
+ podman $OPTS stop -i -t 0 registry
# rm/rmi are important when running rootless: without them we
# get EPERMS in tmpdir cleanup because files are owned by subuids.
- podman $OPTS rm -f -a
- podman $OPTS rmi -f -a
+ podman $OPTS rm -f -i registry
+ if [[ "$1" = "--cleanup" ]]; then
+ podman $OPTS rmi -f -a
+ fi
+ echo "# stopped registry on port $PORT"
fi
+
+ REGISTRY_PORT=
+ REGISTRY_USERNAME=
+ REGISTRY_PASSWORD=
}
#################
diff --git a/test/buildah-bud/apply-podman-deltas b/test/buildah-bud/apply-podman-deltas
index 1ca171c4a..0b691dd0e 100755
--- a/test/buildah-bud/apply-podman-deltas
+++ b/test/buildah-bud/apply-podman-deltas
@@ -61,7 +61,12 @@ function _skip() {
local skip=$1; shift
local reason=$1; shift
- # All further arguments are test names
+ # All further arguments are test names. Make sure we're invoked with some!
+ if [[ -z "$*" ]]; then
+ echo "$ME: FATAL: Invalid use of '${FUNCNAME[1]}' at line ${BASH_LINENO[1]}: missing test-name argument(s)." >&2
+ exit 1
+ fi
+
for t in "$@"; do
if fgrep -qx "@test \"$t\" {" $BUD; then
$ECHO "@test \"$t\" : $skip \"$reason\""
@@ -150,13 +155,7 @@ errmsg "checking authfile: stat /tmp/nonexistent: no such file or directory" \
###############################################################################
# BEGIN tests that don't make sense under podman due to fundamental differences
-# TODO
-# Normally, when buildah exits 1 on error, podman exits 125.
-# These tests are the exception. They exit 1 under podman.
-skip "these tests exit 1 under podman, not 125" \
- "bud with --add-host" \
- "bud - invalid runtime flags test"
-
+# Fails with "Error: no context directory and no Containerfile specified"
skip "does not work under podman" \
"bud without any arguments should succeed"
@@ -167,8 +166,9 @@ skip "does not work under podman" \
skip "FIXME FIXME FIXME: argument-order incompatible with podman" \
"bud-squash-hardlinks"
-skip "FIXME FIXME FIXME: this passes on Ed's laptop, fails in CI??" \
- "bud-multi-stage-nocache-nocommit"
+# Fails with "Error: context must be a directory: /path/to/Dockerfile"
+skip "podman-build fails with 'context must be a directory'" \
+ "bud with specified context should succeed if context contains existing Dockerfile"
###############################################################################
# BEGIN tests which are skipped because they make no sense under podman-remote
@@ -193,6 +193,13 @@ skip_if_remote "volumes don't work with podman-remote" \
"buildah bud --volume" \
"buildah-bud-policy"
+skip_if_remote "--build-context option not implemented in podman-remote" \
+ "build-with-additional-build-context and COPY, additional context from host" \
+ "build-with-additional-build-context and RUN --mount=from=, additional-context not image and also test conflict with stagename" \
+
+skip_if_remote "env-variable for Containerfile.in pre-processing is not propogated on remote" \
+ "bud with Containerfile.in, via envariable" \
+
# Requires a local file outside context dir
skip_if_remote "local keyfile not sent to podman-remote" \
"bud with encrypted FROM image"
@@ -210,6 +217,12 @@ skip_if_remote "--output option not implemented in podman-remote" \
"build with custom build output and output rootfs to tar by pipe" \
"build with custom build output must fail for bad input"
+# https://github.com/containers/podman/issues/14544
+skip_if_remote "logfile not implemented on remote" "bud-logfile-with-split-logfile-by-platform"
+
+skip_if_remote "envariables do not automatically work with -remote." \
+ "build proxy"
+
###############################################################################
# BEGIN tests which are skipped due to actual podman or podman-remote bugs.
@@ -222,10 +235,12 @@ skip_if_remote "FIXME FIXME FIXME: find a way to clean up their podman calls" \
"bud with run should not leave mounts behind cleanup test" \
"bud with custom files in /run/ should persist cleanup test"
-skip_if_remote "Do envariables work with -remote? Please look into this." \
- "build proxy"
+# Under podman-remote, the "Ignoring <stdin>:5:2: error: #error" message
+# is never seen. (Not even as stdout/stderr on the server; Ed checked).
+skip_if_remote "FIXME FIXME FIXME: 'Ignoring' warning is never seen" \
+ "bud with preprocessor error"
+# END tests which are skipped due to actual podman or podman-remote bugs.
###############################################################################
-# Done.
exit $RC
diff --git a/test/buildah-bud/buildah-tests.diff b/test/buildah-bud/buildah-tests.diff
index 6fa36d904..399042240 100644
--- a/test/buildah-bud/buildah-tests.diff
+++ b/test/buildah-bud/buildah-tests.diff
@@ -1,15 +1,15 @@
-From 8a8fa1a75e0fa3261263afbc8c2504feb430df6a Mon Sep 17 00:00:00 2001
+From 6508e3df2a129554fdf8336d8a6f0cdcc6fd4832 Mon Sep 17 00:00:00 2001
From: Ed Santiago <santiago@redhat.com>
Date: Tue, 9 Feb 2021 17:28:05 -0700
Subject: [PATCH] tweaks for running buildah tests under podman
Signed-off-by: Ed Santiago <santiago@redhat.com>
---
- tests/helpers.bash | 69 ++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 66 insertions(+), 3 deletions(-)
+ tests/helpers.bash | 70 ++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 67 insertions(+), 3 deletions(-)
diff --git a/tests/helpers.bash b/tests/helpers.bash
-index e3087063..b3a8f5ee 100644
+index e3087063..178a486e 100644
--- a/tests/helpers.bash
+++ b/tests/helpers.bash
@@ -51,6 +51,23 @@ EOF
@@ -60,13 +60,13 @@ index e3087063..b3a8f5ee 100644
# There are various scenarios where we would like to execute `tests` as rootless user, however certain commands like `buildah mount`
# do not work in rootless session since a normal user cannot mount a filesystem unless they're in a user namespace along with its
# own mount namespace. In order to run such specific commands from a rootless session we must perform `buildah unshare`.
-@@ -247,8 +274,35 @@ function run_buildah() {
+@@ -247,8 +274,36 @@ function run_buildah() {
--retry) retry=3; shift;; # retry network flakes
esac
+ local podman_or_buildah=${BUILDAH_BINARY}
+ local _opts="${ROOTDIR_OPTS} ${BUILDAH_REGISTRY_OPTS}"
-+ if [[ $1 == "build" || $1 == "build-using-dockerfile" ]]; then
++ if [[ $1 == "build" || $1 == "build-using-dockerfile" || $1 == "bud" ]]; then
+ shift
+ # podman defaults to --layers=true; buildah to --false.
+ # If command line includes explicit --layers, leave it untouched,
@@ -82,10 +82,11 @@ index e3087063..b3a8f5ee 100644
+ _opts=
+ fi
+
-+ # podman always exits 125 where buildah exits 1 or 2 (or, in the
-+ # case of git, 128, which is a bug in git, but I won't harp on that).
++ # Special case: there's one test that invokes git in such
++ # a way that it exits 128 (which IMO is a bug in git).
++ # podman exits 125 in that case.
+ case $expected_rc in
-+ 1|2|128) expected_rc=125 ;;
++ 128) expected_rc=125 ;;
+ esac
+ fi
+ local cmd_basename=$(basename ${podman_or_buildah})
@@ -97,7 +98,7 @@ index e3087063..b3a8f5ee 100644
# If session is rootless and `buildah mount` is invoked, perform unshare,
# since normal user cannot mount a filesystem unless they're in a user namespace along with its own mount namespace.
-@@ -262,8 +316,8 @@ function run_buildah() {
+@@ -262,8 +317,8 @@ function run_buildah() {
retry=$(( retry - 1 ))
# stdout is only emitted upon error; this echo is to help a debugger
@@ -108,7 +109,7 @@ index e3087063..b3a8f5ee 100644
# without "quotes", multiple lines are glommed together into one
if [ -n "$output" ]; then
echo "$output"
-@@ -595,6 +649,15 @@ function skip_if_no_docker() {
+@@ -595,6 +650,15 @@ function skip_if_no_docker() {
fi
}
@@ -125,5 +126,5 @@ index e3087063..b3a8f5ee 100644
daemondir=${TEST_SCRATCH_DIR}/git-daemon
mkdir -p ${daemondir}/repo
--
-2.35.1
+2.35.3
diff --git a/test/buildah-bud/run-buildah-bud-tests b/test/buildah-bud/run-buildah-bud-tests
index eb8de5618..4ff062496 100755
--- a/test/buildah-bud/run-buildah-bud-tests
+++ b/test/buildah-bud/run-buildah-bud-tests
@@ -93,6 +93,12 @@ fi
# From here on out, any error is fatal
set -e
+# Run sudo early, to refresh the credentials cache. This is a NOP under CI,
+# but might be appreciated by developers who run this script, step away
+# during the git-checkout-buildah step, then come back twenty minutes later
+# to find an expired sudo prompt and no tests run.
+sudo --validate
+
# Before pulling buildah (while still cd'ed to podman repo), try to determine
# if this is a PR, and if so if it's a revendoring of buildah. We use this to
# try to offer a helpful hint on failure.
diff --git a/test/compose/disable_healthcheck/docker-compose.yml b/test/compose/disable_healthcheck/docker-compose.yml
new file mode 100644
index 000000000..1f608c895
--- /dev/null
+++ b/test/compose/disable_healthcheck/docker-compose.yml
@@ -0,0 +1,10 @@
+version: "3.7"
+services:
+ noHc:
+ image: alpine
+ container_name: noHc
+ ports:
+ - "4000:80"
+ restart: unless-stopped
+ healthcheck:
+ disable: true
diff --git a/test/compose/disable_healthcheck/tests.sh b/test/compose/disable_healthcheck/tests.sh
new file mode 100644
index 000000000..2460a687e
--- /dev/null
+++ b/test/compose/disable_healthcheck/tests.sh
@@ -0,0 +1,2 @@
+podman inspect --format='{{.Config.Healthcheck.Test}}' noHc
+like "$output" "[NONE]" "$testname: healthcheck properly disabled"
diff --git a/test/compose/update_network_mtu/docker-compose.yml b/test/compose/update_network_mtu/docker-compose.yml
new file mode 100644
index 000000000..fabd7b4f2
--- /dev/null
+++ b/test/compose/update_network_mtu/docker-compose.yml
@@ -0,0 +1,26 @@
+version: '3.7'
+
+services:
+ nginx:
+ image: alpine
+ ports:
+ - 8000:5000
+ networks:
+ - default
+ - macvlan_net
+
+networks:
+ default:
+ driver: bridge
+ driver_opts:
+ com.docker.network.bridge.name: docker0
+ com.docker.network.driver.mtu: 9000
+ macvlan_net:
+ driver: macvlan
+ driver_opts:
+ mode: bridge
+ ipam:
+ config:
+ -
+ subnet: 192.168.20.0/24
+ gateway: 192.168.20.1
diff --git a/test/compose/update_network_mtu/tests.sh b/test/compose/update_network_mtu/tests.sh
new file mode 100644
index 000000000..57411eb34
--- /dev/null
+++ b/test/compose/update_network_mtu/tests.sh
@@ -0,0 +1,10 @@
+# -*- bash -*-
+
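+# Verify that the driver_opts from docker-compose.yml (bridge name, MTU,
+# macvlan mode) were applied to the networks created by compose.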
+podman network inspect --format='{{ range . }} {{ .Options.mtu }} {{ end }}' update_network_mtu_default
+like "$output" "9000" "$testname : network mtu is set"
+
+podman network inspect --format='{{ range . }} {{ .NetworkInterface }} {{ end }}' update_network_mtu_default
+like "$output" "docker0" "$testname: network interface is set"
+
+podman network inspect --format='{{ range . }} {{ .Options.mode }} {{ end }}' update_network_mtu_macvlan_net
+like "$output" "bridge" "$testname : network mode is set"
diff --git a/test/e2e/benchmarks_test.go b/test/e2e/benchmarks_test.go
index ef4d51893..fe045b97a 100644
--- a/test/e2e/benchmarks_test.go
+++ b/test/e2e/benchmarks_test.go
@@ -132,7 +132,7 @@ var _ = Describe("Podman Benchmark Suite", func() {
Measure("Podman Benchmark Suite", func(b Benchmarker) {
registryOptions := &podmanRegistry.Options{
- Image: "docker-archive:" + imageTarPath(registry),
+ Image: "docker-archive:" + imageTarPath(REGISTRY_IMAGE),
}
for i := range allBenchmarks {
diff --git a/test/e2e/build/Containerfile.with-platform b/test/e2e/build/Containerfile.with-platform
new file mode 100644
index 000000000..3bb585a0a
--- /dev/null
+++ b/test/e2e/build/Containerfile.with-platform
@@ -0,0 +1 @@
+FROM --platform=$TARGETPLATFORM alpine
diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go
index dcdd17143..9ecc2f8c6 100644
--- a/test/e2e/build_test.go
+++ b/test/e2e/build_test.go
@@ -85,7 +85,7 @@ var _ = Describe("Podman build", func() {
})
It("podman build with a secret from file and verify if secret file is not leaked into image", func() {
- session := podmanTest.Podman([]string{"build", "-f", "build/secret-verify-leak/Containerfile.with-secret-verify-leak", "-t", "secret-test-leak", "--secret", "id=mysecret,src=build/secret.txt", "build/"})
+ session := podmanTest.Podman([]string{"build", "-f", "build/secret-verify-leak/Containerfile.with-secret-verify-leak", "-t", "secret-test-leak", "--secret", "id=mysecret,src=build/secret.txt", "build/secret-verify-leak"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.OutputToString()).To(ContainSubstring("somesecret"))
@@ -178,6 +178,32 @@ var _ = Describe("Podman build", func() {
Expect(session).Should(Exit(0))
})
+ It("podman build verify explicit cache use with squash-all and --layers", func() {
+ session := podmanTest.Podman([]string{"build", "--pull-never", "-f", "build/squash/Dockerfile.squash-c", "--squash-all", "--layers", "-t", "test-squash-d:latest", "build/squash"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"inspect", "--format", "{{.RootFS.Layers}}", "test-squash-d"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ // Check for one layer
+ Expect(strings.Fields(session.OutputToString())).To(HaveLen(1))
+
+ // Second build must use last squashed build from cache
+ session = podmanTest.Podman([]string{"build", "--pull-never", "-f", "build/squash/Dockerfile.squash-c", "--squash-all", "--layers", "-t", "test", "build/squash"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ // Test if entire build is used from cache
+ Expect(session.OutputToString()).To(ContainSubstring("Using cache"))
+
+ session = podmanTest.Podman([]string{"inspect", "--format", "{{.RootFS.Layers}}", "test-squash-d"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ // Check for one layer
+ Expect(strings.Fields(session.OutputToString())).To(HaveLen(1))
+
+ })
+
It("podman build Containerfile locations", func() {
// Given
// Switch to temp dir and restore it afterwards
@@ -529,7 +555,7 @@ subdir**`
dd := exec.Command("dd", "if=/dev/random", "of="+randomFile, "bs=1G", "count=1")
ddSession, err := Start(dd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
- Eventually(ddSession).Should(Exit(0))
+ Eventually(ddSession, "10s", "1s").Should(Exit(0))
// make cwd as context root path
Expect(os.Chdir(contextDir)).ToNot(HaveOccurred())
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index 787178cd3..5ccafeb37 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -23,10 +23,31 @@ import (
func getRunString(input []string) []string {
// CRIU does not work with seccomp correctly on RHEL7 : seccomp=unconfined
- runString := []string{"run", "-it", "--security-opt", "seccomp=unconfined", "-d", "--ip", GetRandomIPAddress()}
+ runString := []string{"run", "--security-opt", "seccomp=unconfined", "-d", "--ip", GetRandomIPAddress()}
return append(runString, input...)
}
+// FIXME FIXME FIXME: workaround for #14653, please remove this function
+// and all calls to it once that bug is fixed.
+func fixmeFixme14653(podmanTest *PodmanTestIntegration, cid string) {
+ if !IsRemote() {
+ // Race condition only affects podman-remote
+ return
+ }
+
+ // Wait for container to truly go away
+ for i := 0; i < 5; i++ {
+ ps := podmanTest.Podman([]string{"container", "exists", cid})
+ ps.WaitWithDefaultTimeout()
+ if ps.ExitCode() == 1 {
+ // yay, it's gone
+ return
+ }
+ time.Sleep(time.Second)
+ }
+ // Fall through. Container still exists, but return anyway.
+}
+
var _ = Describe("Podman checkpoint", func() {
var (
tempdir string
@@ -318,7 +339,7 @@ var _ = Describe("Podman checkpoint", func() {
It("podman checkpoint container with established tcp connections", func() {
// Broken on Ubuntu.
SkipIfNotFedora()
- localRunString := getRunString([]string{redis})
+ localRunString := getRunString([]string{REDIS_IMAGE})
session := podmanTest.Podman(localRunString)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -478,6 +499,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -530,6 +552,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -548,6 +571,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -566,6 +590,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -584,6 +609,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -645,6 +671,7 @@ var _ = Describe("Podman checkpoint", func() {
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -676,8 +703,8 @@ var _ = Describe("Podman checkpoint", func() {
})
It("podman checkpoint and restore container with root file-system changes using --ignore-rootfs during restore", func() {
// Start the container
- localRunString := getRunString([]string{"--rm", ALPINE, "top"})
- session := podmanTest.Podman(localRunString)
+ // test that restore works without network namespace (https://github.com/containers/podman/issues/14389)
+ session := podmanTest.Podman([]string{"run", "--network=none", "-d", "--rm", ALPINE, "top"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
@@ -694,6 +721,7 @@ var _ = Describe("Podman checkpoint", func() {
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -735,6 +763,7 @@ var _ = Describe("Podman checkpoint", func() {
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -772,6 +801,7 @@ var _ = Describe("Podman checkpoint", func() {
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -821,6 +851,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -890,6 +921,7 @@ var _ = Describe("Podman checkpoint", func() {
result = podmanTest.Podman([]string{"container", "checkpoint", cid, "-e", checkpointFileName})
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -1020,7 +1052,7 @@ var _ = Describe("Podman checkpoint", func() {
It("podman checkpoint and restore container with different port mappings", func() {
randomPort, err := utils.GetRandomPort()
Expect(err).ShouldNot(HaveOccurred())
- localRunString := getRunString([]string{"-p", fmt.Sprintf("%d:6379", randomPort), "--rm", redis})
+ localRunString := getRunString([]string{"-p", fmt.Sprintf("%d:6379", randomPort), "--rm", REDIS_IMAGE})
session := podmanTest.Podman(localRunString)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -1044,6 +1076,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -1140,6 +1173,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).To(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
@@ -1252,6 +1286,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -1296,6 +1331,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -1324,11 +1360,11 @@ var _ = Describe("Podman checkpoint", func() {
})
It("podman checkpoint and restore containers with --print-stats", func() {
- session1 := podmanTest.Podman(getRunString([]string{redis}))
+ session1 := podmanTest.Podman(getRunString([]string{REDIS_IMAGE}))
session1.WaitWithDefaultTimeout()
Expect(session1).Should(Exit(0))
- session2 := podmanTest.Podman(getRunString([]string{redis, "top"}))
+ session2 := podmanTest.Podman(getRunString([]string{REDIS_IMAGE, "top"}))
session2.WaitWithDefaultTimeout()
Expect(session2).Should(Exit(0))
@@ -1489,6 +1525,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -1518,6 +1555,93 @@ var _ = Describe("Podman checkpoint", func() {
os.Remove(fileName)
})
+ It("podman checkpoint container with export and verify non-default runtime", func() {
+ SkipIfRemote("podman-remote does not support --runtime flag")
+ // This test triggers the edge case where:
+ // 1. Default runtime is crun
+ // 2. Container is created with runc
+ // 3. Checkpoint without setting --runtime into archive
+ // 4. Restore without setting --runtime from archive
+ // Podman is expected to identify the runtime
+ // from the checkpoint archive.
+
+ // Prevent --runtime arg from being set to force using default
+ // runtime unless explicitly set through passed args.
+ preservedMakeOptions := podmanTest.PodmanMakeOptions
+ podmanTest.PodmanMakeOptions = func(args []string, noEvents, noCache bool) []string {
+ defaultArgs := preservedMakeOptions(args, noEvents, noCache)
+ for i := range args {
+ // Runtime is set explicitly, so we should keep --runtime arg.
+ if args[i] == "--runtime" {
+ return defaultArgs
+ }
+ }
+ updatedArgs := make([]string, 0)
+ for i := 0; i < len(defaultArgs); i++ {
+ // Remove --runtime arg, letting podman fall back to its default
+ if defaultArgs[i] == "--runtime" {
+ i++
+ } else {
+ updatedArgs = append(updatedArgs, defaultArgs[i])
+ }
+ }
+ return updatedArgs
+ }
+
+ for _, runtime := range []string{"runc", "crun"} {
+ if err := exec.Command(runtime, "--help").Run(); err != nil {
+ Skip(fmt.Sprintf("%s not found in PATH; this test requires both runc and crun", runtime))
+ }
+ }
+
+ // Detect default runtime
+ session := podmanTest.Podman([]string{"info", "--format", "{{.Host.OCIRuntime.Name}}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ if defaultRuntime := session.OutputToString(); defaultRuntime != "crun" {
+ Skip(fmt.Sprintf("Default runtime is %q; this test requires crun to be default", defaultRuntime))
+ }
+
+ // Force non-default runtime "runc"
+ localRunString := getRunString([]string{"--runtime", "runc", "--rm", ALPINE, "top"})
+ session = podmanTest.Podman(localRunString)
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ cid := session.OutputToString()
+
+ session = podmanTest.Podman([]string{"inspect", "--format", "{{.OCIRuntime}}", cid})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).To(Equal("runc"))
+
+ checkpointExportPath := "/tmp/checkpoint-" + cid + ".tar.gz"
+
+ session = podmanTest.Podman([]string{"container", "checkpoint", cid, "-e", checkpointExportPath})
+ session.WaitWithDefaultTimeout()
+ // As the container has been started with '--rm' it will be completely
+ // cleaned up after checkpointing.
+ Expect(session).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"container", "restore", "-i", checkpointExportPath})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ // The restored container should have the same runtime as the original container
+ session = podmanTest.Podman([]string{"inspect", "--format", "{{.OCIRuntime}}", cid})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).To(Equal("runc"))
+
+ // Remove exported checkpoint
+ os.Remove(checkpointExportPath)
+ })
+
It("podman checkpoint container with export and try to change the runtime", func() {
SkipIfRemote("podman-remote does not support --runtime flag")
// This test will only run if runc and crun both exist
@@ -1573,6 +1697,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
@@ -1651,6 +1776,7 @@ var _ = Describe("Podman checkpoint", func() {
// As the container has been started with '--rm' it will be completely
// cleaned up after checkpointing.
Expect(result).Should(Exit(0))
+ fixmeFixme14653(podmanTest, cid)
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go
index c82e5e471..452a378c2 100644
--- a/test/e2e/commit_test.go
+++ b/test/e2e/commit_test.go
@@ -362,7 +362,7 @@ var _ = Describe("Podman commit", func() {
Expect(images[0].Config.ExposedPorts).To(HaveKey("80/tcp"))
name = "testcon2"
- s = podmanTest.Podman([]string{"run", "--name", name, "-d", nginx})
+ s = podmanTest.Podman([]string{"run", "--name", name, "-d", NGINX_IMAGE})
s.WaitWithDefaultTimeout()
Expect(s).Should(Exit(0))
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index db194b777..43367cf63 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -2,6 +2,7 @@ package integration
import (
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"math/rand"
@@ -30,18 +31,17 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var (
//lint:ignore ST1003
- PODMAN_BINARY string //nolint:revive,stylecheck
- INTEGRATION_ROOT string //nolint:revive,stylecheck
- CGROUP_MANAGER = "systemd" //nolint:revive,stylecheck
- RESTORE_IMAGES = []string{ALPINE, BB, nginx} //nolint:revive,stylecheck
+ PODMAN_BINARY string //nolint:revive,stylecheck
+ INTEGRATION_ROOT string //nolint:revive,stylecheck
+ CGROUP_MANAGER = "systemd" //nolint:revive,stylecheck
+ RESTORE_IMAGES = []string{ALPINE, BB, NGINX_IMAGE} //nolint:revive,stylecheck
defaultWaitTimeout = 90
- CGROUPSV2, _ = cgroups.IsCgroup2UnifiedMode() //nolint:revive,stylecheck
+ CGROUPSV2, _ = cgroups.IsCgroup2UnifiedMode()
)
// PodmanTestIntegration struct for command line options
@@ -115,7 +115,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
podman := PodmanTestSetup("/tmp")
// Pull cirros but don't put it into the cache
- pullImages := []string{cirros, fedoraToolbox, volumeTest}
+ pullImages := []string{CIRROS_IMAGE, fedoraToolbox, volumeTest}
pullImages = append(pullImages, CACHE_IMAGES...)
for _, image := range pullImages {
podman.createArtifact(image)
@@ -322,7 +322,7 @@ func PodmanTestCreateUtil(tempDir string, remote bool) *PodmanTestIntegration {
}
}
- // Setup registries.conf ENV variable
+ // Set up registries.conf ENV variable
p.setDefaultRegistriesConfigEnv()
// Rewrite the PodmanAsUser function
p.PodmanMakeOptions = p.makeOptions
@@ -464,7 +464,7 @@ func (p *PodmanTestIntegration) RunNginxWithHealthCheck(name string) (*PodmanSes
podmanArgs = append(podmanArgs, "--name", name)
}
// curl without -f exits 0 even if http code >= 400!
- podmanArgs = append(podmanArgs, "-dt", "-P", "--health-cmd", "curl -f http://localhost/", nginx)
+ podmanArgs = append(podmanArgs, "-dt", "-P", "--health-cmd", "curl -f http://localhost/", NGINX_IMAGE)
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
return session, session.OutputToString()
@@ -618,14 +618,14 @@ func (p *PodmanTestIntegration) RunHealthCheck(cid string) error {
restart := p.Podman([]string{"restart", cid})
restart.WaitWithDefaultTimeout()
if restart.ExitCode() != 0 {
- return errors.Errorf("unable to restart %s", cid)
+ return fmt.Errorf("unable to restart %s", cid)
}
}
}
fmt.Printf("Waiting for %s to pass healthcheck\n", cid)
time.Sleep(1 * time.Second)
}
- return errors.Errorf("unable to detect %s as running", cid)
+ return fmt.Errorf("unable to detect %s as running", cid)
}
func (p *PodmanTestIntegration) CreateSeccompJSON(in []byte) (string, error) {
@@ -1042,18 +1042,15 @@ var IPRegex = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01
// digShort execs into the given container and does a dig lookup with a timeout
// backoff. If it gets a response, it ensures that the output is in the correct
// format and iterates a string array for match
-func digShort(container, lookupName string, matchNames []string, p *PodmanTestIntegration) {
+func digShort(container, lookupName, expectedIP string, p *PodmanTestIntegration) {
digInterval := time.Millisecond * 250
for i := 0; i < 6; i++ {
time.Sleep(digInterval * time.Duration(i))
dig := p.Podman([]string{"exec", container, "dig", "+short", lookupName})
dig.WaitWithDefaultTimeout()
- if dig.ExitCode() == 0 {
- output := dig.OutputToString()
- Expect(output).To(MatchRegexp(IPRegex))
- for _, name := range matchNames {
- Expect(output).To(Equal(name))
- }
+ output := dig.OutputToString()
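+ // only assert once dig exits cleanly with some output; otherwise fall
+ // through and retry with a longer backoff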
+ if dig.ExitCode() == 0 && output != "" {
+ Expect(output).To(Equal(expectedIP))
// success
return
}
@@ -1064,13 +1061,13 @@ func digShort(container, lookupName string, matchNames []string, p *PodmanTestIn
// WaitForFile to be created in defaultWaitTimeout seconds, returns false if file not created
func WaitForFile(path string) (err error) {
until := time.Now().Add(time.Duration(defaultWaitTimeout) * time.Second)
- for i := 1; time.Now().Before(until); i++ {
+ for time.Now().Before(until) {
_, err = os.Stat(path)
switch {
case err == nil:
return nil
case errors.Is(err, os.ErrNotExist):
- time.Sleep(time.Duration(i) * time.Second)
+ time.Sleep(10 * time.Millisecond)
default:
return err
}
@@ -1078,18 +1075,21 @@ func WaitForFile(path string) (err error) {
return err
}
-// WaitForService blocks, waiting for some service listening on given host:port
+// WaitForService blocks for defaultWaitTimeout seconds, waiting for some service listening on given host:port
func WaitForService(address url.URL) {
// Wait for podman to be ready
- var conn net.Conn
var err error
- for i := 1; i <= 5; i++ {
+ until := time.Now().Add(time.Duration(defaultWaitTimeout) * time.Second)
+ for time.Now().Before(until) {
+ var conn net.Conn
conn, err = net.Dial("tcp", address.Host)
- if err != nil {
- // Podman not available yet...
- time.Sleep(time.Duration(i) * time.Second)
+ if err == nil {
+ conn.Close()
+ break
}
+
+ // Podman not available yet...
+ time.Sleep(10 * time.Millisecond)
}
Expect(err).ShouldNot(HaveOccurred())
- conn.Close()
}
diff --git a/test/e2e/config.go b/test/e2e/config.go
index 2ca8e2a15..a8dd6301f 100644
--- a/test/e2e/config.go
+++ b/test/e2e/config.go
@@ -1,7 +1,7 @@
package integration
var (
- redis = "quay.io/libpod/redis:alpine"
+ REDIS_IMAGE = "quay.io/libpod/redis:alpine" //nolint:revive,stylecheck
fedoraMinimal = "registry.fedoraproject.org/fedora-minimal:34"
ALPINE = "quay.io/libpod/alpine:latest"
ALPINELISTTAG = "quay.io/libpod/alpine:3.10.2"
@@ -10,12 +10,12 @@ var (
ALPINEAMD64ID = "961769676411f082461f9ef46626dd7a2d1e2b2a38e6a44364bcbecf51e66dd4"
ALPINEARM64DIGEST = "quay.io/libpod/alpine@sha256:f270dcd11e64b85919c3bab66886e59d677cf657528ac0e4805d3c71e458e525"
ALPINEARM64ID = "915beeae46751fc564998c79e73a1026542e945ca4f73dc841d09ccc6c2c0672"
- infra = "k8s.gcr.io/pause:3.2"
+ INFRA_IMAGE = "k8s.gcr.io/pause:3.2" //nolint:revive,stylecheck
BB = "quay.io/libpod/busybox:latest"
- healthcheck = "quay.io/libpod/alpine_healthcheck:latest"
+ HEALTHCHECK_IMAGE = "quay.io/libpod/alpine_healthcheck:latest" //nolint:revive,stylecheck
ImageCacheDir = "/tmp/podman/imagecachedir"
fedoraToolbox = "registry.fedoraproject.org/fedora-toolbox:36"
- volumeTest = "quay.io/libpod/volume-plugin-test-img:latest"
+ volumeTest = "quay.io/libpod/volume-plugin-test-img:20220623"
// This image has seccomp profiles that blocks all syscalls.
// The intention behind blocking all syscalls is to prevent
diff --git a/test/e2e/config_amd64.go b/test/e2e/config_amd64.go
index c4cb97b2e..f32542df8 100644
--- a/test/e2e/config_amd64.go
+++ b/test/e2e/config_amd64.go
@@ -1,16 +1,16 @@
package integration
var (
- STORAGE_FS = "vfs" //nolint:revive,stylecheck
- STORAGE_OPTIONS = "--storage-driver vfs" //nolint:revive,stylecheck
- ROOTLESS_STORAGE_FS = "vfs" //nolint:revive,stylecheck
- ROOTLESS_STORAGE_OPTIONS = "--storage-driver vfs" //nolint:revive,stylecheck
- CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, nginx, redis, registry, infra, labels, healthcheck, UBI_INIT, UBI_MINIMAL, fedoraToolbox} //nolint:revive,stylecheck
- nginx = "quay.io/libpod/alpine_nginx:latest"
- BB_GLIBC = "docker.io/library/busybox:glibc" //nolint:revive,stylecheck
- registry = "quay.io/libpod/registry:2.6"
- labels = "quay.io/libpod/alpine_labels:latest"
- UBI_MINIMAL = "registry.access.redhat.com/ubi8-minimal" //nolint:revive,stylecheck
- UBI_INIT = "registry.access.redhat.com/ubi8-init" //nolint:revive,stylecheck
- cirros = "quay.io/libpod/cirros:latest"
+ STORAGE_FS = "vfs" //nolint:revive,stylecheck
+ STORAGE_OPTIONS = "--storage-driver vfs" //nolint:revive,stylecheck
+ ROOTLESS_STORAGE_FS = "vfs" //nolint:revive,stylecheck
+ ROOTLESS_STORAGE_OPTIONS = "--storage-driver vfs" //nolint:revive,stylecheck
+ CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, NGINX_IMAGE, REDIS_IMAGE, REGISTRY_IMAGE, INFRA_IMAGE, LABELS_IMAGE, HEALTHCHECK_IMAGE, UBI_INIT, UBI_MINIMAL, fedoraToolbox} //nolint:revive,stylecheck
+ NGINX_IMAGE = "quay.io/libpod/alpine_nginx:latest" //nolint:revive,stylecheck
+ BB_GLIBC = "docker.io/library/busybox:glibc" //nolint:revive,stylecheck
+ REGISTRY_IMAGE = "quay.io/libpod/registry:2.6" //nolint:revive,stylecheck
+ LABELS_IMAGE = "quay.io/libpod/alpine_labels:latest" //nolint:revive,stylecheck
+ UBI_MINIMAL = "registry.access.redhat.com/ubi8-minimal" //nolint:revive,stylecheck
+ UBI_INIT = "registry.access.redhat.com/ubi8-init" //nolint:revive,stylecheck
+ CIRROS_IMAGE = "quay.io/libpod/cirros:latest" //nolint:revive,stylecheck
)
diff --git a/test/e2e/config_ppc64le.go b/test/e2e/config_ppc64le.go
index 569a34efb..a4bec748a 100644
--- a/test/e2e/config_ppc64le.go
+++ b/test/e2e/config_ppc64le.go
@@ -5,9 +5,9 @@ var (
STORAGE_OPTIONS = "--storage-driver overlay"
ROOTLESS_STORAGE_FS = "vfs"
ROOTLESS_STORAGE_OPTIONS = "--storage-driver vfs"
- CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, nginx, redis, infra, labels}
- nginx = "quay.io/libpod/alpine_nginx-ppc64le:latest"
+ CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, NGINX_IMAGE, REDIS_IMAGE, INFRA_IMAGE, LABELS_IMAGE}
+ NGINX_IMAGE = "quay.io/libpod/alpine_nginx-ppc64le:latest"
BB_GLIBC = "docker.io/ppc64le/busybox:glibc"
- labels = "quay.io/libpod/alpine_labels-ppc64le:latest"
- registry string
+ LABELS_IMAGE = "quay.io/libpod/alpine_labels-ppc64le:latest"
+ REGISTRY_IMAGE string
)
diff --git a/test/e2e/container_inspect_test.go b/test/e2e/container_inspect_test.go
index 5aed943da..436c60c05 100644
--- a/test/e2e/container_inspect_test.go
+++ b/test/e2e/container_inspect_test.go
@@ -58,7 +58,7 @@ var _ = Describe("Podman container inspect", func() {
It("podman inspect shows exposed ports on image", func() {
name := "testcon"
- session := podmanTest.Podman([]string{"run", "-d", "--expose", "8989", "--name", name, nginx})
+ session := podmanTest.Podman([]string{"run", "-d", "--expose", "8989", "--name", name, NGINX_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
diff --git a/test/e2e/create_staticip_test.go b/test/e2e/create_staticip_test.go
index 6fd88753b..85cc5023c 100644
--- a/test/e2e/create_staticip_test.go
+++ b/test/e2e/create_staticip_test.go
@@ -25,7 +25,7 @@ var _ = Describe("Podman create with --ip flag", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- // Cleanup the CNI networks used by the tests
+ // Clean up the CNI networks used by the tests
os.RemoveAll("/var/lib/cni/networks/podman")
})
diff --git a/test/e2e/create_staticmac_test.go b/test/e2e/create_staticmac_test.go
index f02d9c88b..32deb04a8 100644
--- a/test/e2e/create_staticmac_test.go
+++ b/test/e2e/create_staticmac_test.go
@@ -25,7 +25,7 @@ var _ = Describe("Podman run with --mac-address flag", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- // Cleanup the CNI networks used by the tests
+ // Clean up the CNI networks used by the tests
os.RemoveAll("/var/lib/cni/networks/podman")
})
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index 63544d579..9679aad24 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -63,7 +63,7 @@ var _ = Describe("Podman create", func() {
lock := GetPortLock("5000")
defer lock.Unlock()
- session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -273,7 +273,7 @@ var _ = Describe("Podman create", func() {
It("podman run entrypoint and cmd test", func() {
name := "test101"
- create := podmanTest.Podman([]string{"create", "--name", name, redis})
+ create := podmanTest.Podman([]string{"create", "--name", name, REDIS_IMAGE})
create.WaitWithDefaultTimeout()
Expect(create).Should(Exit(0))
@@ -560,7 +560,7 @@ var _ = Describe("Podman create", func() {
session = podmanTest.Podman([]string{"create", "--umask", "9999", "--name", "bad", ALPINE})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
- Expect(session.ErrorToString()).To(ContainSubstring("Invalid umask"))
+ Expect(session.ErrorToString()).To(ContainSubstring("invalid umask"))
})
It("create container in pod with IP should fail", func() {
diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go
index 725118ab0..528fa143d 100644
--- a/test/e2e/events_test.go
+++ b/test/e2e/events_test.go
@@ -216,4 +216,25 @@ var _ = Describe("Podman events", func() {
Expect(result.OutputToString()).To(ContainSubstring("create"))
})
+ It("podman events health_status generated", func() {
+ session := podmanTest.Podman([]string{"run", "--name", "test-hc", "-dt", "--health-cmd", "echo working", "busybox"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
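+ // Run the healthcheck until it passes (or give up after 5 tries) so that
+ // at least one health_status event has been generated before we query events.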
+ for i := 0; i < 5; i++ {
+ hc := podmanTest.Podman([]string{"healthcheck", "run", "test-hc"})
+ hc.WaitWithDefaultTimeout()
+ exitCode := hc.ExitCode()
+ if exitCode == 0 || i == 4 {
+ break
+ }
+ time.Sleep(1 * time.Second)
+ }
+
+ result := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=health_status"})
+ result.WaitWithDefaultTimeout()
+ Expect(result).Should(Exit(0))
+ Expect(len(result.OutputToStringArray())).To(BeNumerically(">=", 1), "Number of health_status events")
+ })
+
})
diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go
index 08e8fbc8c..45a2f1f86 100644
--- a/test/e2e/generate_systemd_test.go
+++ b/test/e2e/generate_systemd_test.go
@@ -111,7 +111,7 @@ var _ = Describe("Podman generate systemd", func() {
})
It("podman generate systemd", func() {
- n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", nginx})
+ n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", NGINX_IMAGE})
n.WaitWithDefaultTimeout()
Expect(n).Should(Exit(0))
@@ -124,7 +124,7 @@ var _ = Describe("Podman generate systemd", func() {
})
It("podman generate systemd --files --name", func() {
- n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", nginx})
+ n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", NGINX_IMAGE})
n.WaitWithDefaultTimeout()
Expect(n).Should(Exit(0))
@@ -139,7 +139,7 @@ var _ = Describe("Podman generate systemd", func() {
})
It("podman generate systemd with timeout", func() {
- n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", nginx})
+ n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", NGINX_IMAGE})
n.WaitWithDefaultTimeout()
Expect(n).Should(Exit(0))
@@ -159,7 +159,7 @@ var _ = Describe("Podman generate systemd", func() {
})
It("podman generate systemd with user-defined dependencies", func() {
- n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", nginx})
+ n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", NGINX_IMAGE})
n.WaitWithDefaultTimeout()
Expect(n).Should(Exit(0))
diff --git a/test/e2e/healthcheck_run_test.go b/test/e2e/healthcheck_run_test.go
index add739988..fd4e763f9 100644
--- a/test/e2e/healthcheck_run_test.go
+++ b/test/e2e/healthcheck_run_test.go
@@ -45,7 +45,7 @@ var _ = Describe("Podman healthcheck run", func() {
})
It("podman disable healthcheck with --no-healthcheck on valid container", func() {
- session := podmanTest.Podman([]string{"run", "-dt", "--no-healthcheck", "--name", "hc", healthcheck})
+ session := podmanTest.Podman([]string{"run", "-dt", "--no-healthcheck", "--name", "hc", HEALTHCHECK_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"})
@@ -54,7 +54,7 @@ var _ = Describe("Podman healthcheck run", func() {
})
It("podman disable healthcheck with --no-healthcheck must not show starting on status", func() {
- session := podmanTest.Podman([]string{"run", "-dt", "--no-healthcheck", "--name", "hc", healthcheck})
+ session := podmanTest.Podman([]string{"run", "-dt", "--no-healthcheck", "--name", "hc", HEALTHCHECK_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
hc := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.State.Health.Status}}", "hc"})
@@ -98,7 +98,7 @@ var _ = Describe("Podman healthcheck run", func() {
})
It("podman disable healthcheck with --health-cmd=none on valid container", func() {
- session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "none", "--name", "hc", healthcheck})
+ session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "none", "--name", "hc", HEALTHCHECK_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"})
@@ -108,7 +108,7 @@ var _ = Describe("Podman healthcheck run", func() {
It("podman healthcheck on valid container", func() {
Skip("Extremely consistent flake - re-enable on debugging")
- session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", healthcheck})
+ session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", HEALTHCHECK_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -143,7 +143,7 @@ var _ = Describe("Podman healthcheck run", func() {
})
It("podman healthcheck on stopped container", func() {
- session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", healthcheck, "ls"})
+ session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", HEALTHCHECK_IMAGE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
diff --git a/test/e2e/image_scp_test.go b/test/e2e/image_scp_test.go
index 2ad3cc75e..77fe810bd 100644
--- a/test/e2e/image_scp_test.go
+++ b/test/e2e/image_scp_test.go
@@ -22,12 +22,10 @@ var _ = Describe("podman image scp", func() {
)
BeforeEach(func() {
-
ConfPath.Value, ConfPath.IsSet = os.LookupEnv("CONTAINERS_CONF")
conf, err := ioutil.TempFile("", "containersconf")
- if err != nil {
- panic(err)
- }
+ Expect(err).ToNot(HaveOccurred())
+
os.Setenv("CONTAINERS_CONF", conf.Name())
tempdir, err = CreateTempDirInTempDir()
if err != nil {
@@ -52,42 +50,40 @@ var _ = Describe("podman image scp", func() {
})
It("podman image scp bogus image", func() {
- if IsRemote() {
- Skip("this test is only for non-remote")
- }
scp := podmanTest.Podman([]string{"image", "scp", "FOOBAR"})
scp.WaitWithDefaultTimeout()
- Expect(scp).To(ExitWithError())
+ Expect(scp).Should(ExitWithError())
})
It("podman image scp with proper connection", func() {
- if IsRemote() {
- Skip("this test is only for non-remote")
- }
cmd := []string{"system", "connection", "add",
"--default",
"QA",
- "ssh://root@server.fubar.com:2222/run/podman/podman.sock",
+ "ssh://root@podman.test:2222/run/podman/podman.sock",
}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
- Expect(session).To(Exit(0))
+ Expect(session).Should(Exit(0))
cfg, err := config.ReadCustomConfig()
Expect(err).ShouldNot(HaveOccurred())
- Expect(cfg.Engine).To(HaveField("ActiveService", "QA"))
+ Expect(cfg.Engine).Should(HaveField("ActiveService", "QA"))
Expect(cfg.Engine.ServiceDestinations).To(HaveKeyWithValue("QA",
config.Destination{
- URI: "ssh://root@server.fubar.com:2222/run/podman/podman.sock",
+ URI: "ssh://root@podman.test:2222/run/podman/podman.sock",
},
))
scp := podmanTest.Podman([]string{"image", "scp", ALPINE, "QA::"})
- scp.Wait(45)
+ scp.WaitWithDefaultTimeout()
// exit with error because we cannot make an actual ssh connection
// This tests that the input we are given is validated and prepared correctly
- // The error given should either be a missing image (due to testing suite complications) or a i/o timeout on ssh
- Expect(scp).To(ExitWithError())
+ // The error given should either be a missing image (due to testing suite complications) or a "no such host" error from ssh
+ Expect(scp).Should(ExitWithError())
+ // podman-remote exits with a different error
+ if !IsRemote() {
+ Expect(scp.ErrorToString()).Should(ContainSubstring("no such host"))
+ }
})
diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go
index 6fe850f0b..1ce2fa93d 100644
--- a/test/e2e/inspect_test.go
+++ b/test/e2e/inspect_test.go
@@ -182,8 +182,8 @@ var _ = Describe("Podman inspect", func() {
})
It("podman inspect shows healthcheck on docker image", func() {
- podmanTest.AddImageToRWStore(healthcheck)
- session := podmanTest.Podman([]string{"inspect", "--format=json", healthcheck})
+ podmanTest.AddImageToRWStore(HEALTHCHECK_IMAGE)
+ session := podmanTest.Podman([]string{"inspect", "--format=json", HEALTHCHECK_IMAGE})
session.WaitWithDefaultTimeout()
imageData := session.InspectImageJSON()
Expect(imageData[0].HealthCheck.Timeout).To(BeNumerically("==", 3000000000))
diff --git a/test/e2e/kill_test.go b/test/e2e/kill_test.go
index 552a7c15d..2a9a86729 100644
--- a/test/e2e/kill_test.go
+++ b/test/e2e/kill_test.go
@@ -128,6 +128,26 @@ var _ = Describe("Podman kill", func() {
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
})
+ It("podman kill paused container", func() {
+ ctrName := "testctr"
+ session := podmanTest.RunTopContainer(ctrName)
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ pause := podmanTest.Podman([]string{"pause", ctrName})
+ pause.WaitWithDefaultTimeout()
+ Expect(pause).Should(Exit(0))
+
+ kill := podmanTest.Podman([]string{"kill", ctrName})
+ kill.WaitWithDefaultTimeout()
+ Expect(kill).Should(Exit(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", "-f", "{{.State.Status}}", ctrName})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect).Should(Exit(0))
+ Expect(inspect.OutputToString()).To(Or(Equal("stopped"), Equal("exited")))
+ })
+
It("podman kill --cidfile", func() {
tmpDir, err := ioutil.TempDir("", "")
Expect(err).To(BeNil())
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index a633bd3d7..ecb7a2278 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -76,3 +76,8 @@ func (p *PodmanTestIntegration) StopRemoteService() {}
// We don't support running API service when local
func (p *PodmanTestIntegration) StartRemoteService() {
}
+
+// Just a stub for compiling with `!remote`.
+func getRemoteOptions(p *PodmanTestIntegration, args []string) []string {
+ return nil
+}
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 4e6dcb8af..14dd6b6b8 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -8,6 +8,7 @@ import (
"time"
. "github.com/containers/podman/v4/test/utils"
+ "github.com/containers/storage/pkg/stringid"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
@@ -102,12 +103,12 @@ var _ = Describe("Podman logs", func() {
It("tail 99 lines: "+log, func() {
skipIfJournaldInContainer()
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
+ name := "test1"
+ logc := podmanTest.Podman([]string{"run", "--name", name, "--log-driver", log, ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
logc.WaitWithDefaultTimeout()
Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
- results := podmanTest.Podman([]string{"logs", "--tail", "99", cid})
+ results := podmanTest.Podman([]string{"logs", "--tail", "99", name})
results.WaitWithDefaultTimeout()
Expect(results).To(Exit(0))
Expect(results.OutputToStringArray()).To(HaveLen(3))
@@ -116,11 +117,17 @@ var _ = Describe("Podman logs", func() {
It("tail 800 lines: "+log, func() {
skipIfJournaldInContainer()
+ // this uses -d so that we do not have 1000 unnecessary lines printed in every test log
logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "i=1; while [ \"$i\" -ne 1000 ]; do echo \"line $i\"; i=$((i + 1)); done"})
logc.WaitWithDefaultTimeout()
Expect(logc).To(Exit(0))
cid := logc.OutputToString()
+ // make sure we wait for the container to finish writing its output
+ wait := podmanTest.Podman([]string{"wait", cid})
+ wait.WaitWithDefaultTimeout()
+ Expect(wait).To(Exit(0))
+
results := podmanTest.Podman([]string{"logs", "--tail", "800", cid})
results.WaitWithDefaultTimeout()
Expect(results).To(Exit(0))
@@ -364,6 +371,26 @@ var _ = Describe("Podman logs", func() {
Expect(results.OutputToString()).To(Equal("stdout"))
Expect(results.ErrorToString()).To(Equal("stderr"))
})
+
+ It("podman logs partial log lines: "+log, func() {
+ skipIfJournaldInContainer()
+
+ cname := "log-test"
+ content := stringid.GenerateNonCryptoID()
+ // use printf to print no extra newline
+ logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--name", cname, ALPINE, "printf", content})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc).To(Exit(0))
+ // Important: do not use OutputToString(); it will remove the trailing newline from the output.
+ // However, this test must make sure that there is no such extra newline.
+ Expect(string(logc.Out.Contents())).To(Equal(content))
+
+ logs := podmanTest.Podman([]string{"logs", cname})
+ logs.WaitWithDefaultTimeout()
+ Expect(logs).To(Exit(0))
+ // see comment above
+ Expect(string(logs.Out.Contents())).To(Equal(content))
+ })
}
It("using journald for container with container tag", func() {
diff --git a/test/e2e/manifest_test.go b/test/e2e/manifest_test.go
index 88ccdc87d..1f58419a1 100644
--- a/test/e2e/manifest_test.go
+++ b/test/e2e/manifest_test.go
@@ -42,7 +42,6 @@ var _ = Describe("Podman manifest", func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
-
})
It("create w/o image", func() {
session := podmanTest.Podman([]string{"manifest", "create", "foo"})
@@ -91,6 +90,27 @@ var _ = Describe("Podman manifest", func() {
Expect(session.OutputToString()).To(ContainSubstring(imageListARM64InstanceDigest))
})
+ It("add with new version", func() {
+ // The following test must pass for both podman and podman-remote
+ session := podmanTest.Podman([]string{"manifest", "create", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ id := strings.TrimSpace(string(session.Out.Contents()))
+
+ session = podmanTest.Podman([]string{"manifest", "inspect", id})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"manifest", "add", "--os-version", "7.7.7", "foo", imageListInstance})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"manifest", "inspect", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).To(ContainSubstring("7.7.7"))
+ })
+
It("tag", func() {
session := podmanTest.Podman([]string{"manifest", "create", "foobar"})
session.WaitWithDefaultTimeout()
@@ -274,10 +294,23 @@ var _ = Describe("Podman manifest", func() {
It("authenticated push", func() {
registryOptions := &podmanRegistry.Options{
- Image: "docker-archive:" + imageTarPath(registry),
+ Image: "docker-archive:" + imageTarPath(REGISTRY_IMAGE),
+ }
+
+ // registry script invokes $PODMAN; make sure we define that
+ // so it can use our same networking options.
+ opts := strings.Join(podmanTest.MakeOptions(nil, false, false), " ")
+ if IsRemote() {
+ opts = strings.Join(getRemoteOptions(podmanTest, nil), " ")
}
+ os.Setenv("PODMAN", podmanTest.PodmanBinary+" "+opts)
registry, err := podmanRegistry.StartWithOptions(registryOptions)
Expect(err).To(BeNil())
+ defer func() {
+ err := registry.Stop()
+ Expect(err).To(BeNil())
+ os.Unsetenv("PODMAN")
+ }()
session := podmanTest.Podman([]string{"manifest", "create", "foo"})
session.WaitWithDefaultTimeout()
@@ -306,9 +339,6 @@ var _ = Describe("Podman manifest", func() {
push = podmanTest.Podman([]string{"manifest", "push", "--tls-verify=false", "--creds=podmantest:wrongpasswd", "foo", "localhost:" + registry.Port + "/credstest"})
push.WaitWithDefaultTimeout()
Expect(push).To(ExitWithError())
-
- err = registry.Stop()
- Expect(err).To(BeNil())
})
It("push with error", func() {
@@ -384,4 +414,32 @@ var _ = Describe("Podman manifest", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
})
+
+ It("manifest rm should not remove image and should be able to remove tagged manifest list", func() {
+ // manifest rm should fail with `image is not a manifest list`
+ session := podmanTest.Podman([]string{"manifest", "rm", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(125))
+ Expect(session.ErrorToString()).To(ContainSubstring("image is not a manifest list"))
+
+ manifestName := "testmanifest:sometag"
+ session = podmanTest.Podman([]string{"manifest", "create", manifestName})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ // verify that the manifest exists
+ session = podmanTest.Podman([]string{"manifest", "exists", manifestName})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ // manifest rm should be able to remove tagged manifest list
+ session = podmanTest.Podman([]string{"manifest", "rm", manifestName})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ // verify that the manifest no longer exists
+ session = podmanTest.Podman([]string{"manifest", "exists", manifestName})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(1))
+ })
})
diff --git a/test/e2e/network_connect_disconnect_test.go b/test/e2e/network_connect_disconnect_test.go
index a0716c84d..c9ffe6a8d 100644
--- a/test/e2e/network_connect_disconnect_test.go
+++ b/test/e2e/network_connect_disconnect_test.go
@@ -2,7 +2,6 @@ package integration
import (
"os"
- "strings"
. "github.com/containers/podman/v4/test/utils"
"github.com/containers/storage/pkg/stringid"
@@ -94,7 +93,7 @@ var _ = Describe("Podman network connect and disconnect", func() {
exec2 := podmanTest.Podman([]string{"exec", "-it", "test", "cat", "/etc/resolv.conf"})
exec2.WaitWithDefaultTimeout()
Expect(exec2).Should(Exit(0))
- Expect(strings.Contains(exec2.OutputToString(), ns)).To(BeTrue())
+ Expect(exec2.OutputToString()).To(ContainSubstring(ns))
dis := podmanTest.Podman([]string{"network", "disconnect", netName, "test"})
dis.WaitWithDefaultTimeout()
@@ -113,7 +112,12 @@ var _ = Describe("Podman network connect and disconnect", func() {
exec3 := podmanTest.Podman([]string{"exec", "-it", "test", "cat", "/etc/resolv.conf"})
exec3.WaitWithDefaultTimeout()
Expect(exec3).Should(Exit(0))
- Expect(strings.Contains(exec3.OutputToString(), ns)).To(BeFalse())
+ Expect(exec3.OutputToString()).ToNot(ContainSubstring(ns))
+
+ // make sure stats still works https://github.com/containers/podman/issues/13824
+ stats := podmanTest.Podman([]string{"stats", "test", "--no-stream"})
+ stats.WaitWithDefaultTimeout()
+ Expect(stats).Should(Exit(0))
})
It("bad network name in connect should result in error", func() {
@@ -206,7 +210,7 @@ var _ = Describe("Podman network connect and disconnect", func() {
exec2 := podmanTest.Podman([]string{"exec", "-it", "test", "cat", "/etc/resolv.conf"})
exec2.WaitWithDefaultTimeout()
Expect(exec2).Should(Exit(0))
- Expect(strings.Contains(exec2.OutputToString(), ns)).To(BeFalse())
+ Expect(exec2.OutputToString()).ToNot(ContainSubstring(ns))
ip := "10.11.100.99"
mac := "44:11:44:11:44:11"
@@ -235,7 +239,12 @@ var _ = Describe("Podman network connect and disconnect", func() {
exec3 := podmanTest.Podman([]string{"exec", "-it", "test", "cat", "/etc/resolv.conf"})
exec3.WaitWithDefaultTimeout()
Expect(exec3).Should(Exit(0))
- Expect(strings.Contains(exec3.OutputToString(), ns)).To(BeTrue())
+ Expect(exec3.OutputToString()).To(ContainSubstring(ns))
+
+ // make sure stats works https://github.com/containers/podman/issues/13824
+ stats := podmanTest.Podman([]string{"stats", "test", "--no-stream"})
+ stats.WaitWithDefaultTimeout()
+ Expect(stats).Should(Exit(0))
// make sure no logrus errors are shown https://github.com/containers/podman/issues/9602
rm := podmanTest.Podman([]string{"rm", "--time=0", "-f", "test"})
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index 715455521..d4f60d3e4 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -163,6 +163,26 @@ var _ = Describe("Podman network", func() {
Expect(session.OutputToString()).To(Not(ContainSubstring(name)))
})
+ It("podman network list --filter dangling", func() {
+ name, path := generateNetworkConfig(podmanTest)
+ defer removeConf(path)
+
+ session := podmanTest.Podman([]string{"network", "ls", "--filter", "dangling=true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).To(ContainSubstring(name))
+
+ session = podmanTest.Podman([]string{"network", "ls", "--filter", "dangling=false"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).NotTo(ContainSubstring(name))
+
+ session = podmanTest.Podman([]string{"network", "ls", "--filter", "dangling=foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(ExitWithError())
+ Expect(session.ErrorToString()).To(ContainSubstring(`invalid dangling filter value "foo"`))
+ })
+
It("podman network ID test", func() {
net := "networkIDTest"
// the network id should be the sha256 hash of the network name
@@ -487,14 +507,14 @@ var _ = Describe("Podman network", func() {
interval *= 2
}
- top := podmanTest.Podman([]string{"run", "-dt", "--name=web", "--network=" + netName, "--network-alias=web1", "--network-alias=web2", nginx})
+ top := podmanTest.Podman([]string{"run", "-dt", "--name=web", "--network=" + netName, "--network-alias=web1", "--network-alias=web2", NGINX_IMAGE})
top.WaitWithDefaultTimeout()
Expect(top).Should(Exit(0))
interval = 250 * time.Millisecond
// Wait for the nginx service to be running
for i := 0; i < 6; i++ {
// Test curl against the container's name
- c1 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, nginx, "curl", "web"})
+ c1 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, NGINX_IMAGE, "curl", "web"})
c1.WaitWithDefaultTimeout()
worked = c1.ExitCode() == 0
if worked {
@@ -507,12 +527,12 @@ var _ = Describe("Podman network", func() {
// Nginx is now running so no need to do a loop
// Test against the first alias
- c2 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, nginx, "curl", "web1"})
+ c2 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, NGINX_IMAGE, "curl", "web1"})
c2.WaitWithDefaultTimeout()
Expect(c2).Should(Exit(0))
// Test against the second alias
- c3 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, nginx, "curl", "web2"})
+ c3 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, NGINX_IMAGE, "curl", "web2"})
c3.WaitWithDefaultTimeout()
Expect(c3).Should(Exit(0))
})
@@ -538,14 +558,14 @@ var _ = Describe("Podman network", func() {
interval *= 2
}
- top := podmanTest.Podman([]string{"run", "-dt", "--name=web", "--network=" + netName, "--network-alias=web1", "--network-alias=web2", nginx})
+ top := podmanTest.Podman([]string{"run", "-dt", "--name=web", "--network=" + netName, "--network-alias=web1", "--network-alias=web2", NGINX_IMAGE})
top.WaitWithDefaultTimeout()
Expect(top).Should(Exit(0))
interval = 250 * time.Millisecond
// Wait for the nginx service to be running
for i := 0; i < 6; i++ {
// Test curl against the container's name
- c1 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, nginx, "curl", "web"})
+ c1 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, NGINX_IMAGE, "curl", "web"})
c1.WaitWithDefaultTimeout()
worked = c1.ExitCode() == 0
if worked {
@@ -558,12 +578,12 @@ var _ = Describe("Podman network", func() {
// Nginx is now running so no need to do a loop
// Test against the first alias
- c2 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, nginx, "curl", "web1"})
+ c2 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, NGINX_IMAGE, "curl", "web1"})
c2.WaitWithDefaultTimeout()
Expect(c2).Should(Exit(0))
// Test against the second alias
- c3 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, nginx, "curl", "web2"})
+ c3 := podmanTest.Podman([]string{"run", "--dns-search", "dns.podman", "--network=" + netName, NGINX_IMAGE, "curl", "web2"})
c3.WaitWithDefaultTimeout()
Expect(c3).Should(Exit(0))
})
diff --git a/test/e2e/pause_test.go b/test/e2e/pause_test.go
index 402719de2..d677eddb0 100644
--- a/test/e2e/pause_test.go
+++ b/test/e2e/pause_test.go
@@ -82,7 +82,7 @@ var _ = Describe("Podman pause", func() {
// check we can read stats for a paused container
result = podmanTest.Podman([]string{"stats", "--no-stream", cid})
result.WaitWithDefaultTimeout()
- Expect(result).To(ExitWithError())
+ Expect(result).Should(Exit(0))
})
It("podman pause a running container by id", func() {
@@ -273,7 +273,7 @@ var _ = Describe("Podman pause", func() {
It("Pause a bunch of running containers", func() {
for i := 0; i < 3; i++ {
name := fmt.Sprintf("test%d", i)
- run := podmanTest.Podman([]string{"run", "-dt", "--name", name, nginx})
+ run := podmanTest.Podman([]string{"run", "-dt", "--name", name, NGINX_IMAGE})
run.WaitWithDefaultTimeout()
Expect(run).Should(Exit(0))
@@ -300,7 +300,7 @@ var _ = Describe("Podman pause", func() {
It("Unpause a bunch of running containers", func() {
for i := 0; i < 3; i++ {
name := fmt.Sprintf("test%d", i)
- run := podmanTest.Podman([]string{"run", "-dt", "--name", name, nginx})
+ run := podmanTest.Podman([]string{"run", "-dt", "--name", name, NGINX_IMAGE})
run.WaitWithDefaultTimeout()
Expect(run).Should(Exit(0))
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 31044f68b..457aaebb2 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -1512,7 +1512,7 @@ var _ = Describe("Podman play kube", func() {
// If you do not supply command or args for a Container, the defaults defined in the Docker image are used.
It("podman play kube test correct args and cmd when not specified", func() {
- pod := getPod(withCtr(getCtr(withImage(registry), withCmd(nil), withArg(nil))))
+ pod := getPod(withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd(nil), withArg(nil))))
err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -1536,7 +1536,7 @@ var _ = Describe("Podman play kube", func() {
// If you supply a command but no args for a Container, only the supplied command is used.
// The default EntryPoint and the default Cmd defined in the Docker image are ignored.
It("podman play kube test correct command with only set command in yaml file", func() {
- pod := getPod(withCtr(getCtr(withImage(registry), withCmd([]string{"echo", "hello"}), withArg(nil))))
+ pod := getPod(withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd([]string{"echo", "hello"}), withArg(nil))))
err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -1587,7 +1587,7 @@ var _ = Describe("Podman play kube", func() {
// If you supply only args for a Container, the default Entrypoint defined in the Docker image is run with the args that you supplied.
It("podman play kube test correct command with only set args in yaml file", func() {
- pod := getPod(withCtr(getCtr(withImage(registry), withCmd(nil), withArg([]string{"echo", "hello"}))))
+ pod := getPod(withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd(nil), withArg([]string{"echo", "hello"}))))
err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -1611,7 +1611,7 @@ var _ = Describe("Podman play kube", func() {
// the default Entrypoint and the default Cmd defined in the Docker image are ignored.
// Your command is run with your args.
It("podman play kube test correct command with both set args and cmd in yaml file", func() {
- pod := getPod(withCtr(getCtr(withImage(registry), withCmd([]string{"echo"}), withArg([]string{"hello"}))))
+ pod := getPod(withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd([]string{"echo"}), withArg([]string{"hello"}))))
err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -2507,7 +2507,7 @@ spec:
Expect(kube).To(ExitWithError())
})
- It("podman play kube test with read only HostPath volume", func() {
+ It("podman play kube test with read-only HostPath volume", func() {
hostPathLocation := filepath.Join(tempdir, "file")
f, err := os.Create(hostPathLocation)
Expect(err).To(BeNil())
@@ -3688,7 +3688,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
})
// Check the block devices are exposed inside container
- It("ddpodman play kube expose block device inside container", func() {
+ It("podman play kube expose block device inside container", func() {
SkipIfRootless("It needs root access to create devices")
// randomize the folder name to avoid error when running tests with multiple nodes
@@ -3705,7 +3705,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
blockVolume := getHostPathVolume("BlockDevice", devicePath)
- pod := getPod(withVolume(blockVolume), withCtr(getCtr(withImage(registry), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
+ pod := getPod(withVolume(blockVolume), withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -3727,7 +3727,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
})
// Check the char devices are exposed inside container
- It("ddpodman play kube expose character device inside container", func() {
+ It("podman play kube expose character device inside container", func() {
SkipIfRootless("It needs root access to create devices")
// randomize the folder name to avoid error when running tests with multiple nodes
@@ -3744,7 +3744,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
charVolume := getHostPathVolume("CharDevice", devicePath)
- pod := getPod(withVolume(charVolume), withCtr(getCtr(withImage(registry), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
+ pod := getPod(withVolume(charVolume), withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -3772,7 +3772,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
blockVolume := getHostPathVolume("BlockDevice", devicePath)
- pod := getPod(withVolume(blockVolume), withCtr(getCtr(withImage(registry), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
+ pod := getPod(withVolume(blockVolume), withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -3781,7 +3781,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
Expect(kube).Should(Exit(125))
})
- It("ddpodman play kube reports error when we try to expose char device as block device", func() {
+ It("podman play kube reports error when we try to expose char device as block device", func() {
SkipIfRootless("It needs root access to create devices")
// randomize the folder name to avoid error when running tests with multiple nodes
@@ -3798,7 +3798,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
charVolume := getHostPathVolume("BlockDevice", devicePath)
- pod := getPod(withVolume(charVolume), withCtr(getCtr(withImage(registry), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
+ pod := getPod(withVolume(charVolume), withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -3807,7 +3807,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
Expect(kube).Should(Exit(125))
})
- It("ddpodman play kube reports error when we try to expose block device as char device", func() {
+ It("podman play kube reports error when we try to expose block device as char device", func() {
SkipIfRootless("It needs root access to create devices")
// randomize the folder name to avoid error when running tests with multiple nodes
@@ -3823,7 +3823,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
charVolume := getHostPathVolume("CharDevice", devicePath)
- pod := getPod(withVolume(charVolume), withCtr(getCtr(withImage(registry), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
+ pod := getPod(withVolume(charVolume), withCtr(getCtr(withImage(REGISTRY_IMAGE), withCmd(nil), withArg(nil), withVolumeMount(devicePath, false))))
err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
diff --git a/test/e2e/pod_clone_test.go b/test/e2e/pod_clone_test.go
new file mode 100644
index 000000000..9c7abe7a8
--- /dev/null
+++ b/test/e2e/pod_clone_test.go
@@ -0,0 +1,194 @@
+package integration
+
+import (
+ "os"
+
+ . "github.com/containers/podman/v4/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gexec"
+)
+
+var _ = Describe("Podman pod clone", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ hostname, _ = os.Hostname()
+ )
+
+ BeforeEach(func() {
+ SkipIfRemote("podman pod clone is not supported in remote")
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ processTestResult(f)
+
+ })
+
+ It("podman pod clone basic test", func() {
+ create := podmanTest.Podman([]string{"pod", "create", "--name", "1"})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+
+ run := podmanTest.Podman([]string{"run", "--pod", "1", "-dt", ALPINE})
+ run.WaitWithDefaultTimeout()
+ Expect(run).To(Exit(0))
+
+ clone := podmanTest.Podman([]string{"pod", "clone", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ podInspect := podmanTest.Podman([]string{"pod", "inspect", clone.OutputToString()})
+ podInspect.WaitWithDefaultTimeout()
+ Expect(podInspect).To(Exit(0))
+ data := podInspect.InspectPodToJSON()
+ Expect(data.Name).To(ContainSubstring("-clone"))
+
+ podStart := podmanTest.Podman([]string{"pod", "start", clone.OutputToString()})
+ podStart.WaitWithDefaultTimeout()
+ Expect(podStart).To(Exit(0))
+
+ podInspect = podmanTest.Podman([]string{"pod", "inspect", clone.OutputToString()})
+ podInspect.WaitWithDefaultTimeout()
+ Expect(podInspect).To(Exit(0))
+ data = podInspect.InspectPodToJSON()
+ Expect(data.Containers[1].State).To(ContainSubstring("running")) // make sure non infra ctr is running
+ })
+
+ It("podman pod clone renaming test", func() {
+ create := podmanTest.Podman([]string{"pod", "create", "--name", "1"})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+
+ clone := podmanTest.Podman([]string{"pod", "clone", "--name", "2", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ podInspect := podmanTest.Podman([]string{"pod", "inspect", clone.OutputToString()})
+ podInspect.WaitWithDefaultTimeout()
+ Expect(podInspect).To(Exit(0))
+ data := podInspect.InspectPodToJSON()
+ Expect(data.Name).To(ContainSubstring("2"))
+
+ podStart := podmanTest.Podman([]string{"pod", "start", clone.OutputToString()})
+ podStart.WaitWithDefaultTimeout()
+ Expect(podStart).To(Exit(0))
+ })
+
+ It("podman pod clone start test", func() {
+ create := podmanTest.Podman([]string{"pod", "create", "--name", "1"})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+
+ clone := podmanTest.Podman([]string{"pod", "clone", "--start", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ podInspect := podmanTest.Podman([]string{"pod", "inspect", clone.OutputToString()})
+ podInspect.WaitWithDefaultTimeout()
+ Expect(podInspect).To(Exit(0))
+ data := podInspect.InspectPodToJSON()
+ Expect(data.State).To(ContainSubstring("Running"))
+
+ })
+
+ It("podman pod clone destroy test", func() {
+ create := podmanTest.Podman([]string{"pod", "create", "--name", "1"})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+
+ clone := podmanTest.Podman([]string{"pod", "clone", "--destroy", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ podInspect := podmanTest.Podman([]string{"pod", "inspect", create.OutputToString()})
+ podInspect.WaitWithDefaultTimeout()
+ Expect(podInspect).ToNot(Exit(0))
+ })
+
+ It("podman pod clone infra option test", func() {
+ // proof of concept that all currently tested infra options work
+
+ volName := "testVol"
+ volCreate := podmanTest.Podman([]string{"volume", "create", volName})
+ volCreate.WaitWithDefaultTimeout()
+ Expect(volCreate).Should(Exit(0))
+
+ podName := "testPod"
+ podCreate := podmanTest.Podman([]string{"pod", "create", "--name", podName})
+ podCreate.WaitWithDefaultTimeout()
+ Expect(podCreate).Should(Exit(0))
+
+ podClone := podmanTest.Podman([]string{"pod", "clone", "--volume", volName + ":/tmp1", podName})
+ podClone.WaitWithDefaultTimeout()
+ Expect(podClone).Should(Exit(0))
+
+ podInspect := podmanTest.Podman([]string{"pod", "inspect", "testPod-clone"})
+ podInspect.WaitWithDefaultTimeout()
+ Expect(podInspect).Should(Exit(0))
+ data := podInspect.InspectPodToJSON()
+ Expect(data.Mounts[0]).To(HaveField("Name", volName))
+ })
+
+ It("podman pod clone --shm-size", func() {
+ podCreate := podmanTest.Podman([]string{"pod", "create"})
+ podCreate.WaitWithDefaultTimeout()
+ Expect(podCreate).Should(Exit(0))
+
+ podClone := podmanTest.Podman([]string{"pod", "clone", "--shm-size", "10mb", podCreate.OutputToString()})
+ podClone.WaitWithDefaultTimeout()
+ Expect(podClone).Should(Exit(0))
+
+ run := podmanTest.Podman([]string{"run", "-it", "--pod", podClone.OutputToString(), ALPINE, "mount"})
+ run.WaitWithDefaultTimeout()
+ Expect(run).Should(Exit(0))
+ t, strings := run.GrepString("shm on /dev/shm type tmpfs")
+ Expect(t).To(BeTrue())
+ Expect(strings[0]).Should(ContainSubstring("size=10240k"))
+ })
+
+ It("podman pod clone --uts test", func() {
+ SkipIfRemote("hostname for the custom NS test is not as expected on the remote client")
+
+ session := podmanTest.Podman([]string{"pod", "create"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"pod", "clone", "--uts", "host", session.OutputToString()})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"run", "-it", "--pod", session.OutputToString(), ALPINE, "printenv", "HOSTNAME"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).To(ContainSubstring(hostname))
+
+ podName := "utsPod"
+ ns := "ns:/proc/self/ns/"
+
+ session = podmanTest.Podman([]string{"pod", "create"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ // just share uts with a custom path
+ podCreate := podmanTest.Podman([]string{"pod", "clone", "--uts", ns, "--name", podName, session.OutputToString()})
+ podCreate.WaitWithDefaultTimeout()
+ Expect(podCreate).Should(Exit(0))
+
+ podInspect := podmanTest.Podman([]string{"pod", "inspect", podName})
+ podInspect.WaitWithDefaultTimeout()
+ Expect(podInspect).Should(Exit(0))
+ podJSON := podInspect.InspectPodToJSON()
+ Expect(podJSON.InfraConfig).To(HaveField("UtsNS", ns))
+ })
+
+})
diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go
index dedb1caeb..3caae2bd5 100644
--- a/test/e2e/pod_create_test.go
+++ b/test/e2e/pod_create_test.go
@@ -23,9 +23,10 @@ import (
var _ = Describe("Podman pod create", func() {
var (
- tempdir string
- err error
- podmanTest *PodmanTestIntegration
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ hostname, _ = os.Hostname()
)
BeforeEach(func() {
@@ -98,7 +99,7 @@ var _ = Describe("Podman pod create", func() {
Expect(session).Should(Exit(0))
pod := session.OutputToString()
- webserver := podmanTest.Podman([]string{"run", "--pod", pod, "-dt", nginx})
+ webserver := podmanTest.Podman([]string{"run", "--pod", pod, "-dt", NGINX_IMAGE})
webserver.WaitWithDefaultTimeout()
Expect(webserver).Should(Exit(0))
@@ -114,7 +115,7 @@ var _ = Describe("Podman pod create", func() {
Expect(session).Should(Exit(0))
pod := session.OutputToString()
- webserver := podmanTest.Podman([]string{"run", "--pod", pod, "-dt", nginx})
+ webserver := podmanTest.Podman([]string{"run", "--pod", pod, "-dt", NGINX_IMAGE})
webserver.WaitWithDefaultTimeout()
Expect(webserver).Should(Exit(0))
Expect(ncz(port)).To(BeTrue())
@@ -128,7 +129,7 @@ var _ = Describe("Podman pod create", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- webserver := podmanTest.Podman([]string{"run", "--pod-id-file", file, "-dt", nginx})
+ webserver := podmanTest.Podman([]string{"run", "--pod-id-file", file, "-dt", NGINX_IMAGE})
webserver.WaitWithDefaultTimeout()
Expect(webserver).Should(Exit(0))
Expect(ncz(port)).To(BeTrue())
@@ -899,27 +900,6 @@ ENTRYPOINT ["sleep","99999"]
})
- It("podman pod create --device-read-bps", func() {
- SkipIfRootless("Cannot create devices in /dev in rootless mode")
- SkipIfRootlessCgroupsV1("Setting device-read-bps not supported on cgroupv1 for rootless users")
-
- podName := "testPod"
- session := podmanTest.Podman([]string{"pod", "create", "--device-read-bps", "/dev/zero:1mb", "--name", podName})
- session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(0))
-
- if CGROUPSV2 {
- session = podmanTest.Podman([]string{"run", "--rm", "--pod", podName, ALPINE, "sh", "-c", "cat /sys/fs/cgroup/$(sed -e 's|0::||' < /proc/self/cgroup)/io.max"})
- } else {
- session = podmanTest.Podman([]string{"run", "--rm", "--pod", podName, ALPINE, "cat", "/sys/fs/cgroup/blkio/blkio.throttle.read_bps_device"})
- }
- session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(0))
- if !CGROUPSV2 {
- Expect(session.OutputToString()).To(ContainSubstring("1048576"))
- }
- })
-
It("podman pod create --volumes-from", func() {
volName := "testVol"
volCreate := podmanTest.Podman([]string{"volume", "create", volName})
@@ -1112,4 +1092,73 @@ ENTRYPOINT ["sleep","99999"]
})
+ It("podman pod create infra inheritance test", func() {
+ volName := "testVol1"
+ volCreate := podmanTest.Podman([]string{"volume", "create", volName})
+ volCreate.WaitWithDefaultTimeout()
+ Expect(volCreate).Should(Exit(0))
+
+ session := podmanTest.Podman([]string{"pod", "create", "-v", volName + ":/vol1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ volName2 := "testVol2"
+ volCreate = podmanTest.Podman([]string{"volume", "create", volName2})
+ volCreate.WaitWithDefaultTimeout()
+ Expect(volCreate).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"run", "--pod", session.OutputToString(), "-v", volName2 + ":/vol2", ALPINE, "mount"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).Should(ContainSubstring("/vol1"))
+ Expect(session.OutputToString()).Should(ContainSubstring("/vol2"))
+ })
+
+ It("podman pod create --shm-size", func() {
+ podCreate := podmanTest.Podman([]string{"pod", "create", "--shm-size", "10mb"})
+ podCreate.WaitWithDefaultTimeout()
+ Expect(podCreate).Should(Exit(0))
+
+ run := podmanTest.Podman([]string{"run", "-it", "--pod", podCreate.OutputToString(), ALPINE, "mount"})
+ run.WaitWithDefaultTimeout()
+ Expect(run).Should(Exit(0))
+ t, strings := run.GrepString("shm on /dev/shm type tmpfs")
+ Expect(t).To(BeTrue())
+ Expect(strings[0]).Should(ContainSubstring("size=10240k"))
+ })
+
+ It("podman pod create --shm-size and --ipc=host conflict", func() {
+ podCreate := podmanTest.Podman([]string{"pod", "create", "--shm-size", "10mb"})
+ podCreate.WaitWithDefaultTimeout()
+ Expect(podCreate).Should(Exit(0))
+
+ run := podmanTest.Podman([]string{"run", "-dt", "--pod", podCreate.OutputToString(), "--ipc", "host", ALPINE})
+ run.WaitWithDefaultTimeout()
+ Expect(run).ShouldNot(Exit(0))
+ })
+
+ It("podman pod create --uts test", func() {
+ session := podmanTest.Podman([]string{"pod", "create", "--uts", "host"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"run", "-it", "--pod", session.OutputToString(), ALPINE, "printenv", "HOSTNAME"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).To(ContainSubstring(hostname))
+
+ podName := "utsPod"
+ ns := "ns:/proc/self/ns/"
+
+ // just share uts with a custom path
+ podCreate := podmanTest.Podman([]string{"pod", "create", "--uts", ns, "--name", podName, "--share", "uts"})
+ podCreate.WaitWithDefaultTimeout()
+ Expect(podCreate).Should(Exit(0))
+
+ podInspect := podmanTest.Podman([]string{"pod", "inspect", podName})
+ podInspect.WaitWithDefaultTimeout()
+ Expect(podInspect).Should(Exit(0))
+ podJSON := podInspect.InspectPodToJSON()
+ Expect(podJSON.InfraConfig).To(HaveField("UtsNS", ns))
+ })
})
diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go
index ab204992c..a2e090524 100644
--- a/test/e2e/pod_infra_container_test.go
+++ b/test/e2e/pod_infra_container_test.go
@@ -114,7 +114,7 @@ var _ = Describe("Podman pod create", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- session = podmanTest.Podman([]string{"run", "-d", "--pod", podID, nginx})
+ session = podmanTest.Podman([]string{"run", "-d", "--pod", podID, NGINX_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -125,6 +125,29 @@ var _ = Describe("Podman pod create", func() {
session = podmanTest.Podman([]string{"run", fedoraMinimal, "curl", "-f", "localhost"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
+
+ session = podmanTest.Podman([]string{"pod", "create", "--network", "host"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"run", "--name", "hostCtr", "--pod", session.OutputToString(), ALPINE, "readlink", "/proc/self/ns/net"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ ns := SystemExec("readlink", []string{"/proc/self/ns/net"})
+ ns.WaitWithDefaultTimeout()
+ Expect(ns).Should(Exit(0))
+ netns := ns.OutputToString()
+ Expect(netns).ToNot(BeEmpty())
+
+ Expect(session.OutputToString()).To(Equal(netns))
+
+ // Sanity Check for podman inspect
+ session = podmanTest.Podman([]string{"inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "hostCtr"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToString()).Should(Equal("''")) // no network path... host
+
})
It("podman pod correctly sets up IPCNS", func() {
@@ -214,11 +237,11 @@ var _ = Describe("Podman pod create", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- session = podmanTest.Podman([]string{"run", "-d", "--pod", podID, nginx})
+ session = podmanTest.Podman([]string{"run", "-d", "--pod", podID, NGINX_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- session = podmanTest.Podman([]string{"run", "--pod", podID, "--network", "bridge", nginx, "curl", "-f", "localhost"})
+ session = podmanTest.Podman([]string{"run", "--pod", podID, "--network", "bridge", NGINX_IMAGE, "curl", "-f", "localhost"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
})
diff --git a/test/e2e/pod_pause_test.go b/test/e2e/pod_pause_test.go
index d78890347..39e5696fa 100644
--- a/test/e2e/pod_pause_test.go
+++ b/test/e2e/pod_pause_test.go
@@ -56,7 +56,7 @@ var _ = Describe("Podman pod pause", func() {
Expect(result).Should(Exit(0))
})
- It("podman pod pause a running pod by id", func() {
+ It("pause a running pod by id", func() {
_, ec, podid := podmanTest.CreatePod(nil)
Expect(ec).To(Equal(0))
@@ -73,11 +73,10 @@ var _ = Describe("Podman pod pause", func() {
result = podmanTest.Podman([]string{"pod", "unpause", podid})
result.WaitWithDefaultTimeout()
-
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
})
- It("podman unpause a running pod by id", func() {
+ It("unpause a paused pod by id", func() {
_, ec, podid := podmanTest.CreatePod(nil)
Expect(ec).To(Equal(0))
@@ -85,14 +84,21 @@ var _ = Describe("Podman pod pause", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- result := podmanTest.Podman([]string{"pod", "unpause", podid})
+ result := podmanTest.Podman([]string{"pod", "pause", podid})
result.WaitWithDefaultTimeout()
+ Expect(result).Should(Exit(0))
+ Expect(result.OutputToStringArray()).Should(HaveLen(1))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.GetContainerStatus()).To(Equal(pausedState))
+ result = podmanTest.Podman([]string{"pod", "unpause", podid})
+ result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
+ Expect(result.OutputToStringArray()).Should(HaveLen(1))
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
})
- It("podman pod pause a running pod by name", func() {
+ It("unpause a paused pod by name", func() {
_, ec, _ := podmanTest.CreatePod(map[string][]string{"--name": {"test1"}})
Expect(ec).To(Equal(0))
@@ -102,13 +108,42 @@ var _ = Describe("Podman pod pause", func() {
result := podmanTest.Podman([]string{"pod", "pause", "test1"})
result.WaitWithDefaultTimeout()
-
Expect(result).Should(Exit(0))
+ Expect(result.OutputToStringArray()).Should(HaveLen(1))
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.GetContainerStatus()).To(Equal(pausedState))
result = podmanTest.Podman([]string{"pod", "unpause", "test1"})
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
+ Expect(result.OutputToStringArray()).Should(HaveLen(1))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ })
+
+ It("unpause --all", func() {
+ _, ec, podid1 := podmanTest.CreatePod(nil)
+ Expect(ec).To(Equal(0))
+ _, ec, podid2 := podmanTest.CreatePod(nil)
+ Expect(ec).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("", podid1)
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ session = podmanTest.RunTopContainerInPod("", podid2)
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ result := podmanTest.Podman([]string{"pod", "pause", podid1})
+ result.WaitWithDefaultTimeout()
+ Expect(result).Should(Exit(0))
+ Expect(result.OutputToStringArray()).Should(HaveLen(1))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring(pausedState))
+
+ result = podmanTest.Podman([]string{"pod", "unpause", "--all"})
+ result.WaitWithDefaultTimeout()
+ Expect(result).Should(Exit(0))
+ Expect(result.OutputToStringArray()).Should(HaveLen(1))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2))
})
})
diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go
index 75adf1724..119c8d41e 100644
--- a/test/e2e/prune_test.go
+++ b/test/e2e/prune_test.go
@@ -258,6 +258,29 @@ var _ = Describe("Podman prune", func() {
Expect(pods.OutputToStringArray()).To(HaveLen(2))
})
+ It("podman system prune networks", func() {
+ // About netavark network backend test.
+ session := podmanTest.Podman([]string{"network", "create", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"system", "prune", "-f"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ // Default network should still exist.
+ session = podmanTest.Podman([]string{"network", "ls", "-q", "--filter", "name=^podman$"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToStringArray()).To(HaveLen(1))
+
+ // The unused test network should have been removed.
+ session = podmanTest.Podman([]string{"network", "ls", "-q", "--filter", "name=^test$"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToStringArray()).To(HaveLen(0))
+ })
+
It("podman system prune - pod,container stopped", func() {
session := podmanTest.Podman([]string{"pod", "create"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go
index 55e8d637b..04b7a280d 100644
--- a/test/e2e/pull_test.go
+++ b/test/e2e/pull_test.go
@@ -256,7 +256,7 @@ var _ = Describe("Podman pull", func() {
It("podman pull from docker-archive", func() {
SkipIfRemote("podman-remote does not support pulling from docker-archive")
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
tarfn := filepath.Join(podmanTest.TempDir, "cirros.tar")
session := podmanTest.Podman([]string{"save", "-o", tarfn, "cirros"})
session.WaitWithDefaultTimeout()
@@ -319,7 +319,7 @@ var _ = Describe("Podman pull", func() {
It("podman pull from oci-archive", func() {
SkipIfRemote("podman-remote does not support pulling from oci-archive")
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
tarfn := filepath.Join(podmanTest.TempDir, "oci-cirrus.tar")
session := podmanTest.Podman([]string{"save", "--format", "oci-archive", "-o", tarfn, "cirros"})
session.WaitWithDefaultTimeout()
@@ -339,7 +339,7 @@ var _ = Describe("Podman pull", func() {
It("podman pull from local directory", func() {
SkipIfRemote("podman-remote does not support pulling from local directory")
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
dirpath := filepath.Join(podmanTest.TempDir, "cirros")
err = os.MkdirAll(dirpath, os.ModePerm)
Expect(err).ToNot(HaveOccurred())
@@ -363,7 +363,7 @@ var _ = Describe("Podman pull", func() {
It("podman pull from local OCI directory", func() {
SkipIfRemote("podman-remote does not support pulling from OCI directory")
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
dirpath := filepath.Join(podmanTest.TempDir, "cirros")
err = os.MkdirAll(dirpath, os.ModePerm)
Expect(err).ToNot(HaveOccurred())
diff --git a/test/e2e/push_test.go b/test/e2e/push_test.go
index 0288bf915..97567e40d 100644
--- a/test/e2e/push_test.go
+++ b/test/e2e/push_test.go
@@ -96,17 +96,16 @@ var _ = Describe("Podman push", func() {
})
It("podman push to local registry", func() {
- SkipIfRemote("Remote does not support --digestfile or --remove-signatures")
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
if rootless.IsRootless() {
- err := podmanTest.RestoreArtifact(registry)
+ err := podmanTest.RestoreArtifact(REGISTRY_IMAGE)
Expect(err).ToNot(HaveOccurred())
}
lock := GetPortLock("5000")
defer lock.Unlock()
- session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -118,6 +117,7 @@ var _ = Describe("Podman push", func() {
push.WaitWithDefaultTimeout()
Expect(push).Should(Exit(0))
+ SkipIfRemote("Remote does not support --digestfile")
// Test --digestfile option
push2 := podmanTest.Podman([]string{"push", "--tls-verify=false", "--digestfile=/tmp/digestfile.txt", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
push2.WaitWithDefaultTimeout()
@@ -156,7 +156,7 @@ var _ = Describe("Podman push", func() {
}
lock := GetPortLock("5000")
defer lock.Unlock()
- session := podmanTest.Podman([]string{"run", "--entrypoint", "htpasswd", registry, "-Bbn", "podmantest", "test"})
+ session := podmanTest.Podman([]string{"run", "--entrypoint", "htpasswd", REGISTRY_IMAGE, "-Bbn", "podmantest", "test"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -173,7 +173,7 @@ var _ = Describe("Podman push", func() {
strings.Join([]string{authPath, "/auth"}, ":"), "-e", "REGISTRY_AUTH=htpasswd", "-e",
"REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm", "-e", "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd",
"-v", strings.Join([]string{certPath, "/certs"}, ":"), "-e", "REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt",
- "-e", "REGISTRY_HTTP_TLS_KEY=/certs/domain.key", registry})
+ "-e", "REGISTRY_HTTP_TLS_KEY=/certs/domain.key", REGISTRY_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
diff --git a/test/e2e/rmi_test.go b/test/e2e/rmi_test.go
index cc3cceda5..d1a0cd6f5 100644
--- a/test/e2e/rmi_test.go
+++ b/test/e2e/rmi_test.go
@@ -50,7 +50,7 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi with short name", func() {
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
session := podmanTest.Podman([]string{"rmi", "cirros"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -58,7 +58,7 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi all images", func() {
- podmanTest.AddImageToRWStore(nginx)
+ podmanTest.AddImageToRWStore(NGINX_IMAGE)
session := podmanTest.Podman([]string{"rmi", "-a"})
session.WaitWithDefaultTimeout()
images := podmanTest.Podman([]string{"images"})
@@ -68,7 +68,7 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi all images forcibly with short options", func() {
- podmanTest.AddImageToRWStore(nginx)
+ podmanTest.AddImageToRWStore(NGINX_IMAGE)
session := podmanTest.Podman([]string{"rmi", "-fa"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -76,12 +76,12 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi tagged image", func() {
- podmanTest.AddImageToRWStore(cirros)
- setup := podmanTest.Podman([]string{"images", "-q", cirros})
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
+ setup := podmanTest.Podman([]string{"images", "-q", CIRROS_IMAGE})
setup.WaitWithDefaultTimeout()
Expect(setup).Should(Exit(0))
- session := podmanTest.Podman([]string{"tag", cirros, "foo:bar", "foo"})
+ session := podmanTest.Podman([]string{"tag", CIRROS_IMAGE, "foo:bar", "foo"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -93,8 +93,8 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi image with tags by ID cannot be done without force", func() {
- podmanTest.AddImageToRWStore(cirros)
- setup := podmanTest.Podman([]string{"images", "-q", cirros})
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
+ setup := podmanTest.Podman([]string{"images", "-q", CIRROS_IMAGE})
setup.WaitWithDefaultTimeout()
Expect(setup).Should(Exit(0))
cirrosID := setup.OutputToString()
@@ -116,8 +116,8 @@ var _ = Describe("Podman rmi", func() {
It("podman rmi image that is a parent of another image", func() {
Skip("I need help with this one. i don't understand what is going on")
- podmanTest.AddImageToRWStore(cirros)
- session := podmanTest.Podman([]string{"run", "--name", "c_test", cirros, "true"})
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
+ session := podmanTest.Podman([]string{"run", "--name", "c_test", CIRROS_IMAGE, "true"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -129,7 +129,7 @@ var _ = Describe("Podman rmi", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- session = podmanTest.Podman([]string{"rmi", cirros})
+ session = podmanTest.Podman([]string{"rmi", CIRROS_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -183,12 +183,12 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi with cached images", func() {
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
dockerfile := fmt.Sprintf(`FROM %s
RUN mkdir hello
RUN touch test.txt
ENV foo=bar
- `, cirros)
+ `, CIRROS_IMAGE)
podmanTest.BuildImage(dockerfile, "test", "true")
dockerfile = fmt.Sprintf(`FROM %s
@@ -196,7 +196,7 @@ var _ = Describe("Podman rmi", func() {
RUN touch test.txt
RUN mkdir blah
ENV foo=bar
- `, cirros)
+ `, CIRROS_IMAGE)
podmanTest.BuildImage(dockerfile, "test2", "true")
@@ -225,7 +225,7 @@ var _ = Describe("Podman rmi", func() {
podmanTest.BuildImage(dockerfile, "test3", "true")
- session = podmanTest.Podman([]string{"rmi", cirros})
+ session = podmanTest.Podman([]string{"rmi", CIRROS_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -250,7 +250,7 @@ var _ = Describe("Podman rmi", func() {
})
It("podman rmi -a with parent|child images", func() {
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
dockerfile := fmt.Sprintf(`FROM %s AS base
RUN touch /1
ENV LOCAL=/1
@@ -258,7 +258,7 @@ RUN find $LOCAL
FROM base
RUN find $LOCAL
-`, cirros)
+`, CIRROS_IMAGE)
podmanTest.BuildImage(dockerfile, "test", "true")
session := podmanTest.Podman([]string{"rmi", "-a"})
session.WaitWithDefaultTimeout()
@@ -285,7 +285,7 @@ RUN find $LOCAL
// a race, we may not hit the condition 100 percent of the time
// but local reproducers hit it all the time.
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
var wg sync.WaitGroup
buildAndRemove := func(i int) {
@@ -293,7 +293,7 @@ RUN find $LOCAL
defer wg.Done()
imageName := fmt.Sprintf("rmtest:%d", i)
containerfile := fmt.Sprintf(`FROM %s
-RUN touch %s`, cirros, imageName)
+RUN touch %s`, CIRROS_IMAGE, imageName)
podmanTest.BuildImage(containerfile, imageName, "false")
session := podmanTest.Podman([]string{"rmi", "-f", imageName})
diff --git a/test/e2e/run_aardvark_test.go b/test/e2e/run_aardvark_test.go
index 25eb8b538..2c7dea9f4 100644
--- a/test/e2e/run_aardvark_test.go
+++ b/test/e2e/run_aardvark_test.go
@@ -42,7 +42,7 @@ var _ = Describe("Podman run networking", func() {
defer podmanTest.removeNetwork(netName)
Expect(session).Should(Exit(0))
- ctrID := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netName, nginx})
+ ctrID := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netName, NGINX_IMAGE})
ctrID.WaitWithDefaultTimeout()
Expect(ctrID).Should(Exit(0))
cid := ctrID.OutputToString()
@@ -53,7 +53,7 @@ var _ = Describe("Podman run networking", func() {
cip := ctrIP.OutputToString()
Expect(cip).To(MatchRegexp(IPRegex))
- digShort(cid, "aone", []string{cip}, podmanTest)
+ digShort(cid, "aone", cip, podmanTest)
reverseLookup := podmanTest.Podman([]string{"exec", cid, "dig", "+short", "-x", cip})
reverseLookup.WaitWithDefaultTimeout()
@@ -72,7 +72,7 @@ var _ = Describe("Podman run networking", func() {
defer podmanTest.removeNetwork(netName)
Expect(session).Should(Exit(0))
- ctr1 := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netName, nginx})
+ ctr1 := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netName, NGINX_IMAGE})
ctr1.WaitWithDefaultTimeout()
Expect(ctr1).Should(Exit(0))
cid1 := ctr1.OutputToString()
@@ -83,7 +83,7 @@ var _ = Describe("Podman run networking", func() {
cip1 := ctrIP1.OutputToString()
Expect(cip1).To(MatchRegexp(IPRegex))
- ctr2 := podmanTest.Podman([]string{"run", "-dt", "--name", "atwo", "--network", netName, nginx})
+ ctr2 := podmanTest.Podman([]string{"run", "-dt", "--name", "atwo", "--network", netName, NGINX_IMAGE})
ctr2.WaitWithDefaultTimeout()
Expect(ctr2).Should(Exit(0))
cid2 := ctr2.OutputToString()
@@ -94,9 +94,9 @@ var _ = Describe("Podman run networking", func() {
cip2 := ctrIP2.OutputToString()
Expect(cip2).To(MatchRegexp(IPRegex))
- digShort("aone", "atwo", []string{cip2}, podmanTest)
+ digShort("aone", "atwo", cip2, podmanTest)
- digShort("atwo", "aone", []string{cip1}, podmanTest)
+ digShort("atwo", "aone", cip1, podmanTest)
reverseLookup12 := podmanTest.Podman([]string{"exec", cid1, "dig", "+short", "-x", cip2})
reverseLookup12.WaitWithDefaultTimeout()
@@ -123,7 +123,7 @@ var _ = Describe("Podman run networking", func() {
defer podmanTest.removeNetwork(netName)
Expect(session).Should(Exit(0))
- ctr1 := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netName, "--network-alias", "alias_a1,alias_1a", nginx})
+ ctr1 := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netName, "--network-alias", "alias_a1,alias_1a", NGINX_IMAGE})
ctr1.WaitWithDefaultTimeout()
Expect(ctr1).Should(Exit(0))
@@ -133,7 +133,7 @@ var _ = Describe("Podman run networking", func() {
cip1 := ctrIP1.OutputToString()
Expect(cip1).To(MatchRegexp(IPRegex))
- ctr2 := podmanTest.Podman([]string{"run", "-dt", "--name", "atwo", "--network", netName, "--network-alias", "alias_a2,alias_2a", nginx})
+ ctr2 := podmanTest.Podman([]string{"run", "-dt", "--name", "atwo", "--network", netName, "--network-alias", "alias_a2,alias_2a", NGINX_IMAGE})
ctr2.WaitWithDefaultTimeout()
Expect(ctr2).Should(Exit(0))
@@ -143,17 +143,17 @@ var _ = Describe("Podman run networking", func() {
cip2 := ctrIP2.OutputToString()
Expect(cip2).To(MatchRegexp(IPRegex))
- digShort("aone", "atwo", []string{cip2}, podmanTest)
+ digShort("aone", "atwo", cip2, podmanTest)
- digShort("aone", "alias_a2", []string{cip2}, podmanTest)
+ digShort("aone", "alias_a2", cip2, podmanTest)
- digShort("aone", "alias_2a", []string{cip2}, podmanTest)
+ digShort("aone", "alias_2a", cip2, podmanTest)
- digShort("atwo", "aone", []string{cip1}, podmanTest)
+ digShort("atwo", "aone", cip1, podmanTest)
- digShort("atwo", "alias_a1", []string{cip1}, podmanTest)
+ digShort("atwo", "alias_a1", cip1, podmanTest)
- digShort("atwo", "alias_1a", []string{cip1}, podmanTest)
+ digShort("atwo", "alias_1a", cip1, podmanTest)
})
@@ -170,11 +170,11 @@ var _ = Describe("Podman run networking", func() {
defer podmanTest.removeNetwork(netNameB)
Expect(sessionB).Should(Exit(0))
- ctrA1 := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netNameA, nginx})
+ ctrA1 := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netNameA, NGINX_IMAGE})
ctrA1.WaitWithDefaultTimeout()
cidA1 := ctrA1.OutputToString()
- ctrB1 := podmanTest.Podman([]string{"run", "-dt", "--name", "bone", "--network", netNameB, nginx})
+ ctrB1 := podmanTest.Podman([]string{"run", "-dt", "--name", "bone", "--network", netNameB, NGINX_IMAGE})
ctrB1.WaitWithDefaultTimeout()
cidB1 := ctrB1.OutputToString()
@@ -214,7 +214,7 @@ var _ = Describe("Podman run networking", func() {
defer podmanTest.removeNetwork(netNameB)
Expect(sessionB).Should(Exit(0))
- ctrA1 := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netNameA, nginx})
+ ctrA1 := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netNameA, NGINX_IMAGE})
ctrA1.WaitWithDefaultTimeout()
cidA1 := ctrA1.OutputToString()
@@ -224,7 +224,7 @@ var _ = Describe("Podman run networking", func() {
cipA1 := ctrIPA1.OutputToString()
Expect(cipA1).To(MatchRegexp(IPRegex))
- ctrB1 := podmanTest.Podman([]string{"run", "-dt", "--name", "bone", "--network", netNameB, nginx})
+ ctrB1 := podmanTest.Podman([]string{"run", "-dt", "--name", "bone", "--network", netNameB, NGINX_IMAGE})
ctrB1.WaitWithDefaultTimeout()
cidB1 := ctrB1.OutputToString()
@@ -234,7 +234,7 @@ var _ = Describe("Podman run networking", func() {
cipB1 := ctrIPB1.OutputToString()
Expect(cipB1).To(MatchRegexp(IPRegex))
- ctrA2B2 := podmanTest.Podman([]string{"run", "-dt", "--name", "atwobtwo", "--network", netNameA, "--network", netNameB, nginx})
+ ctrA2B2 := podmanTest.Podman([]string{"run", "-dt", "--name", "atwobtwo", "--network", netNameA, "--network", netNameB, NGINX_IMAGE})
ctrA2B2.WaitWithDefaultTimeout()
cidA2B2 := ctrA2B2.OutputToString()
@@ -250,13 +250,13 @@ var _ = Describe("Podman run networking", func() {
cipA2B22 := ctrIPA2B22.OutputToString()
Expect(cipA2B22).To(MatchRegexp(IPRegex))
- digShort("aone", "atwobtwo", []string{cipA2B21}, podmanTest)
+ digShort("aone", "atwobtwo", cipA2B21, podmanTest)
- digShort("bone", "atwobtwo", []string{cipA2B22}, podmanTest)
+ digShort("bone", "atwobtwo", cipA2B22, podmanTest)
- digShort("atwobtwo", "aone", []string{cipA1}, podmanTest)
+ digShort("atwobtwo", "aone", cipA1, podmanTest)
- digShort("atwobtwo", "bone", []string{cipB1}, podmanTest)
+ digShort("atwobtwo", "bone", cipB1, podmanTest)
})
It("Aardvark Test 6: Three subnets, first container on 1/2 and second on 2/3, w/ network aliases", func() {
@@ -278,11 +278,11 @@ var _ = Describe("Podman run networking", func() {
defer podmanTest.removeNetwork(netNameC)
Expect(sessionC).Should(Exit(0))
- ctrA := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netNameA, nginx})
+ ctrA := podmanTest.Podman([]string{"run", "-dt", "--name", "aone", "--network", netNameA, NGINX_IMAGE})
ctrA.WaitWithDefaultTimeout()
Expect(ctrA).Should(Exit(0))
- ctrC := podmanTest.Podman([]string{"run", "-dt", "--name", "cone", "--network", netNameC, nginx})
+ ctrC := podmanTest.Podman([]string{"run", "-dt", "--name", "cone", "--network", netNameC, NGINX_IMAGE})
ctrC.WaitWithDefaultTimeout()
Expect(ctrC).Should(Exit(0))
@@ -304,10 +304,9 @@ var _ = Describe("Podman run networking", func() {
Expect(ctrIPCB2).Should(Exit(0))
cipCB2 := ctrIPCB2.OutputToString()
- digShort("aone", "testB2_nw", []string{cipCB2}, podmanTest)
-
- digShort("cone", "testB1_nw", []string{cipAB1}, podmanTest)
+ digShort("aone", "testB2_nw", cipCB2, podmanTest)
+ digShort("cone", "testB1_nw", cipAB1, podmanTest)
})
})
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 3b32b4b82..1ad78c950 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -381,7 +381,7 @@ EXPOSE 2004-2005/tcp`, ALPINE)
session := podmanTest.Podman([]string{"run", "-dt", "-p", fmt.Sprintf("%d:%d", port1, port2), ALPINE, "/bin/sh"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- results := SystemExec("iptables", []string{"-t", "nat", "-L"})
+ results := SystemExec("iptables", []string{"-t", "nat", "-nvL"})
Expect(results).Should(Exit(0))
Expect(results.OutputToString()).To(ContainSubstring(fmt.Sprintf("%d", port2)))
@@ -513,7 +513,7 @@ EXPOSE 2004-2005/tcp`, ALPINE)
})
It("podman run network expose ports in image metadata", func() {
- session := podmanTest.Podman([]string{"create", "--name", "test", "-t", "-P", nginx})
+ session := podmanTest.Podman([]string{"create", "--name", "test", "-t", "-P", NGINX_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
results := podmanTest.Podman([]string{"inspect", "test"})
diff --git a/test/e2e/run_privileged_test.go b/test/e2e/run_privileged_test.go
index 4f0b512c6..dfaff7e67 100644
--- a/test/e2e/run_privileged_test.go
+++ b/test/e2e/run_privileged_test.go
@@ -131,6 +131,30 @@ var _ = Describe("Podman privileged container tests", func() {
Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 20))
})
+ It("podman privileged should restart after host devices change", func() {
+ containerName := "privileged-restart-test"
+ SkipIfRootless("Cannot create devices in /dev in rootless mode")
+ Expect(os.MkdirAll("/dev/foodevdir", os.ModePerm)).To(BeNil())
+
+ mknod := SystemExec("mknod", []string{"/dev/foodevdir/null", "c", "1", "3"})
+ mknod.WaitWithDefaultTimeout()
+ Expect(mknod).Should(Exit(0))
+
+ session := podmanTest.Podman([]string{"run", "--name=" + containerName, "--privileged", "-it", fedoraMinimal, "ls", "/dev"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ deviceFiles := session.OutputToStringArray()
+
+ os.RemoveAll("/dev/foodevdir")
+ session = podmanTest.Podman([]string{"start", "--attach", containerName})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ deviceFilesAfterRemoval := session.OutputToStringArray()
+ Expect(deviceFiles).To(Not(Equal(deviceFilesAfterRemoval)))
+ })
+
It("run no-new-privileges test", func() {
// Check if our kernel is new enough
k, err := IsKernelNewerThan("4.14")
diff --git a/test/e2e/run_staticip_test.go b/test/e2e/run_staticip_test.go
index af3f98d4b..8207f6d0b 100644
--- a/test/e2e/run_staticip_test.go
+++ b/test/e2e/run_staticip_test.go
@@ -28,7 +28,7 @@ var _ = Describe("Podman run with --ip flag", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- // Cleanup the CNI networks used by the tests
+ // Clean up the CNI networks used by the tests
os.RemoveAll("/var/lib/cni/networks/podman")
})
@@ -101,7 +101,7 @@ var _ = Describe("Podman run with --ip flag", func() {
It("Podman run two containers with the same IP", func() {
ip := GetRandomIPAddress()
- result := podmanTest.Podman([]string{"run", "-d", "--name", "nginx", "--ip", ip, nginx})
+ result := podmanTest.Podman([]string{"run", "-d", "--name", "nginx", "--ip", ip, NGINX_IMAGE})
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 828e92170..6edb705a1 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -73,8 +73,30 @@ var _ = Describe("Podman run", func() {
Expect(session.OutputToString()).To(ContainSubstring("graphRootMounted=1"))
})
+ It("podman run from manifest list", func() {
+ session := podmanTest.Podman([]string{"manifest", "create", "localhost/test:latest"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"build", "-f", "build/Containerfile.with-platform", "--platform", "linux/amd64,linux/arm64", "--manifest", "localhost/test:latest"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"run", "--platform", "linux/arm64", "localhost/test", "uname", "-a"})
+ session.WaitWithDefaultTimeout()
+ exitCode := session.ExitCode()
+ // The CI host may or may not support the requested platform. If it does, the output
+ // should contain `aarch64`; if it does not, the run should fail with the specific
+ // error `Exec format error`. Anything else is treated as a test failure.
+ if exitCode == 0 {
+ Expect(session.OutputToString()).To(ContainSubstring("aarch64"))
+ } else {
+ Expect(session.ErrorToString()).To(ContainSubstring("Exec format error"))
+ }
+ })
+
It("podman run a container based on a complex local image name", func() {
- imageName := strings.TrimPrefix(nginx, "quay.io/")
+ imageName := strings.TrimPrefix(NGINX_IMAGE, "quay.io/")
session := podmanTest.Podman([]string{"run", imageName, "ls"})
session.WaitWithDefaultTimeout()
Expect(session.ErrorToString()).ToNot(ContainSubstring("Trying to pull"))
@@ -119,10 +141,10 @@ var _ = Describe("Podman run", func() {
})
It("podman run a container based on on a short name with localhost", func() {
- tag := podmanTest.Podman([]string{"tag", nginx, "localhost/libpod/alpine_nginx:latest"})
+ tag := podmanTest.Podman([]string{"tag", NGINX_IMAGE, "localhost/libpod/alpine_nginx:latest"})
tag.WaitWithDefaultTimeout()
- rmi := podmanTest.Podman([]string{"rmi", nginx})
+ rmi := podmanTest.Podman([]string{"rmi", NGINX_IMAGE})
rmi.WaitWithDefaultTimeout()
session := podmanTest.Podman([]string{"run", "libpod/alpine_nginx:latest", "ls"})
@@ -132,10 +154,10 @@ var _ = Describe("Podman run", func() {
})
It("podman container run a container based on on a short name with localhost", func() {
- tag := podmanTest.Podman([]string{"image", "tag", nginx, "localhost/libpod/alpine_nginx:latest"})
+ tag := podmanTest.Podman([]string{"image", "tag", NGINX_IMAGE, "localhost/libpod/alpine_nginx:latest"})
tag.WaitWithDefaultTimeout()
- rmi := podmanTest.Podman([]string{"image", "rm", nginx})
+ rmi := podmanTest.Podman([]string{"image", "rm", NGINX_IMAGE})
rmi.WaitWithDefaultTimeout()
session := podmanTest.Podman([]string{"container", "run", "libpod/alpine_nginx:latest", "ls"})
@@ -176,7 +198,7 @@ var _ = Describe("Podman run", func() {
lock := GetPortLock("5000")
defer lock.Unlock()
- session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -1019,7 +1041,7 @@ echo -n %s >%s
})
It("podman run with built-in volume image", func() {
- session := podmanTest.Podman([]string{"run", "--rm", redis, "ls"})
+ session := podmanTest.Podman([]string{"run", "--rm", REDIS_IMAGE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -1084,7 +1106,7 @@ USER mail`, BB)
Expect(session).Should(Exit(0))
ctrID := session.OutputToString()
- // check that the read only option works
+ // check that the read-only option works
session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":ro", ALPINE, "touch", mountpoint + "abc.txt"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(1))
@@ -1108,13 +1130,13 @@ USER mail`, BB)
Expect(session).Should(Exit(125))
Expect(session.ErrorToString()).To(ContainSubstring("cannot set :z more than once in mount options"))
- // create new read only volume
+ // create new read-only volume
session = podmanTest.Podman([]string{"create", "--volume", vol + ":" + mountpoint + ":ro", ALPINE, "cat", mountpoint + filename})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
ctrID = session.OutputToString()
- // check if the original volume was mounted as read only that --volumes-from also mount it as read only
+ // check that if the original volume was mounted read-only, --volumes-from also mounts it read-only
session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "touch", mountpoint + "abc.txt"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(1))
@@ -1122,7 +1144,7 @@ USER mail`, BB)
})
It("podman run --volumes-from flag with built-in volumes", func() {
- session := podmanTest.Podman([]string{"create", redis, "sh"})
+ session := podmanTest.Podman([]string{"create", REDIS_IMAGE, "sh"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
ctrID := session.OutputToString()
@@ -1637,7 +1659,7 @@ USER mail`, BB)
session = podmanTest.Podman([]string{"run", "--umask", "9999", "--rm", ALPINE, "sh", "-c", "umask"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
- Expect(session.ErrorToString()).To(ContainSubstring("Invalid umask"))
+ Expect(session.ErrorToString()).To(ContainSubstring("invalid umask"))
})
It("podman run makes workdir from image", func() {
@@ -1679,24 +1701,24 @@ WORKDIR /madethis`, BB)
})
It("podman run container with --pull missing and only pull once", func() {
- session := podmanTest.Podman([]string{"run", "--pull", "missing", cirros, "ls"})
+ session := podmanTest.Podman([]string{"run", "--pull", "missing", CIRROS_IMAGE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.ErrorToString()).To(ContainSubstring("Trying to pull"))
- session = podmanTest.Podman([]string{"run", "--pull", "missing", cirros, "ls"})
+ session = podmanTest.Podman([]string{"run", "--pull", "missing", CIRROS_IMAGE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.ErrorToString()).ToNot(ContainSubstring("Trying to pull"))
})
It("podman run container with --pull missing should pull image multiple times", func() {
- session := podmanTest.Podman([]string{"run", "--pull", "always", cirros, "ls"})
+ session := podmanTest.Podman([]string{"run", "--pull", "always", CIRROS_IMAGE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.ErrorToString()).To(ContainSubstring("Trying to pull"))
- session = podmanTest.Podman([]string{"run", "--pull", "always", cirros, "ls"})
+ session = podmanTest.Podman([]string{"run", "--pull", "always", CIRROS_IMAGE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.ErrorToString()).To(ContainSubstring("Trying to pull"))
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 3bef889b7..aa8f49176 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -150,7 +150,7 @@ var _ = Describe("Podman run with volumes", func() {
})
It("podman run with conflict between image volume and user mount succeeds", func() {
- err = podmanTest.RestoreArtifact(redis)
+ err = podmanTest.RestoreArtifact(REDIS_IMAGE)
Expect(err).ToNot(HaveOccurred())
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
err := os.Mkdir(mountPath, 0755)
@@ -160,7 +160,7 @@ var _ = Describe("Podman run with volumes", func() {
Expect(err).To(BeNil(), "os.Create(testfile)")
f.Close()
Expect(err).To(BeNil())
- session := podmanTest.Podman([]string{"run", "-v", fmt.Sprintf("%s:/data", mountPath), redis, "ls", "/data/test1"})
+ session := podmanTest.Podman([]string{"run", "-v", fmt.Sprintf("%s:/data", mountPath), REDIS_IMAGE, "ls", "/data/test1"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
})
@@ -325,6 +325,51 @@ var _ = Describe("Podman run with volumes", func() {
})
+ It("podman support overlay volume with custom upperdir and workdir", func() {
+ SkipIfRemote("Overlay volumes only work locally")
+ if os.Getenv("container") != "" {
+ Skip("Overlay mounts not supported when running in a container")
+ }
+ if rootless.IsRootless() {
+ if _, err := exec.LookPath("fuse-overlayfs"); err != nil {
+ Skip("Fuse-Overlayfs required for rootless overlay mount test")
+ }
+ }
+
+ // Use bindsource instead of named volume
+ bindSource := filepath.Join(tempdir, "bindsource")
+ err := os.Mkdir(bindSource, 0755)
+ Expect(err).To(BeNil(), "mkdir "+bindSource)
+
+ // create persistent upperdir on host
+ upperDir := filepath.Join(tempdir, "upper")
+ err = os.Mkdir(upperDir, 0755)
+ Expect(err).To(BeNil(), "mkdir "+upperDir)
+
+ // create persistent workdir on host
+ workDir := filepath.Join(tempdir, "work")
+ err = os.Mkdir(workDir, 0755)
+ Expect(err).To(BeNil(), "mkdir "+workDir)
+
+ overlayOpts := fmt.Sprintf("upperdir=%s,workdir=%s", upperDir, workDir)
+
+ // create file on overlay volume
+ session := podmanTest.Podman([]string{"run", "--volume", bindSource + ":/data:O," + overlayOpts, ALPINE, "sh", "-c", "echo hello >> " + "/data/overlay"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"run", "--volume", bindSource + ":/data:O," + overlayOpts, ALPINE, "sh", "-c", "ls /data"})
+ session.WaitWithDefaultTimeout()
+ // must contain the `overlay` file since it persists on the specified upperdir and workdir
+ Expect(session.OutputToString()).To(ContainSubstring("overlay"))
+
+ session = podmanTest.Podman([]string{"run", "--volume", bindSource + ":/data:O", ALPINE, "sh", "-c", "ls /data"})
+ session.WaitWithDefaultTimeout()
+ // must not contain the `overlay` file created on the custom upperdir and workdir since none is specified here
+ Expect(session.OutputToString()).To(Not(ContainSubstring("overlay")))
+
+ })
+
It("podman run with noexec can't exec", func() {
session := podmanTest.Podman([]string{"run", "--rm", "-v", "/bin:/hostbin:noexec", ALPINE, "/hostbin/ls", "/"})
session.WaitWithDefaultTimeout()
@@ -407,6 +452,14 @@ var _ = Describe("Podman run with volumes", func() {
separateVolumeSession.WaitWithDefaultTimeout()
Expect(separateVolumeSession).Should(Exit(0))
Expect(separateVolumeSession.OutputToString()).To(Equal(baselineOutput))
+
+ copySession := podmanTest.Podman([]string{"run", "--rm", "-v", "testvol3:/etc/apk:copy", ALPINE, "stat", "-c", "%h", "/etc/apk/arch"})
+ copySession.WaitWithDefaultTimeout()
+ Expect(copySession).Should(Exit(0))
+
+ noCopySession := podmanTest.Podman([]string{"run", "--rm", "-v", "testvol4:/etc/apk:nocopy", ALPINE, "stat", "-c", "%h", "/etc/apk/arch"})
+ noCopySession.WaitWithDefaultTimeout()
+ Expect(noCopySession).Should(Exit(1))
})
It("podman named volume copyup symlink", func() {
@@ -539,7 +592,7 @@ RUN sh -c "cd /etc/apk && ln -s ../../testfile"`, ALPINE)
})
It("podman run image volume is not noexec", func() {
- session := podmanTest.Podman([]string{"run", "--rm", redis, "grep", "/data", "/proc/self/mountinfo"})
+ session := podmanTest.Podman([]string{"run", "--rm", REDIS_IMAGE, "grep", "/data", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.OutputToString()).To(Not(ContainSubstring("noexec")))
@@ -625,6 +678,15 @@ VOLUME /test/`, ALPINE)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
+ // Test overlay mount when lowerdir is a relative path.
+ f, err = os.Create("hello")
+ Expect(err).To(BeNil(), "os.Create")
+ f.Close()
+ session = podmanTest.Podman([]string{"run", "--rm", "-v", ".:/app:O", ALPINE, "ls", "/app"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.OutputToString()).To(ContainSubstring("hello"))
+ Expect(session).Should(Exit(0))
+
// Make sure modifications in container do not show up on host
session = podmanTest.Podman([]string{"run", "--rm", "-v", volumeFlag, ALPINE, "touch", "/run/test/container"})
session.WaitWithDefaultTimeout()
@@ -863,6 +925,20 @@ USER testuser`, fedoraMinimal)
Expect(session.OutputToString()).To(Equal(perms))
})
+ It("podman run with -v $SRC:/run does not create /run/.containerenv", func() {
+ mountSrc := filepath.Join(podmanTest.TempDir, "vol-test1")
+ err := os.MkdirAll(mountSrc, 0755)
+ Expect(err).To(BeNil())
+
+ session := podmanTest.Podman([]string{"run", "-v", mountSrc + ":/run", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ // the file should not have been created
+ _, err = os.Stat(filepath.Join(mountSrc, ".containerenv"))
+ Expect(err).To(Not(BeNil()))
+ })
+
It("podman volume with uid and gid works", func() {
volName := "testVol"
volCreate := podmanTest.Podman([]string{"volume", "create", "--opt", "o=uid=1000", volName})
@@ -894,4 +970,32 @@ USER testuser`, fedoraMinimal)
Expect(volMount).Should(Exit(0))
Expect(volMount.OutputToString()).To(Equal("1000:1000"))
})
+
+ It("podman run -v with a relative dir", func() {
+ mountPath := filepath.Join(podmanTest.TempDir, "vol")
+ err = os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
+ defer func() {
+ err := os.RemoveAll(mountPath)
+ Expect(err).ToNot(HaveOccurred())
+ }()
+
+ f, err := os.CreateTemp(mountPath, "podman")
+ Expect(err).ToNot(HaveOccurred())
+
+ cwd, err := os.Getwd()
+ Expect(err).ToNot(HaveOccurred())
+
+ err = os.Chdir(mountPath)
+ Expect(err).ToNot(HaveOccurred())
+ defer func() {
+ err := os.Chdir(cwd)
+ Expect(err).ToNot(HaveOccurred())
+ }()
+
+ run := podmanTest.Podman([]string{"run", "-it", "--security-opt", "label=disable", "-v", "./:" + dest, ALPINE, "ls", dest})
+ run.WaitWithDefaultTimeout()
+ Expect(run).Should(Exit(0))
+ Expect(run.OutputToString()).Should(ContainSubstring(filepath.Base(f.Name())))
+ })
})
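
As a rough command-line sketch of the overlay upperdir/workdir behaviour the new test above exercises (the /tmp paths and the alpine image here are illustrative, not taken from the patch):

    # bind source plus persistent upperdir/workdir on the host
    mkdir -p /tmp/bindsource /tmp/upper /tmp/work
    # write through the overlay mount; changes land in the custom upperdir
    podman run --rm -v /tmp/bindsource:/data:O,upperdir=/tmp/upper,workdir=/tmp/work \
        alpine sh -c 'echo hello >> /data/overlay'
    # the file persists in the upperdir, while the bind source stays untouched
    ls /tmp/upper
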
diff --git a/test/e2e/save_test.go b/test/e2e/save_test.go
index 897e49ef7..94c363dd4 100644
--- a/test/e2e/save_test.go
+++ b/test/e2e/save_test.go
@@ -153,7 +153,7 @@ var _ = Describe("Podman save", func() {
defer os.Setenv("GNUPGHOME", origGNUPGHOME)
port := 5000
- session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", strings.Join([]string{strconv.Itoa(port), strconv.Itoa(port)}, ":"), "quay.io/libpod/registry:2.6"})
+ session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", strings.Join([]string{strconv.Itoa(port), strconv.Itoa(port)}, ":"), REGISTRY_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
if !WaitContainerReady(podmanTest, "registry", "listening on", 20, 1) {
@@ -226,13 +226,17 @@ default-docker:
})
It("podman save --multi-image-archive (untagged images)", func() {
- // Refer to images via ID instead of tag.
- session := podmanTest.Podman([]string{"images", "--format", "{{.ID}}"})
+ // #14468: to make execution time more predictable, save at
+ // most three images and sort them by size.
+ session := podmanTest.Podman([]string{"images", "--sort", "size", "--format", "{{.ID}}"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
ids := session.OutputToStringArray()
Expect(len(ids)).To(BeNumerically(">", 1), "We need to have *some* images to save")
+ if len(ids) > 3 {
+ ids = ids[:3]
+ }
multiImageSave(podmanTest, ids)
})
})
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 64677ba54..f8b1bc836 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -206,7 +206,7 @@ registries = ['{{.Host}}:{{.Port}}']`
port := GetPort()
fakereg := podmanTest.Podman([]string{"run", "-d", "--name", "registry",
"-p", fmt.Sprintf("%d:5000", port),
- registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
fakereg.WaitWithDefaultTimeout()
Expect(fakereg).Should(Exit(0))
@@ -231,7 +231,7 @@ registries = ['{{.Host}}:{{.Port}}']`
}
port := GetPort()
registry := podmanTest.Podman([]string{"run", "-d", "--name", "registry3",
- "-p", fmt.Sprintf("%d:5000", port), registry,
+ "-p", fmt.Sprintf("%d:5000", port), REGISTRY_IMAGE,
"/entrypoint.sh", "/etc/docker/registry/config.yml"})
registry.WaitWithDefaultTimeout()
Expect(registry).Should(Exit(0))
@@ -268,7 +268,7 @@ registries = ['{{.Host}}:{{.Port}}']`
port := GetPort()
ep := endpoint{Port: fmt.Sprintf("%d", port), Host: "localhost"}
registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%d:5000", port),
- "--name", "registry4", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ "--name", "registry4", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
registry.WaitWithDefaultTimeout()
Expect(registry).Should(Exit(0))
@@ -313,7 +313,7 @@ registries = ['{{.Host}}:{{.Port}}']`
port := GetPort()
ep := endpoint{Port: fmt.Sprintf("%d", port), Host: "localhost"}
registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%d:5000", port),
- "--name", "registry5", registry})
+ "--name", "registry5", REGISTRY_IMAGE})
registry.WaitWithDefaultTimeout()
Expect(registry).Should(Exit(0))
@@ -353,7 +353,7 @@ registries = ['{{.Host}}:{{.Port}}']`
port := GetPort()
ep := endpoint{Port: fmt.Sprintf("%d", port), Host: "localhost"}
registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%d:5000", port),
- "--name", "registry6", registry})
+ "--name", "registry6", REGISTRY_IMAGE})
registry.WaitWithDefaultTimeout()
Expect(registry).Should(Exit(0))
@@ -401,7 +401,7 @@ registries = ['{{.Host}}:{{.Port}}']`
ep3 := endpoint{Port: fmt.Sprintf("%d", port3), Host: "localhost"}
registryLocal := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%d", port1),
- "--name", "registry7", registry})
+ "--name", "registry7", REGISTRY_IMAGE})
registryLocal.WaitWithDefaultTimeout()
Expect(registryLocal).Should(Exit(0))
@@ -409,7 +409,7 @@ registries = ['{{.Host}}:{{.Port}}']`
Fail("Cannot start docker registry on port %s", port1)
}
- registryLocal = podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%d:5000", port2), "--name", "registry8", registry})
+ registryLocal = podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%d:5000", port2), "--name", "registry8", REGISTRY_IMAGE})
registryLocal.WaitWithDefaultTimeout()
Expect(registryLocal).Should(Exit(0))
@@ -455,7 +455,6 @@ registries = ['{{.Host}}:{{.Port}}']`
})
It("podman search with wildcards", func() {
- Skip("FIXME: search on registry.redhat.io is broken (Dec 16 '21)")
search := podmanTest.Podman([]string{"search", "registry.redhat.io/*openshift*"})
search.WaitWithDefaultTimeout()
Expect(search).Should(Exit(0))
diff --git a/test/e2e/stats_test.go b/test/e2e/stats_test.go
index b43a81cd3..3000a819f 100644
--- a/test/e2e/stats_test.go
+++ b/test/e2e/stats_test.go
@@ -236,4 +236,15 @@ var _ = Describe("Podman stats", func() {
Expect(customLimit).To(BeNumerically("<", defaultLimit))
})
+
+ It("podman stats with a container that is not running", func() {
+ ctr := "created_container"
+ session := podmanTest.Podman([]string{"create", "--name", ctr, ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"stats", "--no-stream", ctr})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ })
})
diff --git a/test/e2e/system_connection_test.go b/test/e2e/system_connection_test.go
index 2228c23b2..baa31424b 100644
--- a/test/e2e/system_connection_test.go
+++ b/test/e2e/system_connection_test.go
@@ -47,9 +47,7 @@ var _ = Describe("podman system connection", func() {
}
f := CurrentGinkgoTestDescription()
- _, _ = GinkgoWriter.Write(
- []byte(
- fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())))
+ processTestResult(f)
})
Context("without running API service", func() {
@@ -58,7 +56,7 @@ var _ = Describe("podman system connection", func() {
"--default",
"--identity", "~/.ssh/id_rsa",
"QA",
- "ssh://root@server.fubar.com:2222/run/podman/podman.sock",
+ "ssh://root@podman.test:2222/run/podman/podman.sock",
}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
@@ -67,10 +65,10 @@ var _ = Describe("podman system connection", func() {
cfg, err := config.ReadCustomConfig()
Expect(err).ShouldNot(HaveOccurred())
- Expect(cfg).To(HaveActiveService("QA"))
+ Expect(cfg).Should(HaveActiveService("QA"))
Expect(cfg).Should(VerifyService(
"QA",
- "ssh://root@server.fubar.com:2222/run/podman/podman.sock",
+ "ssh://root@podman.test:2222/run/podman/podman.sock",
"~/.ssh/id_rsa",
))
@@ -82,7 +80,7 @@ var _ = Describe("podman system connection", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- Expect(config.ReadCustomConfig()).To(HaveActiveService("QE"))
+ Expect(config.ReadCustomConfig()).Should(HaveActiveService("QE"))
})
It("add UDS", func() {
@@ -141,7 +139,7 @@ var _ = Describe("podman system connection", func() {
"--default",
"--identity", "~/.ssh/id_rsa",
"QA",
- "ssh://root@server.fubar.com:2222/run/podman/podman.sock",
+ "ssh://root@podman.test:2222/run/podman/podman.sock",
})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -155,8 +153,8 @@ var _ = Describe("podman system connection", func() {
cfg, err := config.ReadCustomConfig()
Expect(err).ShouldNot(HaveOccurred())
- Expect(cfg.Engine.ActiveService).To(BeEmpty())
- Expect(cfg.Engine.ServiceDestinations).To(BeEmpty())
+ Expect(cfg.Engine.ActiveService).Should(BeEmpty())
+ Expect(cfg.Engine.ServiceDestinations).Should(BeEmpty())
}
})
@@ -165,7 +163,7 @@ var _ = Describe("podman system connection", func() {
"--default",
"--identity", "~/.ssh/id_rsa",
"QA",
- "ssh://root@server.fubar.com:2222/run/podman/podman.sock",
+ "ssh://root@podman.test:2222/run/podman/podman.sock",
})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -187,7 +185,7 @@ var _ = Describe("podman system connection", func() {
"--default",
"--identity", "~/.ssh/id_rsa",
name,
- "ssh://root@server.fubar.com:2222/run/podman/podman.sock",
+ "ssh://root@podman.test:2222/run/podman/podman.sock",
}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
@@ -247,7 +245,7 @@ var _ = Describe("podman system connection", func() {
// podman-remote commands will be executed by ginkgo directly.
SkipIfContainerized("sshd is not available when running in a container")
SkipIfRemote("connection heuristic requires both podman and podman-remote binaries")
- SkipIfNotRootless(fmt.Sprintf("FIXME: setup ssh keys when root. uid(%d) euid(%d)", os.Getuid(), os.Geteuid()))
+ SkipIfNotRootless(fmt.Sprintf("FIXME: set up ssh keys when root. uid(%d) euid(%d)", os.Getuid(), os.Geteuid()))
SkipIfSystemdNotRunning("cannot test connection heuristic if systemd is not running")
SkipIfNotActive("sshd", "cannot test connection heuristic if sshd is not running")
})
diff --git a/test/e2e/system_df_test.go b/test/e2e/system_df_test.go
index 5a23fc0bb..998fa8b59 100644
--- a/test/e2e/system_df_test.go
+++ b/test/e2e/system_df_test.go
@@ -70,6 +70,17 @@ var _ = Describe("podman system df", func() {
Expect(containers[1]).To(Equal("2"), "total containers expected")
Expect(volumes[2]).To(Equal("2"), "total volumes expected")
Expect(volumes[6]).To(Equal("(50%)"), "percentage usage expected")
+
+ session = podmanTest.Podman([]string{"rm", "container1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ session = podmanTest.Podman([]string{"system", "df"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ volumes = strings.Fields(session.OutputToStringArray()[3])
+ // percentages on volumes were being calculated incorrectly. Make sure we only report 100% and not above
+ Expect(volumes[6]).To(Equal("(100%)"), "percentage usage expected")
+
})
It("podman system df image with no tag", func() {
@@ -86,4 +97,17 @@ var _ = Describe("podman system df", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
})
+
+ It("podman system df --format \"{{ json . }}\"", func() {
+ session := podmanTest.Podman([]string{"create", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"system", "df", "--format", "{{ json . }}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.LineInOutputContains("Size"))
+ Expect(session.LineInOutputContains("Reclaimable"))
+ Expect(session.IsJSONOutputValid())
+ })
})
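
A rough sketch of the `--format "{{ json . }}"` check added above; the only fields the test relies on are Size and Reclaimable, so only those are assumed here, and the alpine image name is illustrative:

    podman create alpine
    podman system df --format '{{ json . }}'
    # each output line should be valid JSON and include "Size" and "Reclaimable"
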
diff --git a/test/e2e/system_reset_test.go b/test/e2e/system_reset_test.go
index 28f2e25ca..075ea435c 100644
--- a/test/e2e/system_reset_test.go
+++ b/test/e2e/system_reset_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ "github.com/containers/podman/v4/pkg/rootless"
. "github.com/containers/podman/v4/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -92,9 +93,12 @@ var _ = Describe("podman system reset", func() {
// TODO: machine tests currently don't run outside of the machine test pkg
// no machines are created here to cleanup
- session = podmanTest.Podman([]string{"machine", "list", "-q"})
- session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(0))
- Expect(session.OutputToStringArray()).To(BeEmpty())
+ // machine commands are rootless only
+ if rootless.IsRootless() {
+ session = podmanTest.Podman([]string{"machine", "list", "-q"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToStringArray()).To(BeEmpty())
+ }
})
})
diff --git a/test/e2e/systemd_activate_test.go b/test/e2e/systemd_activate_test.go
index aeea4f932..240139b98 100644
--- a/test/e2e/systemd_activate_test.go
+++ b/test/e2e/systemd_activate_test.go
@@ -4,9 +4,11 @@ import (
"errors"
"fmt"
"io/fs"
+ "net"
"os"
"os/exec"
"path/filepath"
+ "strconv"
"syscall"
"time"
@@ -21,6 +23,7 @@ var _ = Describe("Systemd activate", func() {
var tempDir string
var err error
var podmanTest *PodmanTestIntegration
+ var activate string
BeforeEach(func() {
tempDir, err = testUtils.CreateTempDirInTempDir()
@@ -31,17 +34,10 @@ var _ = Describe("Systemd activate", func() {
podmanTest = PodmanTestCreate(tempDir)
podmanTest.Setup()
- })
- AfterEach(func() {
- podmanTest.Cleanup()
- processTestResult(CurrentGinkgoTestDescription())
- })
-
- It("stop podman.service", func() {
SkipIfRemote("Testing stopped service requires both podman and podman-remote binaries")
- activate, err := exec.LookPath("systemd-socket-activate")
+ activate, err = exec.LookPath("systemd-socket-activate")
if err != nil {
activate = "/usr/bin/systemd-socket-activate"
}
@@ -54,14 +50,22 @@ var _ = Describe("Systemd activate", func() {
case err != nil:
Skip(err.Error())
}
+ })
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ processTestResult(CurrentGinkgoTestDescription())
+ })
+
+ It("stop podman.service", func() {
// systemd-socket-activate does not support DNS lookups
host := "127.0.0.1"
port, err := podmanUtils.GetRandomPort()
Expect(err).ToNot(HaveOccurred())
+ addr := net.JoinHostPort(host, strconv.Itoa(port))
activateSession := testUtils.StartSystemExec(activate, []string{
- fmt.Sprintf("--listen=%s:%d", host, port),
+ "--listen", addr,
podmanTest.PodmanBinary,
"--root=" + filepath.Join(tempDir, "server_root"),
"system", "service",
@@ -71,7 +75,7 @@ var _ = Describe("Systemd activate", func() {
// Curried functions for specialized podman calls
podmanRemote := func(args ...string) *testUtils.PodmanSession {
- args = append([]string{"--url", fmt.Sprintf("tcp://%s:%d", host, port)}, args...)
+ args = append([]string{"--url", "tcp://" + addr}, args...)
return testUtils.SystemExec(podmanTest.RemotePodmanBinary, args)
}
@@ -103,4 +107,37 @@ var _ = Describe("Systemd activate", func() {
Expect(abiSession).To(Exit(0))
Expect(abiSession.OutputToString()).To(Equal("true"))
})
+
+ It("invalid systemd file descriptor", func() {
+ host := "127.0.0.1"
+ port, err := podmanUtils.GetRandomPort()
+ Expect(err).ToNot(HaveOccurred())
+
+ addr := net.JoinHostPort(host, strconv.Itoa(port))
+
+ // start systemd activation with datagram socket
+ activateSession := testUtils.StartSystemExec(activate, []string{
+ "--datagram", "--listen", addr,
+ podmanTest.PodmanBinary,
+ "--root=" + filepath.Join(tempDir, "server_root"),
+ "system", "service",
+ "--time=0",
+ })
+ Expect(activateSession.Exited).ShouldNot(Receive(), "Failed to start podman service")
+
+ // we have to wait for systemd-socket-activate to become ready
+ time.Sleep(1 * time.Second)
+
+ // now dial the socket to start podman
+ conn, err := net.Dial("udp", addr)
+ Expect(err).ToNot(HaveOccurred())
+ defer conn.Close()
+ _, err = conn.Write([]byte("test"))
+ Expect(err).ToNot(HaveOccurred())
+
+ // wait for podman to exit
+ activateSession.Wait(10)
+ Expect(activateSession).To(Exit(125))
+ Expect(activateSession.ErrorToString()).To(ContainSubstring("Error: unexpected fd received from systemd: cannot listen on it"))
+ })
})
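
In shell terms, the new datagram-socket case above amounts to roughly the following (port is illustrative, and the UDP write uses a bash-only /dev/udp redirection): systemd-socket-activate hands podman a UDP fd, which the service refuses to listen on.

    systemd-socket-activate --datagram --listen 127.0.0.1:9999 \
        podman system service --time=0 &
    # dialing the UDP socket starts podman, which should exit 125 with
    # "unexpected fd received from systemd: cannot listen on it"
    echo test > /dev/udp/127.0.0.1/9999
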
diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go
index a1a080904..7b3552cc2 100644
--- a/test/e2e/systemd_test.go
+++ b/test/e2e/systemd_test.go
@@ -60,7 +60,7 @@ WantedBy=default.target
Expect(stop).Should(Exit(0))
}()
- create := podmanTest.Podman([]string{"create", "--name", "redis", redis})
+ create := podmanTest.Podman([]string{"create", "--name", "redis", REDIS_IMAGE})
create.WaitWithDefaultTimeout()
Expect(create).Should(Exit(0))
diff --git a/test/e2e/tree_test.go b/test/e2e/tree_test.go
index e1282d2b4..5b552e987 100644
--- a/test/e2e/tree_test.go
+++ b/test/e2e/tree_test.go
@@ -36,7 +36,7 @@ var _ = Describe("Podman image tree", func() {
It("podman image tree", func() {
SkipIfRemote("podman-image-tree is not supported for remote clients")
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
dockerfile := `FROM quay.io/libpod/cirros:latest
RUN mkdir hello
RUN touch test.txt
diff --git a/test/e2e/untag_test.go b/test/e2e/untag_test.go
index 90b0cc95f..b53d654f8 100644
--- a/test/e2e/untag_test.go
+++ b/test/e2e/untag_test.go
@@ -33,8 +33,8 @@ var _ = Describe("Podman untag", func() {
})
It("podman untag all", func() {
- podmanTest.AddImageToRWStore(cirros)
- tags := []string{cirros, "registry.com/foo:bar", "localhost/foo:bar"}
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
+ tags := []string{CIRROS_IMAGE, "registry.com/foo:bar", "localhost/foo:bar"}
cmd := []string{"tag"}
cmd = append(cmd, tags...)
@@ -50,7 +50,7 @@ var _ = Describe("Podman untag", func() {
}
// No arguments -> remove all tags.
- session = podmanTest.Podman([]string{"untag", cirros})
+ session = podmanTest.Podman([]string{"untag", CIRROS_IMAGE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -63,7 +63,7 @@ var _ = Describe("Podman untag", func() {
})
It("podman tag/untag - tag normalization", func() {
- podmanTest.AddImageToRWStore(cirros)
+ podmanTest.AddImageToRWStore(CIRROS_IMAGE)
tests := []struct {
tag, normalized string
@@ -77,7 +77,7 @@ var _ = Describe("Podman untag", func() {
// Make sure that the user input is normalized correctly for
// `podman tag` and `podman untag`.
for _, tt := range tests {
- session := podmanTest.Podman([]string{"tag", cirros, tt.tag})
+ session := podmanTest.Podman([]string{"tag", CIRROS_IMAGE, tt.tag})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -85,7 +85,7 @@ var _ = Describe("Podman untag", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- session = podmanTest.Podman([]string{"untag", cirros, tt.tag})
+ session = podmanTest.Podman([]string{"untag", CIRROS_IMAGE, tt.tag})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
diff --git a/test/e2e/volume_create_test.go b/test/e2e/volume_create_test.go
index 0bf5acbf1..7a975f6a5 100644
--- a/test/e2e/volume_create_test.go
+++ b/test/e2e/volume_create_test.go
@@ -110,15 +110,24 @@ var _ = Describe("Podman volume create", func() {
Expect(session.OutputToString()).To(ContainSubstring("hello"))
})
- It("podman import volume should fail", func() {
+ It("podman import/export volume should fail", func() {
// try import on volume or source which does not exists
- if podmanTest.RemoteTest {
- Skip("Volume export check does not work with a remote client")
- }
+ SkipIfRemote("Volume export check does not work with a remote client")
session := podmanTest.Podman([]string{"volume", "import", "notfound", "notfound.tar"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
+ Expect(session.ErrorToString()).To(ContainSubstring("open notfound.tar: no such file or directory"))
+
+ session = podmanTest.Podman([]string{"volume", "import", "notfound", "-"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(ExitWithError())
+ Expect(session.ErrorToString()).To(ContainSubstring("no such volume notfound"))
+
+ session = podmanTest.Podman([]string{"volume", "export", "notfound"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(ExitWithError())
+ Expect(session.ErrorToString()).To(ContainSubstring("no such volume notfound"))
})
It("podman create volume with bad volume option", func() {
@@ -153,4 +162,19 @@ var _ = Describe("Podman volume create", func() {
Expect(inspectOpts).Should(Exit(0))
Expect(inspectOpts.OutputToString()).To(Equal(optionStrFormatExpect))
})
+
+ It("podman create volume with o=timeout", func() {
+ volName := "testVol"
+ timeout := 10
+ timeoutStr := "10"
+ session := podmanTest.Podman([]string{"volume", "create", "--opt", fmt.Sprintf("o=timeout=%d", timeout), volName})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ inspectTimeout := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .Timeout }}", volName})
+ inspectTimeout.WaitWithDefaultTimeout()
+ Expect(inspectTimeout).Should(Exit(0))
+ Expect(inspectTimeout.OutputToString()).To(Equal(timeoutStr))
+
+ })
})
diff --git a/test/e2e/volume_ls_test.go b/test/e2e/volume_ls_test.go
index 19f87fb8a..dcfb13f4e 100644
--- a/test/e2e/volume_ls_test.go
+++ b/test/e2e/volume_ls_test.go
@@ -152,6 +152,37 @@ var _ = Describe("Podman volume ls", func() {
Expect(lsDangling).Should(Exit(0))
Expect(lsDangling.OutputToString()).To(ContainSubstring(volName1))
})
+
+ It("podman ls volume with --filter name", func() {
+ volName1 := "volume1"
+ session := podmanTest.Podman([]string{"volume", "create", volName1})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ volName2 := "volume2"
+ session2 := podmanTest.Podman([]string{"volume", "create", volName2})
+ session2.WaitWithDefaultTimeout()
+ Expect(session2).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls", "--filter", "name=volume1*"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToStringArray()).To(HaveLen(3))
+ Expect(session.OutputToStringArray()[1]).To(ContainSubstring(volName1))
+ Expect(session.OutputToStringArray()[2]).To(ContainSubstring(volName2))
+
+ session = podmanTest.Podman([]string{"volume", "ls", "--filter", "name=volumex"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToStringArray()).To(BeEmpty())
+
+ session = podmanTest.Podman([]string{"volume", "ls", "--filter", "name=volume1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToStringArray()).To(HaveLen(2))
+ Expect(session.OutputToStringArray()[1]).To(ContainSubstring(volName1))
+ })
+
It("podman ls volume with multiple --filter flag", func() {
session := podmanTest.Podman([]string{"volume", "create", "--label", "foo=bar", "myvol"})
volName := session.OutputToString()
diff --git a/test/e2e/volume_plugin_test.go b/test/e2e/volume_plugin_test.go
index 4700afdb5..b585f8dd8 100644
--- a/test/e2e/volume_plugin_test.go
+++ b/test/e2e/volume_plugin_test.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
. "github.com/containers/podman/v4/test/utils"
+ "github.com/containers/storage/pkg/stringid"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
@@ -188,4 +189,71 @@ var _ = Describe("Podman volume plugins", func() {
rmAll.WaitWithDefaultTimeout()
Expect(rmAll).Should(Exit(0))
})
+
+ It("podman volume reload", func() {
+ podmanTest.AddImageToRWStore(volumeTest)
+
+ confFile := filepath.Join(podmanTest.TempDir, "containers.conf")
+ err := os.WriteFile(confFile, []byte(`[engine]
+[engine.volume_plugins]
+testvol5 = "/run/docker/plugins/testvol5.sock"`), 0o644)
+ Expect(err).ToNot(HaveOccurred())
+ os.Setenv("CONTAINERS_CONF", confFile)
+
+ pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
+ err = os.Mkdir(pluginStatePath, 0755)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Keep this distinct within tests to avoid multiple tests using the same plugin.
+ pluginName := "testvol5"
+ ctrName := "pluginCtr"
+ plugin := podmanTest.Podman([]string{"run", "--name", ctrName, "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins",
+ "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath})
+ plugin.WaitWithDefaultTimeout()
+ Expect(plugin).Should(Exit(0))
+
+ localvol := "local-" + stringid.GenerateNonCryptoID()
+ // create local volume
+ session := podmanTest.Podman([]string{"volume", "create", localvol})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+
+ vol1 := "vol1-" + stringid.GenerateNonCryptoID()
+ session = podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, vol1})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+
+ // now create volume in plugin without podman
+ vol2 := "vol2-" + stringid.GenerateNonCryptoID()
+ plugin = podmanTest.Podman([]string{"exec", ctrName, "/usr/local/bin/testvol", "--sock-name", pluginName, "create", vol2})
+ plugin.WaitWithDefaultTimeout()
+ Expect(plugin).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls", "-q"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+ Expect(session.OutputToStringArray()).To(ContainElements(localvol, vol1))
+ Expect(session.ErrorToString()).To(Equal("")) // make sure no errors are shown
+
+ plugin = podmanTest.Podman([]string{"exec", ctrName, "/usr/local/bin/testvol", "--sock-name", pluginName, "remove", vol1})
+ plugin.WaitWithDefaultTimeout()
+ Expect(plugin).Should(Exit(0))
+
+ // now reload volumes from plugins
+ session = podmanTest.Podman([]string{"volume", "reload"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+ Expect(string(session.Out.Contents())).To(Equal(fmt.Sprintf(`Added:
+%s
+Removed:
+%s
+`, vol2, vol1)))
+ Expect(session.ErrorToString()).To(Equal("")) // make sure no errors are shown
+
+ session = podmanTest.Podman([]string{"volume", "ls", "-q"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+ Expect(session.OutputToStringArray()).To(ContainElements(localvol, vol2))
+ Expect(session.ErrorToString()).To(Equal("")) // make no errors are shown
+ })
})
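
In rough outline, the reload flow the new test drives looks like this from the CLI (volume names are illustrative, and the testvol5 plugin is assumed to be configured in containers.conf as in the test):

    # a volume created through podman with the plugin driver...
    podman volume create --driver testvol5 vol1
    # ...then removed or added behind podman's back is reconciled by:
    podman volume reload
    # Added:
    # vol2
    # Removed:
    # vol1
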
diff --git a/test/framework/framework.go b/test/framework/framework.go
index 57c6bda2a..26e8bf21c 100644
--- a/test/framework/framework.go
+++ b/test/framework/framework.go
@@ -37,7 +37,7 @@ func NilFunc(f *TestFramework) error {
func (t *TestFramework) Setup() {
// Global initialization for the whole framework goes in here
- // Setup the actual test suite
+ // Set up the actual test suite
gomega.Expect(t.setup(t)).To(gomega.Succeed())
}
diff --git a/test/system/010-images.bats b/test/system/010-images.bats
index 257508418..69ed1004c 100644
--- a/test/system/010-images.bats
+++ b/test/system/010-images.bats
@@ -158,6 +158,11 @@ Labels.created_at | 20[0-9-]\\\+T[0-9:]\\\+Z
# start here because this is the first one, fix this problem.
# You can (probably) ignore any subsequent failures showing '@sha'
# in the error output.
+ #
+ # WARNING! This test is likely to fail for an hour or so after
+ # building a new testimage (via build-testimage script), because
+ # two consecutive 'podman images' may result in a one-minute
+ # difference in the "XX minutes ago" output. This is OK to ignore.
run_podman images -a
is "$output" "$images_baseline" "images -a, after pull: same as before"
diff --git a/test/system/015-help.bats b/test/system/015-help.bats
index 5757d51dc..dd5a7ed44 100644
--- a/test/system/015-help.bats
+++ b/test/system/015-help.bats
@@ -34,10 +34,16 @@ function check_help() {
# has no ' [options]'
is "$usage " " $command_string .*" "Usage string matches command"
+ # Strip off the leading command string; we no longer need it
+ usage=$(sed -e "s/^ $command_string \?//" <<<"$usage")
+
# If usage ends in '[command]', recurse into subcommands
- if expr "$usage" : '.*\[command\]$' >/dev/null; then
+ if expr "$usage" : '\[command\]' >/dev/null; then
found[subcommands]=1
- check_help "$@" $cmd
+ # (except for 'podman help', which is a special case)
+ if [[ $cmd != "help" ]]; then
+ check_help "$@" $cmd
+ fi
continue
fi
@@ -49,10 +55,26 @@ function check_help() {
assert "$usage" !~ '[A-Z].*\[option' \
"'options' must precede arguments in usage"
+ # Strip off '[options]' but remember if we've seen it.
+ local has_options=
+ if [[ $usage =~ \[options\] ]]; then
+ has_options=1
+ usage=$(sed -e 's/^\[options\] \?//' <<<"$usage")
+ fi
+
+ # From this point on, remaining argument descriptions must be UPPER CASE
+ # e.g., 'podman cmd [options] arg' or 'podman cmd [arg]' are invalid.
+ assert "$usage" !~ '[a-z]' \
+ "$command_string: argument names must be UPPER CASE"
+
+ # It makes no sense to have an optional arg followed by a mandatory one
+ assert "$usage" !~ '\[.*\] [A-Z]' \
+ "$command_string: optional args must be _after_ required ones"
+
# Cross-check: if usage includes '[options]', there must be a
# longer 'Options:' section in the full --help output; vice-versa,
# if 'Options:' is in full output, usage line must have '[options]'.
- if expr "$usage" : '.*\[option' >/dev/null; then
+ if [[ $has_options ]]; then
if ! expr "$full_help" : ".*Options:" >/dev/null; then
die "$command_string: Usage includes '[options]' but has no 'Options:' subsection"
fi
@@ -95,9 +117,7 @@ function check_help() {
fi
# If usage has required arguments, try running without them.
- # The expression here is 'first capital letter is not in [BRACKETS]'.
- # It is intended to handle 'podman foo [options] ARG' but not ' [ARG]'.
- if expr "$usage" : '[^A-Z]\+ [A-Z]' >/dev/null; then
+ if expr "$usage" : '[A-Z]' >/dev/null; then
# Exceptions: these commands don't work rootless
if is_rootless; then
# "pause is not supported for rootless containers"
@@ -126,25 +146,15 @@ function check_help() {
# the required args, then invoke with one extra. We should get a
# usage error.
if ! expr "$usage" : ".*\.\.\."; then
- # "podman help" can take infinite args, so skip that one
- if [ "$cmd" != "help" ]; then
- # Get the args part of the command line; this should be
- # everything from the first CAPITAL LETTER onward. We
- # don't actually care about the letter itself, so just
- # make it 'X'. And we don't care about [OPTIONAL] brackets
- # either. What we do care about is stuff like 'IMAGE | CTR'
- # which is actually one argument; convert to 'IMAGE-or-CTR'
- local rhs=$(sed -e 's/^[^A-Z]\+[A-Z]/X/' -e 's/ | /-or-/g' <<<"$usage")
- local n_args=$(wc -w <<<"$rhs")
-
- run_podman '?' "$@" $cmd $(seq --format='x%g' 0 $n_args)
- is "$status" 125 \
- "'$usage' indicates a maximum of $n_args args. I invoked it with more, and expected this exit status"
- is "$output" "Error:.* \(takes no arguments\|requires exactly $n_args arg\|accepts at most\|too many arguments\|accepts $n_args arg(s), received\|accepts between .* and .* arg(s), received \)" \
- "'$usage' indicates a maximum of $n_args args. I invoked it with more, and expected one of these error messages"
+ local n_args=$(wc -w <<<"$usage")
- found[fixed_args]=1
- fi
+ run_podman '?' "$@" $cmd $(seq --format='x%g' 0 $n_args)
+ is "$status" 125 \
+ "'$usage' indicates a maximum of $n_args args. I invoked it with more, and expected this exit status"
+ is "$output" "Error:.* \(takes no arguments\|requires exactly $n_args arg\|accepts at most\|too many arguments\|accepts $n_args arg(s), received\|accepts between .* and .* arg(s), received \)" \
+ "'$usage' indicates a maximum of $n_args args. I invoked it with more, and expected one of these error messages"
+
+ found[fixed_args]=1
fi
count=$(expr $count + 1)
@@ -189,7 +199,7 @@ function check_help() {
check_help
# Test for regression of #7273 (spurious "--remote" help on output)
- for helpopt in help --help; do
+ for helpopt in help --help -h; do
run_podman $helpopt
is "${lines[0]}" "Manage pods, containers and images" \
"podman $helpopt: first line of output"
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index 283c3aea9..b3e3cef00 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -34,12 +34,8 @@ echo $rand | 0 | $rand
# FIXME: The </dev/null is a hack, necessary because as of 2019-09
# podman-remote has a bug in which it silently slurps up stdin,
# including the output of parse_table (i.e. tests to be run).
- run_podman $expected_rc run $IMAGE "$@" </dev/null
-
- # FIXME: remove conditional once podman-remote issue #4096 is fixed
- if ! is_remote; then
- is "$output" "$expected_output" "podman run $cmd - output"
- fi
+ run_podman $expected_rc run $IMAGE "$@"
+ is "$output" "$expected_output" "podman run $cmd - output"
tests_run=$(expr $tests_run + 1)
done < <(parse_table "$tests")
@@ -380,17 +376,7 @@ json-file | f
while read driver do_check; do
msg=$(random_string 15)
run_podman run --name myctr --log-driver $driver $IMAGE echo $msg
-
- # Simple output check
- # Special case: 'json-file' emits a warning, the rest do not
- # ...but with podman-remote the warning is on the server only
- if [[ $do_check == 'f' ]] && ! is_remote; then # 'f' for 'fallback'
- is "${lines[0]}" ".* level=error msg=\"json-file logging specified but not supported. Choosing k8s-file logging instead\"" \
- "Fallback warning emitted"
- is "${lines[1]}" "$msg" "basic output sanity check (driver=$driver)"
- else
- is "$output" "$msg" "basic output sanity check (driver=$driver)"
- fi
+ is "$output" "$msg" "basic output sanity check (driver=$driver)"
# Simply confirm that podman preserved our argument as-is
run_podman inspect --format '{{.HostConfig.LogConfig.Type}}' myctr
@@ -470,10 +456,10 @@ json-file | f
# dependent, we pick an obscure zone (+1245) that is unlikely to
# collide with any of our testing environments.
#
- # To get a reference timestamp we run 'date' locally; note the explicit
- # strftime() format. We can't use --iso=seconds because GNU date adds
- # a colon to the TZ offset (eg -07:00) whereas alpine does not (-0700).
- run date --date=@1600000000 +%Y-%m-%dT%H:%M:%S%z
+ # To get a reference timestamp we run 'date' locally. This requires
+ # that GNU date output matches that of alpine; this seems to be true
+ # as of testimage:20220615.
+ run date --date=@1600000000 --iso=seconds
expect="$output"
TZ=Pacific/Chatham run_podman run --rm --tz=local $IMAGE date -Iseconds -r $testfile
is "$output" "$expect" "podman run with --tz=local, matches host"
@@ -628,7 +614,8 @@ json-file | f
run_podman image mount $IMAGE
romount="$output"
- run_podman run --rm --rootfs $romount echo "Hello world"
+ # FIXME FIXME FIXME: Remove :O once (if) #14504 is fixed!
+ run_podman run --rm --rootfs $romount:O echo "Hello world"
is "$output" "Hello world"
run_podman image unmount $IMAGE
@@ -743,7 +730,7 @@ EOF
run_podman 125 run --device-cgroup-rule="x 7:* rmw" --rm $IMAGE
is "$output" "Error: invalid device type in device-access-add: x"
run_podman 125 run --device-cgroup-rule="a a:* rmw" --rm $IMAGE
- is "$output" "Error: strconv.ParseInt: parsing \"a\": invalid syntax"
+ is "$output" "Error: strconv.ParseUint: parsing \"a\": invalid syntax"
}
@test "podman run closes stdin" {
@@ -855,4 +842,15 @@ EOF
run_podman rmi $test_image
}
+@test "podman create --security-opt" {
+ run_podman create --security-opt no-new-privileges=true $IMAGE
+ run_podman rm $output
+ run_podman create --security-opt no-new-privileges:true $IMAGE
+ run_podman rm $output
+ run_podman create --security-opt no-new-privileges=false $IMAGE
+ run_podman rm $output
+ run_podman create --security-opt no-new-privileges $IMAGE
+ run_podman rm $output
+}
+
# vim: filetype=sh
diff --git a/test/system/050-stop.bats b/test/system/050-stop.bats
index c2dfba84d..39002512b 100644
--- a/test/system/050-stop.bats
+++ b/test/system/050-stop.bats
@@ -171,4 +171,19 @@ load helpers
run_podman --noout stop -t 0 stopme
is "$output" "" "output should be empty"
}
+
+@test "podman stop, with --rm container" {
+ OCIDir=/run/$(podman_runtime)
+
+ if is_rootless; then
+ OCIDir=/run/user/$(id -u)/$(podman_runtime)
+ fi
+
+ run_podman run --rm -d --name rmstop $IMAGE sleep infinity
+ local cid="$output"
+ run_podman stop rmstop
+
+ # Check that the OCI runtime directory has been removed.
+ is "$(ls $OCIDir | grep $cid)" "" "The OCI runtime directory should have been removed"
+}
# vim: filetype=sh
diff --git a/test/system/055-rm.bats b/test/system/055-rm.bats
index 69663fafa..0ef2216b8 100644
--- a/test/system/055-rm.bats
+++ b/test/system/055-rm.bats
@@ -52,10 +52,20 @@ load helpers
}
@test "podman rm <-> run --rm race" {
+ OCIDir=/run/$(podman_runtime)
+
+ if is_rootless; then
+ OCIDir=/run/user/$(id -u)/$(podman_runtime)
+ fi
+
# A container's lock is released before attempting to stop it. This opens
# the window for race conditions that led to #9479.
run_podman run --rm -d $IMAGE sleep infinity
+ local cid="$output"
run_podman rm -af
+
+ # Check that the OCI runtime directory has been removed.
+ is "$(ls $OCIDir | grep $cid)" "" "The OCI runtime directory should have been removed"
}
@test "podman rm --depend" {
diff --git a/test/system/060-mount.bats b/test/system/060-mount.bats
index 7addbd88e..4498e675f 100644
--- a/test/system/060-mount.bats
+++ b/test/system/060-mount.bats
@@ -50,6 +50,10 @@ load helpers
run_podman image mount $IMAGE
mount_path="$output"
+ # Make sure that `mount -a` prints a table
+ run_podman image mount -a
+ is "$output" "$IMAGE .*$mount_path"
+
test -d $mount_path
# Image is custom-built and has a file containing the YMD tag. Check it.
@@ -62,8 +66,8 @@ load helpers
run_podman image mount
is "$output" "$IMAGE *$mount_path" "podman image mount with no args"
- # Clean up
- run_podman image umount $IMAGE
+ # Clean up: -f since we mounted it twice
+ run_podman image umount -f $IMAGE
is "$output" "$iid" "podman image umount: image ID of what was umounted"
run_podman image umount $IMAGE
@@ -83,7 +87,7 @@ load helpers
# Run a container with an image mount
run_podman run --rm --mount type=image,src=$IMAGE,dst=/image-mount $IMAGE diff /etc/os-release /image-mount/etc/os-release
- # Make sure the mount is read only
+ # Make sure the mount is read-only
run_podman 1 run --rm --mount type=image,src=$IMAGE,dst=/image-mount $IMAGE touch /image-mount/read-only
is "$output" "touch: /image-mount/read-only: Read-only file system"
diff --git a/test/system/065-cp.bats b/test/system/065-cp.bats
index cfbeff3ae..8f5abd228 100644
--- a/test/system/065-cp.bats
+++ b/test/system/065-cp.bats
@@ -66,7 +66,7 @@ load helpers
# Container (parent) path does not exist.
run_podman 125 cp $srcdir/hostfile0 cpcontainer:/IdoNotExist/
- is "$output" 'Error: "/IdoNotExist/" could not be found on container cpcontainer: No such file or directory' \
+ is "$output" 'Error: "/IdoNotExist/" could not be found on container cpcontainer: no such file or directory' \
"copy into nonexistent path in container"
run_podman kill cpcontainer
@@ -794,7 +794,7 @@ ${randomcontent[1]}" "$description"
is "$output" "" "output from podman cp 1"
run_podman 125 cp --pause=false $srcdir/$rand_filename2 cpcontainer:/tmp/d2/x/
- is "$output" 'Error: "/tmp/d2/x/" could not be found on container cpcontainer: No such file or directory' "cp will not create nonexistent destination directory"
+ is "$output" 'Error: "/tmp/d2/x/" could not be found on container cpcontainer: no such file or directory' "cp will not create nonexistent destination directory"
run_podman cp --pause=false $srcdir/$rand_filename3 cpcontainer:/tmp/d3/x
is "$output" "" "output from podman cp 3"
@@ -949,9 +949,107 @@ ${randomcontent[1]}" "$description"
run_podman rm -t 0 -f cpcontainer
}
+@test "podman cp --overwrite file - ctr/ctr" {
+ rand_content_file=$(random_string 50)
+ rand_content_dir=$(random_string 50)
+
+ run_podman run -d --name ctr-file $IMAGE sh -c "echo '$rand_content_file' > /tmp/foo; sleep infinity"
+ run_podman run -d --name ctr-dir $IMAGE sh -c "mkdir /tmp/foo; echo '$rand_content_dir' > /tmp/foo/file.txt; sleep infinity"
+
+ # overwrite a directory with a file
+ run_podman 125 cp ctr-file:/tmp/foo ctr-dir:/tmp
+ if ! is_remote; then # remote just returns a 500
+ is "$output" ".* error creating \"/tmp/foo\": .*: file exists.*"
+ fi
+ run_podman cp --overwrite ctr-file:/tmp/foo ctr-dir:/tmp
+ run_podman exec ctr-dir cat /tmp/foo
+ is "$output" "$rand_content_file"
+
+ # reset the ctr-dir container
+ run_podman exec ctr-dir sh -c "rm -rf /tmp/foo; mkdir /tmp/foo; echo '$rand_content_dir' > /tmp/foo/file.txt"
+
+ # overwrite a file with a directory
+ run_podman 125 cp ctr-dir:/tmp/foo ctr-file:/tmp
+ if ! is_remote; then # remote just returns a 500
+ is "$output" ".* error creating \"/tmp/foo\": .*: file exists.*"
+ fi
+ run_podman cp --overwrite ctr-dir:/tmp/foo ctr-file:/tmp
+ run_podman exec ctr-file cat /tmp/foo/file.txt
+ is "$output" "$rand_content_dir"
+
+ run_podman rm -t 0 -f ctr-file ctr-dir
+}
+
+@test "podman cp --overwrite file - ctr/host" {
+ hostdir=$PODMAN_TMPDIR/cp-test
+ mkdir -p $hostdir
+
+ rand_content_file=$(random_string 50)
+ rand_content_dir=$(random_string 50)
+
+ run_podman run -d --name ctr-file $IMAGE sh -c "echo '$rand_content_file' > /tmp/foo; sleep infinity"
+ run_podman run -d --name ctr-dir $IMAGE sh -c "mkdir /tmp/foo; echo '$rand_content_dir' > /tmp/foo/file.txt; sleep infinity"
+
+ # overwrite a directory with a file
+ mkdir $hostdir/foo
+ run_podman 125 cp ctr-file:/tmp/foo $hostdir
+ if ! is_remote; then # remote just returns a 500
+ is "$output" ".* error creating \"/foo\": .*: file exists.*"
+ fi
+ run_podman cp --overwrite ctr-file:/tmp/foo $hostdir
+ is "$(< $hostdir/foo)" "$rand_content_file"
+
+ # overwrite a file with a directory
+ rm -rf $hostdir/foo
+ touch $hostdir/foo
+ run_podman 125 cp ctr-dir:/tmp/foo $hostdir
+ if ! is_remote; then # remote just returns a 500
+ is "$output" ".* error creating \"/foo\": .*: file exists.*"
+ fi
+ run_podman cp --overwrite ctr-dir:/tmp/foo $hostdir
+ is "$(< $hostdir/foo/file.txt)" "$rand_content_dir"
+
+ run_podman rm -t 0 -f ctr-file ctr-dir
+}
+
+@test "podman cp --overwrite file - host/ctr" {
+ hostdir=$PODMAN_TMPDIR/cp-test
+ mkdir -p $hostdir
+
+ rand_content_file=$(random_string 50)
+ rand_content_dir=$(random_string 50)
+
+ run_podman run -d --name ctr-dir $IMAGE sh -c "mkdir /tmp/foo; sleep infinity"
+ run_podman run -d --name ctr-file $IMAGE sh -c "touch /tmp/foo; sleep infinity"
+
+ # overwrite a directory with a file
+ echo "$rand_content_file" > $hostdir/foo
+ run_podman 125 cp $hostdir/foo ctr-dir:/tmp
+ if ! is_remote; then # remote just returns a 500
+ is "$output" ".* error creating \"/tmp/foo\": .*: file exists.*"
+ fi
+ run_podman cp --overwrite $hostdir/foo ctr-dir:/tmp
+ run_podman exec ctr-dir cat /tmp/foo
+ is "$output" "$rand_content_file"
+
+ # overwrite a file with a directory
+ rm -f $hostdir/foo
+ mkdir $hostdir/foo
+ echo "$rand_content_dir" > $hostdir/foo/file.txt
+ run_podman 125 cp $hostdir/foo ctr-file:/tmp
+ if ! is_remote; then # remote just returns a 500
+ is "$output" ".* error creating \"/tmp/foo\": .*: file exists.*"
+ fi
+ run_podman cp --overwrite $hostdir/foo ctr-file:/tmp
+ run_podman exec ctr-file cat /tmp/foo/file.txt
+ is "$output" "$rand_content_dir"
+
+ run_podman rm -t 0 -f ctr-file ctr-dir
+}
+
function teardown() {
# In case any test fails, clean up the container we left behind
- run_podman rm -t 0 f cpcontainer
+ run_podman rm -t 0 -f --ignore cpcontainer
basic_teardown
}
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index b7e0ab447..9fddbaa21 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -496,7 +496,12 @@ Labels.$label_name | $label_value
"image tree: third line"
is "${lines[3]}" "Image Layers" \
"image tree: fourth line"
- is "${lines[4]}" ".* ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[$IMAGE]" \
+ # FIXME: if #14536 is ever fixed, rebuild testimage & s/5/4/ below.
+ # Summary: this should be ${lines[4]}, not [5], and prior to 2022-06-15
+ # it was. Unfortunately, a nightmarish bug interaction makes it impossible
+ # for us to use --squash-all on our testimage. Unless/until that bug is
+ # fixed, we have an extra layer that all we can do is ignore.
+ is "${lines[5]}" ".* ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[$IMAGE]" \
"image tree: first layer line"
is "${lines[-1]}" ".* ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[localhost/build_test:latest]" \
"image tree: last layer line"
@@ -757,7 +762,7 @@ EOF
is "$output" "[no instance of 'Using cache']" "no cache used"
fi
- run_podman rmi -a --force
+ run_podman rmi -f build_test
}
# Caveat lector: this test was mostly copy-pasted from buildah in #9275.
diff --git a/test/system/120-load.bats b/test/system/120-load.bats
index 45e0b3362..7f0bcfd95 100644
--- a/test/system/120-load.bats
+++ b/test/system/120-load.bats
@@ -121,15 +121,31 @@ verify_iid_and_name() {
run_podman untag $IMAGE $newname
run_podman image scp -q ${notme}@localhost::$newname
- expect="Loaded image(s): $newname"
+ expect="Loaded image: $newname"
is "$output" "$expect" "-q silences output"
# Confirm that we have it, and that its digest matches our original
run_podman image inspect --format '{{.Digest}}' $newname
is "$output" "$src_digest" "Digest of re-fetched image matches original"
- # Clean up
+ # test tagging capability
+ run_podman untag $IMAGE $newname
+ run_podman image scp ${notme}@localhost::$newname foobar:123
+
+ run_podman image inspect --format '{{.Digest}}' foobar:123
+ is "$output" "$src_digest" "Digest of re-fetched image matches original"
+
+ # remove the root image so we can transfer it back under another name
_sudo $PODMAN image rm $newname
+
+ # get foobar's ID, for an ID transfer test
+ run_podman image inspect --format '{{.ID}}' foobar:123
+ run_podman image scp $output ${notme}@localhost::foobartwo
+
+ _sudo $PODMAN image exists foobartwo
+
+ # Clean up
+ _sudo $PODMAN image rm foobartwo
run_podman untag $IMAGE $newname
# Negative test for nonexistent image.
@@ -142,12 +158,6 @@ verify_iid_and_name() {
run_podman 125 image scp $nope ${notme}@localhost::
is "$output" "Error: $nope: image not known.*" "Pushing nonexistent image"
- # Negative test for copying to a different name
- run_podman 125 image scp $IMAGE ${notme}@localhost::newname:newtag
- is "$output" "Error: cannot specify an image rename: invalid argument" \
- "Pushing with a different name: not allowed"
-
- # FIXME: any point in copying by image ID? What else should we test?
}
diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats
index a9456e03c..96b633a42 100644
--- a/test/system/130-kill.bats
+++ b/test/system/130-kill.bats
@@ -130,4 +130,14 @@ load helpers
is "$output" $cname
}
+@test "podman kill - concurrent stop" {
+ # 14761 - concurrent kill/stop must record the exit code
+ random_name=$(random_string 10)
+ run_podman run -d --replace --name=$random_name alpine sh -c "trap 'echo Received SIGTERM, ignoring' SIGTERM; echo READY; while :; do sleep 0.2; done"
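+ # The trap ignores SIGTERM, so `podman stop -t 1` has to wait out its timeout
+ # before escalating to SIGKILL; that widens the window in which the concurrent
+ # kill below races the stop.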
+ $PODMAN stop -t 1 $random_name &
+ run_podman kill $random_name
+ run_podman wait $random_name
+ run_podman rm -f $random_name
+}
+
# vim: filetype=sh
diff --git a/test/system/150-login.bats b/test/system/150-login.bats
index 33b8438bf..dc902d5fe 100644
--- a/test/system/150-login.bats
+++ b/test/system/150-login.bats
@@ -314,7 +314,7 @@ function _test_skopeo_credential_sharing() {
fi
# Make sure socket is closed
- if { exec 3<> /dev/tcp/127.0.0.1/${PODMAN_LOGIN_REGISTRY_PORT}; } &>/dev/null; then
+ if ! port_is_free $PODMAN_LOGIN_REGISTRY_PORT; then
die "Socket still seems open"
fi
}
diff --git a/test/system/160-volumes.bats b/test/system/160-volumes.bats
index 5b0460723..da60112a0 100644
--- a/test/system/160-volumes.bats
+++ b/test/system/160-volumes.bats
@@ -64,6 +64,29 @@ function teardown() {
}
+# Filter volumes by name
+@test "podman volume filter --name" {
+ suffix=$(random_string)
+ prefix="volume"
+
+ for i in 1 2; do
+ myvolume=${prefix}_${i}_${suffix}
+ run_podman volume create $myvolume
+ is "$output" "$myvolume" "output from volume create $i"
+ done
+
+ run_podman volume ls --filter name=${prefix}_1.+ --format "{{.Name}}"
+ is "$output" "${prefix}_1_${suffix}" "--filter name=${prefix}_1.+ shows only one volume"
+
+ # The _1* is intentional: an asterisk has a different meaning in glob and regexp. Make sure this is treated as a regexp.
+ run_podman volume ls --filter name=${prefix}_1* --format "{{.Name}}"
+ is "$output" "${prefix}_1_${suffix}.*${prefix}_2_${suffix}.*" "--filter name=${prefix}_1* shows ${prefix}_1_${suffix} and ${prefix}_2_${suffix}"
+
+ for i in 1 2; do
+ run_podman volume rm ${prefix}_${i}_${suffix}
+ done
+}
+
# Named volumes
@test "podman volume create / run" {
myvolume=myvol$(random_string)
@@ -411,4 +434,43 @@ NeedsChown | true
fi
}
+@test "podman --image-volume" {
+ tmpdir=$PODMAN_TMPDIR/volume-test
+ mkdir -p $tmpdir
+ containerfile=$tmpdir/Containerfile
+ cat >$containerfile <<EOF
+FROM $IMAGE
+VOLUME /data
+EOF
+ fs=$(stat -f -c %T .)
+ run_podman build -t volume_image $tmpdir
+
+ containersconf=$tmpdir/containers.conf
+ cat >$containersconf <<EOF
+[engine]
+image_volume_mode="tmpfs"
+EOF
+
+ run_podman run --image-volume tmpfs --rm volume_image stat -f -c %T /data
+ is "$output" "tmpfs" "Should be tmpfs"
+
+ run_podman 1 run --image-volume ignore --rm volume_image stat -f -c %T /data
+ is "$output" "stat: can't read file system information for '/data': No such file or directory" "Should fail with /data does not exists"
+
+ CONTAINERS_CONF="$containersconf" run_podman run --rm volume_image stat -f -c %T /data
+ is "$output" "tmpfs" "Should be tmpfs"
+
+ CONTAINERS_CONF="$containersconf" run_podman run --image-volume bind --rm volume_image stat -f -c %T /data
+ assert "$output" != "tmpfs" "Should match hosts $fs"
+
+ CONTAINERS_CONF="$containersconf" run_podman run --image-volume tmpfs --rm volume_image stat -f -c %T /data
+ is "$output" "tmpfs" "Should be tmpfs"
+
+ CONTAINERS_CONF="$containersconf" run_podman 1 run --image-volume ignore --rm volume_image stat -f -c %T /data
+ is "$output" "stat: can't read file system information for '/data': No such file or directory" "Should fail with /data does not exists"
+
+ run_podman rm --all --force -t 0
+ run_podman image rm --force localhost/volume_image
+}
+
# vim: filetype=sh
diff --git a/test/system/170-run-userns.bats b/test/system/170-run-userns.bats
index b80351902..84788a7f4 100644
--- a/test/system/170-run-userns.bats
+++ b/test/system/170-run-userns.bats
@@ -38,10 +38,12 @@ function _require_crun() {
@test "rootful pod with custom ID mapping" {
skip_if_rootless "does not work rootless - rootful feature"
- skip_if_remote "remote --uidmap is broken (see #14233)"
random_pod_name=$(random_string 30)
run_podman pod create --uidmap 0:200000:5000 --name=$random_pod_name
run_podman pod start $random_pod_name
+ run_podman pod inspect --format '{{.InfraContainerID}}' $random_pod_name
+ run podman inspect --format '{{.HostConfig.IDMappings.UIDMap}}' $output
+ is "$output" ".*0:200000:5000" "UID Map Successful"
# Remove the pod and the pause image
run_podman pod rm $random_pod_name
@@ -109,15 +111,30 @@ EOF
}
@test "podman userns=nomap" {
- skip_if_not_rootless "--userns=nomap only works in rootless mode"
- ns_user=$(id -un)
- baseuid=$(egrep "${ns_user}:" /etc/subuid | cut -f2 -d:)
- test ! -z ${baseuid} || skip "no IDs allocated for user ${ns_user}"
+ if is_rootless; then
+ ns_user=$(id -un)
+ baseuid=$(egrep "${ns_user}:" /etc/subuid | cut -f2 -d:)
+ test ! -z ${baseuid} || skip "no IDs allocated for user ${ns_user}"
+
+ test_name="test_$(random_string 12)"
+ run_podman run -d --userns=nomap $IMAGE sleep 100
+ cid=${output}
+ run_podman top ${cid} huser
+ is "${output}" "HUSER.*${baseuid}" "Container should start with baseuid from /etc/subuid not user UID"
+ run_podman rm -t 0 --force ${cid}
+ else
+ run_podman 125 run -d --userns=nomap $IMAGE sleep 100
+ is "${output}" "Error: nomap is only supported in rootless mode" "Container should fail to start since nomap is not suppored in rootful mode"
+ fi
+}
- test_name="test_$(random_string 12)"
- run_podman run -d --userns=nomap $IMAGE sleep 100
- cid=${output}
- run_podman top ${cid} huser
- is "${output}" "HUSER.*${baseuid}" "Container should start with baseuid from /etc/subuid not user UID"
- run_podman rm -t 0 --force ${cid}
+@test "podman userns=keep-id" {
+ if is_rootless; then
+ user=$(id -u)
+ run_podman run --rm --userns=keep-id $IMAGE id -u
+ is "${output}" "$user" "Container should run as the current user"
+ else
+ run_podman 125 run --rm --userns=keep-id $IMAGE id -u
+ is "${output}" "Error: keep-id is only supported in rootless mode" "Container should fail to start since keep-id is not suppored in rootful mode"
+ fi
}
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index 4250f2680..b93f3f92f 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -332,11 +332,18 @@ EOF
@test "podman pod create --share" {
local pod_name="$(random_string 10 | tr A-Z a-z)"
run_podman 125 pod create --share bogus --name $pod_name
- is "$output" ".*Invalid kernel namespace to share: bogus. Options are: cgroup, ipc, net, pid, uts or none" \
+ is "$output" ".*invalid kernel namespace to share: bogus. Options are: cgroup, ipc, net, pid, uts or none" \
"pod test for bogus --share option"
run_podman pod create --share ipc --name $pod_name
+ run_podman pod inspect $pod_name --format "{{.SharedNamespaces}}"
+ is "$output" "[ipc]"
run_podman run --rm --pod $pod_name --hostname foobar $IMAGE hostname
is "$output" "foobar" "--hostname should work with non share UTS namespace"
+ run_podman pod create --share +pid --replace --name $pod_name
+ run_podman pod inspect $pod_name --format "{{.SharedNamespaces}}"
+ for ns in uts pid ipc net; do
+ is "$output" ".*$ns"
+ done
}
@test "podman pod create --pod new:$POD --hostname" {
@@ -387,20 +394,20 @@ EOF
is "$output" "false" "Default network sharing should be false"
run_podman pod rm test
- run_podman pod create --name test --share ipc --network private
+ run_podman pod create --share ipc --network private test
run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
is "$output" "false" "Private network sharing with only ipc should be false"
run_podman pod rm test
- run_podman pod create --name test --share net --network private
- run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
+ local name="$(random_string 10 | tr A-Z a-z)"
+ run_podman pod create --name $name --share net --network private
+ run_podman pod inspect $name --format {{.InfraConfig.HostNetwork}}
is "$output" "false" "Private network sharing with only net should be false"
- run_podman pod rm test
- run_podman pod create --name test --share net --network host
- run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
+ run_podman pod create --share net --network host --replace $name
+ run_podman pod inspect $name --format {{.InfraConfig.HostNetwork}}
is "$output" "true" "Host network sharing with only net should be true"
- run_podman pod rm test
+ run_podman pod rm $name
run_podman pod create --name test --share ipc --network host
run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
@@ -465,4 +472,49 @@ spec:
run_podman pod rm $name-pod
}
+@test "pod resource limits" {
+ skip_if_remote "resource limits only implemented on non-remote"
+ if is_rootless; then
+ skip "only meaningful for rootful"
+ fi
+
+ local name1="resources1"
+ run_podman --cgroup-manager=systemd pod create --name=$name1 --cpus=5 --memory=10m
+ run_podman --cgroup-manager=systemd pod start $name1
+ run_podman pod inspect --format '{{.CgroupPath}}' $name1
+ local path1="$output"
+ local actual1=$(< /sys/fs/cgroup/$path1/cpu.max)
+ is "$actual1" "500000 100000" "resource limits set properly"
+ local actual2=$(< /sys/fs/cgroup/$path1/memory.max)
+ is "$actual2" "10485760" "resource limits set properly"
+ run_podman pod --cgroup-manager=systemd rm -f $name1
+
+ local name2="resources2"
+ run_podman --cgroup-manager=cgroupfs pod create --cpus=5 --memory=10m --name=$name2
+ run_podman --cgroup-manager=cgroupfs pod start $name2
+ run_podman pod inspect --format '{{.CgroupPath}}' $name2
+ local path2="$output"
+ local actual2=$(< /sys/fs/cgroup/$path2/cpu.max)
+ is "$actual2" "500000 100000" "resource limits set properly"
+ local actual2=$(< /sys/fs/cgroup/$path2/memory.max)
+ is "$actual2" "10485760" "resource limits set properly"
+ run_podman --cgroup-manager=cgroupfs pod rm $name2
+}
+
+@test "podman pod ps doesn't race with pod rm" {
+ # create a few pods
+ for i in {0..10}; do
+ run_podman pod create
+ done
+
+ # and delete them
+ $PODMAN pod rm -a &
+
+ # pod ps should not fail while pods are being deleted
+ run_podman pod ps -q
+
+ # wait for pod rm -a
+ wait
+}
+
# vim: filetype=sh
diff --git a/test/system/250-systemd.bats b/test/system/250-systemd.bats
index 567fa89c1..fc3c33975 100644
--- a/test/system/250-systemd.bats
+++ b/test/system/250-systemd.bats
@@ -27,7 +27,6 @@ function teardown() {
rm -f "$UNIT_FILE"
systemctl daemon-reload
fi
- run_podman rmi -a
basic_teardown
}
@@ -53,10 +52,17 @@ function service_setup() {
# Helper to stop a systemd service running a container
function service_cleanup() {
- local status=$1
run systemctl stop "$SERVICE_NAME"
assert $status -eq 0 "Error stopping systemd unit $SERVICE_NAME: $output"
+ # Regression test for #11304: confirm that the unit stops into the correct state
+ local expected_state="$1"
+ if [[ -n "$expected_state" ]]; then
+ run systemctl show --property=ActiveState "$SERVICE_NAME"
+ assert "$output" = "ActiveState=$expected_state" \
+ "state of service after systemctl stop"
+ fi
+
run systemctl disable "$SERVICE_NAME"
assert $status -eq 0 "Error disabling systemd unit $SERVICE_NAME: $output"
@@ -80,12 +86,6 @@ function service_cleanup() {
run_podman logs $cname
is "$output" ".*WAITING.*" "running is waiting for signal"
- # Exercise `podman auto-update`.
- # TODO: this will at least run auto-update code but won't perform an update
- # since the image didn't change. We need to improve on that and run
- # an image from a local registry instead.
- run_podman auto-update
-
# All good. Stop service, clean up.
# Also make sure the service is in the `inactive` state (see #11304).
service_cleanup inactive
@@ -94,26 +94,28 @@ function service_cleanup() {
@test "podman autoupdate local" {
# Note that the entrypoint may be a JSON string which requires preserving the quotes (see #12477)
cname=$(random_string)
- run_podman create --name $cname --label "io.containers.autoupdate=local" --entrypoint '["top"]' $IMAGE
+
+ # Create a scratch image (copy of our regular one)
+ image_copy=base$(random_string | tr A-Z a-z)
+ run_podman tag $IMAGE $image_copy
+
+ # Create a container based on that
+ run_podman create --name $cname --label "io.containers.autoupdate=local" --entrypoint '["top"]' $image_copy
# Start systemd service to run this container
service_setup
# Give container time to start; make sure output looks top-like
- sleep 2
- run_podman logs $cname
- is "$output" ".*Load average:.*" "running container 'top'-like output"
-
- # Save the container id before updating
- run_podman ps --format '{{.ID}}'
+ wait_for_output 'Load average' $cname
# Run auto-update and check that it restarted the container
- run_podman commit --change "CMD=/bin/bash" $cname $IMAGE
+ run_podman commit --change "CMD=/bin/bash" $cname $image_copy
run_podman auto-update
is "$output" ".*$SERVICE_NAME.*" "autoupdate local restarted container"
# All good. Stop service, clean up.
service_cleanup
+ run_podman rmi $image_copy
}
# These tests can fail in dev. environment because of SELinux.
@@ -241,6 +243,7 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
run_podman rm -f $cname
run_podman pod rm -f $podname
+ run_podman rmi $(pause_image)
}
@test "podman generate - systemd template only used on --new" {
@@ -292,15 +295,17 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
run_podman network rm -f $netname
}
-@test "podman-play-kube@.service template" {
+@test "podman-kube@.service template" {
skip_if_remote "systemd units do not work with remote clients"
# If running from a podman source directory, build and use the source
# version of the play-kube-@ unit file
- unit_name="podman-play-kube@.service"
+ unit_name="podman-kube@.service"
unit_file="contrib/systemd/system/${unit_name}"
if [[ -e ${unit_file}.in ]]; then
echo "# [Building & using $unit_name from source]" >&3
+ # Force regenerating unit file (existing one may have /usr/bin path)
+ rm -f $unit_file
BINDIR=$(dirname $PODMAN) make $unit_file
cp $unit_file $UNIT_DIR/$unit_name
fi
@@ -324,7 +329,7 @@ spec:
EOF
# Dispatch the YAML file
- service_name="podman-play-kube@$(systemd-escape $yaml_source).service"
+ service_name="podman-kube@$(systemd-escape $yaml_source).service"
systemctl start $service_name
systemctl is-active $service_name
@@ -366,6 +371,33 @@ EOF
systemctl stop $service_name
run_podman 1 container exists $service_container
run_podman 1 pod exists test_pod
+ run_podman rmi $(pause_image)
+ rm -f $UNIT_DIR/$unit_name
+}
+
+@test "podman-system-service containers survive service stop" {
+ skip_if_remote "N/A under podman-remote"
+
+ SERVICE_NAME=podman-service-$(random_string)
+ port=$(random_free_port)
+ URL=tcp://127.0.0.1:$port
+
+ systemd-run --unit=$SERVICE_NAME $PODMAN system service $URL --time=0
+ wait_for_port 127.0.0.1 $port
+
+ # Start a long-running container.
+ cname=keeps-running
+ run_podman --url $URL run -d --name $cname $IMAGE top -d 2
+
+ run_podman container inspect -l --format "{{.State.Running}}"
+ is "$output" "true" "This should never fail"
+
+ systemctl stop $SERVICE_NAME
+
+ run_podman container inspect $cname --format "{{.State.Running}}"
+ is "$output" "true" "Container is still running after podman server stops"
+
+ run_podman rm -f -t 0 $cname
}
# vim: filetype=sh
diff --git a/test/system/410-selinux.bats b/test/system/410-selinux.bats
index 21ac4cb8f..d437465a4 100644
--- a/test/system/410-selinux.bats
+++ b/test/system/410-selinux.bats
@@ -205,7 +205,11 @@ function check_label() {
# from /proc/thread-self/attr/exec`: .* unable to assign
# to /proc/self/attr/keycreate`: .* unable to process
crun) expect="\`/proc/.*\`: OCI runtime error: unable to \(assign\|process\) security attribute" ;;
- runc) expect="OCI runtime error: .*: failed to set /proc/self/attr/keycreate on procfs" ;;
+ # runc 1.1 changed the error message because of new selinux pkg that uses standard os.PathError, see
+ # https://github.com/opencontainers/selinux/pull/148/commits/a5dc47f74c56922d58ead05d1fdcc5f7f52d5f4e
+ # from failed to set /proc/self/attr/keycreate on procfs
+ # to write /proc/self/attr/keycreate: invalid argument
+ runc) expect="OCI runtime error: .*: \(failed to set|write\) /proc/self/attr/keycreate" ;;
*) skip "Unknown runtime '$runtime'";;
esac
diff --git a/test/system/500-networking.bats b/test/system/500-networking.bats
index 3db0804d1..50eb15216 100644
--- a/test/system/500-networking.bats
+++ b/test/system/500-networking.bats
@@ -111,6 +111,10 @@ load helpers
$IMAGE nc -l -n -v -p $myport
cid="$output"
+ # check that podman stores the network info correctly when a userns is used (#14465)
+ run_podman container inspect --format "{{.NetworkSettings.SandboxKey}}" $cid
+ assert "$output" =~ ".*/netns/netns-.*" "Netns path should be set"
+
wait_for_output "listening on .*:$myport .*" $cid
# emit random string, and check it
@@ -161,6 +165,9 @@ load helpers
run_podman pod rm $pod_name
is "$output" "$pid" "Only ID in output (no extra errors)"
+
+ # Clean up
+ run_podman rmi $(pause_image)
}
@test "podman run with slirp4ns assigns correct addresses to /etc/hosts" {
@@ -352,7 +359,7 @@ load helpers
run curl -s $SERVER/index.txt
is "$output" "$random_1" "curl 127.0.0.1:/index.txt"
- # cleanup the container
+ # clean up the container
run_podman rm -t 0 -f $cid
# test that we cannot remove the default network
@@ -542,7 +549,7 @@ load helpers
run curl --max-time 3 -s $SERVER/index.txt
is "$output" "$random_1" "curl 127.0.0.1:/index.txt should still work"
- # cleanup
+ # clean up
run_podman rm -t 0 -f $cid $background_cid
run_podman network rm -t 0 -f $netname $netname2
}
@@ -615,7 +622,7 @@ load helpers
run_podman rm -t 0 -f $cid
done
- # Cleanup network
+ # Clean up network
run_podman network rm -t 0 -f $netname
}
@@ -669,17 +676,21 @@ EOF
@test "podman run port forward range" {
for netmode in bridge slirp4netns:port_handler=slirp4netns slirp4netns:port_handler=rootlesskit; do
- local port=$(random_free_port)
- local end_port=$(( $port + 2 ))
- local range="$port-$end_port:$port-$end_port"
+ local range=$(random_free_port_range 3)
+ # die() inside $(...) does not actually stop us.
+ assert "$range" != "" "Could not find free port range"
+
+ local port="${range%-*}"
+ local end_port="${range#*-}"
local random=$(random_string)
- run_podman run --network $netmode -p "$range" -d $IMAGE sleep inf
+ run_podman run --network $netmode -p "$range:$range" -d $IMAGE sleep inf
cid="$output"
for port in $(seq $port $end_port); do
run_podman exec -d $cid nc -l -p $port -e /bin/cat
- # -w 1 adds a 1 second timeout, for some reason ubuntus ncat doesn't close the connection on EOF,
- # other options to change this are not portable across distros but -w seems to work
+ # -w 1 adds a 1 second timeout. For some reason, ubuntu's ncat
+ # doesn't close the connection on EOF, and other options to
+ # change this are not portable across distros. -w seems to work.
run nc -w 1 127.0.0.1 $port <<<$random
is "$output" "$random" "ncat got data back (netmode=$netmode port=$port)"
done
diff --git a/test/system/520-checkpoint.bats b/test/system/520-checkpoint.bats
index c16a8c35d..7f60f01b3 100644
--- a/test/system/520-checkpoint.bats
+++ b/test/system/520-checkpoint.bats
@@ -170,4 +170,34 @@ function teardown() {
# FIXME: test --leave-running
+@test "podman checkpoint --file-locks" {
+ action='flock test.lock sh -c "while [ -e /wait ];do sleep 0.5;done;for i in 1 2 3;do echo \$i;sleep 0.5;done"'
+ run_podman run -d $IMAGE sh -c "touch /wait; touch test.lock; echo READY; $action & $action & wait"
+ local cid="$output"
+
+ # Wait for container to start emitting output
+ wait_for_ready $cid
+
+ # Checkpoint, and confirm via inspect
+ run_podman container checkpoint --file-locks $cid
+ is "$output" "$cid" "podman container checkpoint"
+
+ run_podman container inspect \
+ --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $cid
+ is "$output" "exited:false:false:true" "State. Status:Running:Pause:Checkpointed"
+
+ # Restart immediately and confirm state
+ run_podman container restore --file-locks $cid
+ is "$output" "$cid" "podman container restore"
+
+ # Signal the container to continue; this is where the 1-2-3s will come from
+ run_podman exec $cid rm /wait
+
+ # Wait for the container to stop
+ run_podman wait $cid
+
+ run_podman logs $cid
+ trim=$(sed -z -e 's/[\r\n]\+//g' <<<"$output")
+ is "$trim" "READY123123" "File lock restored"
+}
# vim: filetype=sh
diff --git a/test/system/600-completion.bats b/test/system/600-completion.bats
index 018e95e78..cb4a2c5f8 100644
--- a/test/system/600-completion.bats
+++ b/test/system/600-completion.bats
@@ -8,6 +8,16 @@
load helpers
+function setup() {
+ # $PODMAN may be a space-separated string, e.g. if we include a --url.
+ local -a podman_as_array=($PODMAN)
+ # __completeNoDesc must be the first arg if we are running the completion cmd
+ # set the var for the run_completion function
+ PODMAN_COMPLETION="${podman_as_array[0]} __completeNoDesc ${podman_as_array[@]:1}"
+
+ basic_setup
+}
+
# Returns true if we are able to podman-pause
function _can_pause() {
# Even though we're just trying completion, not an actual unpause,
@@ -88,8 +98,14 @@ function check_shell_completion() {
continue 2
fi
+ name=$random_container_name
+ # special case: podman cp suggests container names with a colon
+ if [[ $cmd = "cp" ]]; then
+ name="$name:"
+ fi
+
run_completion "$@" $cmd "${extra_args[@]}" ""
- is "$output" ".*-$random_container_name${nl}" \
+ is "$output" ".*-$name${nl}" \
"$* $cmd: actual container listed in suggestions"
match=true
@@ -175,7 +191,7 @@ function check_shell_completion() {
_check_completion_end NoSpace
else
_check_completion_end Default
- assert "${#lines[@]}" -eq 2 "$* $cmd: Suggestions are in the output"
+ _check_no_suggestions
fi
;;
@@ -205,16 +221,7 @@ function check_shell_completion() {
if [[ ! ${args##* } =~ "..." ]]; then
run_completion "$@" $cmd "${extra_args[@]}" ""
_check_completion_end NoFileComp
- if [ ${#lines[@]} -gt 2 ]; then
- # checking for line count is not enough since we may include additional debug output
- # lines starting with [Debug] are allowed
- i=0
- length=$(( ${#lines[@]} - 2 ))
- while [[ i -lt length ]]; do
- assert "${lines[$i]:0:7}" == "[Debug]" "Suggestions are in the output"
- i=$(( i + 1 ))
- done
- fi
+ _check_no_suggestions
fi
done
@@ -231,6 +238,24 @@ function _check_completion_end() {
is "${lines[-1]}" "Completion ended with directive: ShellCompDirective$1" "Completion has wrong ShellCompDirective set"
}
+# Check that there are no suggestions in the output.
+# Ideally we would check only stdout, not stderr, but that is not possible with bats.
+# By default there are always two extra lines at the end for the ShellCompDirective.
+# There may also be extra lines for debugging; these always start
+# with [Debug], e.g. `[Debug] [Error] no container with name or ID "t12" found: no such container`.
+function _check_no_suggestions() {
+ if [ ${#lines[@]} -gt 2 ]; then
+ # Checking for line count is not enough since we may include additional debug output.
+ # Lines starting with [Debug] are allowed.
+ local i=0
+ length=$((${#lines[@]} - 2))
+ while [[ i -lt length ]]; do
+ assert "${lines[$i]:0:7}" == "[Debug]" "Unexpected non-Debug output line: ${lines[$i]}"
+ i=$((i + 1))
+ done
+ fi
+}
+
@test "podman shell completion test" {
@@ -280,11 +305,6 @@ function _check_completion_end() {
# create secret
run_podman secret create $random_secret_name $secret_file
- # $PODMAN may be a space-separated string, e.g. if we include a --url.
- local -a podman_as_array=($PODMAN)
- # __completeNoDesc must be the first arg if we running the completion cmd
- PODMAN_COMPLETION="${podman_as_array[0]} __completeNoDesc ${podman_as_array[@]:1}"
-
# Called with no args -- start with 'podman --help'. check_shell_completion() will
# recurse for any subcommands.
check_shell_completion
@@ -316,3 +336,51 @@ function _check_completion_end() {
done <<<"$output"
}
+
+@test "podman shell completion for paths in container/image" {
+ skip_if_remote "mounting via remote does not work"
+ for cmd in create run; do
+ run_completion $cmd $IMAGE ""
+ assert "$output" =~ ".*^/etc/\$.*" "etc directory suggested (cmd: podman $cmd)"
+ assert "$output" =~ ".*^/home/\$.*" "home directory suggested (cmd: podman $cmd)"
+ assert "$output" =~ ".*^/root/\$.*" "root directory suggested (cmd: podman $cmd)"
+
+ # check completion for subdirectory
+ run_completion $cmd $IMAGE "/etc"
+ # It should be safe to assume the os-release file always exists in $IMAGE
+ assert "$output" =~ ".*^/etc/os-release\$.*" "/etc files suggested (cmd: podman $cmd /etc)"
+ # check completion for partial file name
+ run_completion $cmd $IMAGE "/etc/os-"
+ assert "$output" =~ ".*^/etc/os-release\$.*" "/etc files suggested (cmd: podman $cmd /etc/os-)"
+
+ # check completion with relative path components
+ # It is important that we still use the image root and do not escape to the host
+ run_completion $cmd $IMAGE "../../"
+ assert "$output" =~ ".*^../../etc/\$.*" "relative etc directory suggested (cmd: podman $cmd ../../)"
+ assert "$output" =~ ".*^../../home/\$.*" "relative home directory suggested (cmd: podman $cmd ../../)"
+ done
+
+ random_name=$(random_string 30)
+ random_file=$(random_string 30)
+ run_podman run --name $random_name $IMAGE sh -c "touch /tmp/$random_file && touch /tmp/${random_file}2 && mkdir /emptydir"
+
+ # check completion for podman cp
+ run_completion cp ""
+ assert "$output" =~ ".*^$random_name\:\$.*" "podman cp suggest container names"
+
+ run_completion cp "$random_name:"
+ assert "$output" =~ ".*^$random_name\:/etc/\$.*" "podman cp suggest paths in container"
+
+ run_completion cp "$random_name:/tmp"
+ assert "$output" =~ ".*^$random_name\:/tmp/$random_file\$.*" "podman cp suggest custom file in container"
+
+ run_completion cp "$random_name:/tmp/$random_file"
+ assert "$output" =~ ".*^$random_name\:/tmp/$random_file\$.*" "podman cp suggest /tmp/$random_file file in container"
+ assert "$output" =~ ".*^$random_name\:/tmp/${random_file}2\$.*" "podman cp suggest /tmp/${random_file}2 file in container"
+
+ run_completion cp "$random_name:/emptydir"
+ assert "$output" =~ ".*^$random_name\:/emptydir/\$.*ShellCompDirectiveNoSpace" "podman cp suggest empty dir with no space directive (:2)"
+
+ # clean up the container
+ run_podman rm $random_name
+}
diff --git a/test/system/build-testimage b/test/system/build-testimage
index eb5849b5e..a0d831abb 100755
--- a/test/system/build-testimage
+++ b/test/system/build-testimage
@@ -12,8 +12,8 @@
# still need a fedora image for that.
#
-# Buildah binary
-BUILDAH=${BUILDAH:-buildah}
+# Podman binary to use
+PODMAN=${PODMAN:-$(pwd)/bin/podman}
# Tag for this new image
YMD=$(date +%Y%m%d)
@@ -25,7 +25,8 @@ if [ -z "$create_script" ]; then
fi
# Creation timestamp, Zulu time
-create_time_z=$(env TZ=UTC date +'%Y-%m-%dT%H:%M:%SZ')
+create_time_t=$(date +%s)
+create_time_z=$(env TZ=UTC date --date=@$create_time_t +'%Y-%m-%dT%H:%M:%SZ')
set -ex
@@ -60,19 +61,33 @@ chmod 755 pause
# alpine because it's small and light and reliable
# - check for updates @ https://hub.docker.com/_/alpine
# busybox-extras provides httpd needed in 500-networking.bats
-cat >Containerfile <<EOF
+#
+# Two Containerfiles, because we have to do the image build in two parts,
+# which I think are easier to describe in reverse order:
+# 2) The second build has to be run with --timestamp=CONSTANT, otherwise
+# the Created test in 110-history.bats may fail (#14456); but
+# 1) the timestamp of the testimage-id file must be preserved (see above),
+# and 'build --timestamp' clobbers all file timestamps.
+#
+cat >Containerfile1 <<EOF
ARG REPO=please-override-repo
-FROM docker.io/\${REPO}/alpine:3.13.5
+FROM docker.io/\${REPO}/alpine:3.16.0
RUN apk add busybox-extras
ADD testimage-id pause /home/podman/
+EOF
+
+cat >Containerfile2 <<EOF
+FROM localhost/interim-image:latest
LABEL created_by=$create_script
LABEL created_at=$create_time_z
WORKDIR /home/podman
CMD ["/bin/echo", "This container is intended for podman CI testing"]
EOF
-# --squash-all : needed by 'tree' test in 070-build.bats
-podman rmi -f testimage &> /dev/null || true
+# Start from scratch
+testimg_base=quay.io/libpod/testimage
+testimg=${testimg_base}:$YMD
+$PODMAN rmi -f $testimg &> /dev/null || true
# There should always be a testimage tagged ':0000000<X>' (eight digits,
# zero-padded sequence ID) in the same location; this is used by tests
@@ -80,7 +95,7 @@ podman rmi -f testimage &> /dev/null || true
# if ever need to change, nor in fact does it even have to be a copy of
# this testimage since all we use it for is 'true'.
# However, it does need to be multiarch :-(
-zerotag_latest=$(skopeo list-tags docker://quay.io/libpod/testimage |\
+zerotag_latest=$(skopeo list-tags docker://${testimg_base} |\
jq -r '.Tags[]' |\
sort --version-sort |\
grep '^000' |\
@@ -88,12 +103,9 @@ zerotag_latest=$(skopeo list-tags docker://quay.io/libpod/testimage |\
zerotag_next=$(printf "%08d" $((zerotag_latest + 1)))
# We don't always need to push the :00xx image, but build it anyway.
-zeroimg=quay.io/libpod/testimage:${zerotag_next}
-buildah manifest create $zeroimg
+zeroimg=${testimg_base}:${zerotag_next}
+$PODMAN manifest create $zeroimg
-# We need to use buildah because (as of 2021-02-23) only buildah has --manifest
-# and because Dan says arch emulation is not currently working on podman
-# (no further details).
# Arch emulation on Fedora requires the qemu-user-static package.
for arch in amd64 arm64 ppc64le s390x;do
# docker.io repo is usually the same name as the desired arch; except
@@ -104,16 +116,32 @@ for arch in amd64 arm64 ppc64le s390x;do
repo="${repo}v8"
fi
- ${BUILDAH} bud \
- --arch=$arch \
- --build-arg REPO=$repo \
- --manifest=testimage \
- --squash \
- .
+ # First build defines REPO, but does not have --timestamp
+ $PODMAN build \
+ --arch=$arch \
+ --build-arg REPO=$repo \
+ --squash-all \
+ --file Containerfile1 \
+ -t interim-image \
+ .
+
+ # Second build forces --timestamp, and adds to manifest. Unfortunately
+ # we can't use --squash-all with --timestamp: *all* timestamps get
+ # clobbered. This is not fixable (#14536).
+ $PODMAN build \
+ --arch=$arch \
+ --timestamp=$create_time_t \
+ --manifest=$testimg \
+ --squash \
+ --file Containerfile2 \
+ .
+
+ # No longer need the interim image
+ $PODMAN rmi interim-image
# The zero-tag image
- ${BUILDAH} pull --arch $arch docker.io/$repo/busybox:1.33.1
- ${BUILDAH} manifest add $zeroimg docker.io/$repo/busybox:1.33.1
+ $PODMAN pull --arch $arch docker.io/$repo/busybox:1.34.1
+ $PODMAN manifest add $zeroimg docker.io/$repo/busybox:1.34.1
done
# Clean up
@@ -121,14 +149,12 @@ cd /tmp
rm -rf $tmpdir
# Tag image and push (all arches) to quay.
-remote_tag=quay.io/libpod/testimage:$YMD
-podman tag testimage ${remote_tag}
cat <<EOF
If you're happy with these images, run:
- ${BUILDAH} manifest push --all ${remote_tag} docker://${remote_tag}
- ${BUILDAH} manifest push --all ${zeroimg} docker://${zeroimg}
+ podman manifest push --all ${testimg} docker://${testimg}
+ podman manifest push --all ${zeroimg} docker://${zeroimg}
(You do not always need to push the :0000 image)
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index 6868f2691..ceac48036 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -7,14 +7,14 @@ PODMAN=${PODMAN:-podman}
PODMAN_TEST_IMAGE_REGISTRY=${PODMAN_TEST_IMAGE_REGISTRY:-"quay.io"}
PODMAN_TEST_IMAGE_USER=${PODMAN_TEST_IMAGE_USER:-"libpod"}
PODMAN_TEST_IMAGE_NAME=${PODMAN_TEST_IMAGE_NAME:-"testimage"}
-PODMAN_TEST_IMAGE_TAG=${PODMAN_TEST_IMAGE_TAG:-"20210610"}
+PODMAN_TEST_IMAGE_TAG=${PODMAN_TEST_IMAGE_TAG:-"20220615"}
PODMAN_TEST_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_TEST_IMAGE_TAG"
PODMAN_TEST_IMAGE_ID=
# Remote image that we *DO NOT* fetch or keep by default; used for testing pull
# This has changed in 2021, from 0 through 3, various iterations of getting
# multiarch to work. It should change only very rarely.
-PODMAN_NONLOCAL_IMAGE_TAG=${PODMAN_NONLOCAL_IMAGE_TAG:-"00000003"}
+PODMAN_NONLOCAL_IMAGE_TAG=${PODMAN_NONLOCAL_IMAGE_TAG:-"00000004"}
PODMAN_NONLOCAL_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_NONLOCAL_IMAGE_TAG"
# Because who wants to spell that out each time?
@@ -284,7 +284,7 @@ function random_free_port() {
local port
for port in $(shuf -i ${range}); do
- if ! { exec {unused_fd}<> /dev/tcp/127.0.0.1/$port; } &>/dev/null; then
+ if port_is_free $port; then
echo $port
return
fi
@@ -293,6 +293,37 @@ function random_free_port() {
die "Could not find open port in range $range"
}
+function random_free_port_range() {
+ local size=${1?Usage: random_free_port_range SIZE (as in, number of ports)}
+
+ local maxtries=10
+ while [[ $maxtries -gt 0 ]]; do
+ local firstport=$(random_free_port)
+ local lastport=
+ for i in $(seq 1 $((size - 1))); do
+ lastport=$((firstport + i))
+ if ! port_is_free $lastport; then
+ echo "# port $lastport is in use; trying another." >&3
+ lastport=
+ break
+ fi
+ done
+ if [[ -n "$lastport" ]]; then
+ echo "$firstport-$lastport"
+ return
+ fi
+
+ maxtries=$((maxtries - 1))
+ done
+
+ die "Could not find free port range with size $size"
+}
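+# Example: `random_free_port_range 3` prints something like "41625-41627",
+# i.e. three consecutive ports that were all free when checked.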
+
+function port_is_free() {
+ local port=${1?Usage: port_is_free PORT}
+ ! { exec {unused_fd}<> /dev/tcp/127.0.0.1/$port; } &>/dev/null
+}
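+# Used both by random_free_port above and directly by tests, e.g.:
+#   if ! port_is_free $PODMAN_LOGIN_REGISTRY_PORT; then die "Socket still seems open"; fi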
+
###################
# wait_for_port # Returns once port is available on host
###################
@@ -397,25 +428,25 @@ function _ensure_pod_state() {
for i in {0..5}; do
run_podman pod inspect $1 --format "{{.State}}"
if [[ $output == "$2" ]]; then
- break
+ return
fi
sleep 0.5
done
- is "$output" "$2" "unexpected pod state"
+ die "Timed out waiting for pod $1 to enter state $2"
}
# Wait for the container's (1st arg) running state (2nd arg)
function _ensure_container_running() {
- for i in {0..5}; do
+ for i in {0..20}; do
run_podman container inspect $1 --format "{{.State.Running}}"
if [[ $output == "$2" ]]; then
- break
+ return
fi
sleep 0.5
done
- is "$output" "$2" "unexpected pod state"
+ die "Timed out waiting for container $1 to enter state running=$2"
}
###########################
diff --git a/test/system/helpers.systemd.bash b/test/system/helpers.systemd.bash
index 4bde912a4..d9abc087d 100644
--- a/test/system/helpers.systemd.bash
+++ b/test/system/helpers.systemd.bash
@@ -28,3 +28,7 @@ systemctl() {
journalctl() {
command journalctl $_DASHUSER "$@"
}
+
+systemd-run() {
+ command systemd-run $_DASHUSER "$@";
+}
diff --git a/test/testvol/Containerfile b/test/testvol/Containerfile
new file mode 100644
index 000000000..32448f5a9
--- /dev/null
+++ b/test/testvol/Containerfile
@@ -0,0 +1,9 @@
+FROM docker.io/library/golang:1.18-alpine AS build-img
+COPY ./ /go/src/github.com/containers/podman/
+WORKDIR /go/src/github.com/containers/podman
+RUN GO111MODULE=off go build -o /testvol ./test/testvol
+
+FROM alpine
+COPY --from=build-img /testvol /usr/local/bin
+WORKDIR /
+ENTRYPOINT ["/usr/local/bin/testvol", "serve"]
diff --git a/test/testvol/create.go b/test/testvol/create.go
new file mode 100644
index 000000000..d29300f0b
--- /dev/null
+++ b/test/testvol/create.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+ pluginapi "github.com/docker/go-plugins-helpers/volume"
+ "github.com/spf13/cobra"
+)
+
+var createCmd = &cobra.Command{
+ Use: "create NAME",
+ Short: "create a volume",
+ Long: `Create a volume in the volume plugin listening on --sock-name`,
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return createVol(config.sockName, args[0])
+ },
+}
+
+func createVol(sockName, volName string) error {
+ plugin, err := getPlugin(sockName)
+ if err != nil {
+ return err
+ }
+ createReq := new(pluginapi.CreateRequest)
+ createReq.Name = volName
+ return plugin.CreateVolume(createReq)
+}
diff --git a/test/testvol/list.go b/test/testvol/list.go
new file mode 100644
index 000000000..fea615a70
--- /dev/null
+++ b/test/testvol/list.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+var listCmd = &cobra.Command{
+ Use: "list",
+ Short: "list all volumes",
+ Long: `List all volumes from the volume plugin listening on --sock-name`,
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return listVol(config.sockName)
+ },
+}
+
+func listVol(sockName string) error {
+ plugin, err := getPlugin(sockName)
+ if err != nil {
+ return err
+ }
+ vols, err := plugin.ListVolumes()
+ if err != nil {
+ return err
+ }
+ for _, vol := range vols {
+ fmt.Println(vol.Name)
+ }
+ return nil
+}
diff --git a/test/testvol/main.go b/test/testvol/main.go
index 30ab365b3..dd4ba642d 100644
--- a/test/testvol/main.go
+++ b/test/testvol/main.go
@@ -1,6 +1,7 @@
package main
import (
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -8,19 +9,25 @@ import (
"time"
"github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
- Use: "testvol",
- Short: "testvol - volume plugin for Podman",
+ Use: "testvol",
+ Short: "testvol - volume plugin for Podman testing",
+ PersistentPreRunE: before,
+ SilenceUsage: true,
+}
+
+var serveCmd = &cobra.Command{
+ Use: "serve",
+ Short: "serve the volume plugin on the unix socket",
Long: `Creates simple directory volumes using the Volume Plugin API for testing volume plugin functionality`,
+ Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return startServer(config.sockName)
},
- PersistentPreRunE: before,
}
// Configuration for the volume plugin
@@ -37,9 +44,12 @@ var config = cliConfig{
}
func init() {
- rootCmd.Flags().StringVar(&config.sockName, "sock-name", config.sockName, "Name of unix socket for plugin")
- rootCmd.Flags().StringVar(&config.path, "path", "", "Path to initialize state and mount points")
+ rootCmd.PersistentFlags().StringVar(&config.sockName, "sock-name", config.sockName, "Name of unix socket for plugin")
rootCmd.PersistentFlags().StringVar(&config.logLevel, "log-level", config.logLevel, "Log messages including and over the specified level: debug, info, warn, error, fatal, panic")
+
+ serveCmd.Flags().StringVar(&config.path, "path", "", "Path to initialize state and mount points")
+
+ rootCmd.AddCommand(serveCmd, createCmd, removeCmd, listCmd)
}
func before(cmd *cobra.Command, args []string) error {
@@ -59,11 +69,8 @@ func before(cmd *cobra.Command, args []string) error {
func main() {
if err := rootCmd.Execute(); err != nil {
- logrus.Errorf("Running volume plugin: %v", err)
os.Exit(1)
}
-
- os.Exit(0)
}
// startServer runs the HTTP server and responds to requests
@@ -73,16 +80,16 @@ func startServer(socketPath string) error {
if config.path == "" {
path, err := ioutil.TempDir("", "test_volume_plugin")
if err != nil {
- return errors.Wrapf(err, "error getting directory for plugin")
+ return fmt.Errorf("error getting directory for plugin: %w", err)
}
config.path = path
} else {
pathStat, err := os.Stat(config.path)
if err != nil {
- return errors.Wrapf(err, "unable to access requested plugin state directory")
+ return fmt.Errorf("unable to access requested plugin state directory: %w", err)
}
if !pathStat.IsDir() {
- return errors.Errorf("cannot use %v as plugin state dir as it is not a directory", config.path)
+ return fmt.Errorf("cannot use %v as plugin state dir as it is not a directory", config.path)
}
}
@@ -91,7 +98,7 @@ func startServer(socketPath string) error {
server := volume.NewHandler(handle)
if err := server.ServeUnix(socketPath, 0); err != nil {
- return errors.Wrapf(err, "error starting server")
+ return fmt.Errorf("error starting server: %w", err)
}
return nil
}
@@ -140,7 +147,7 @@ func (d *DirDriver) Create(opts *volume.CreateRequest) error {
logrus.Infof("Hit Create() endpoint")
if _, exists := d.volumes[opts.Name]; exists {
- return errors.Errorf("volume with name %s already exists", opts.Name)
+ return fmt.Errorf("volume with name %s already exists", opts.Name)
}
newVol := new(dirVol)
@@ -154,7 +161,7 @@ func (d *DirDriver) Create(opts *volume.CreateRequest) error {
volPath := filepath.Join(d.volumesPath, opts.Name)
if err := os.Mkdir(volPath, 0755); err != nil {
- return errors.Wrapf(err, "error making volume directory")
+ return fmt.Errorf("error making volume directory: %w", err)
}
newVol.path = volPath
@@ -197,7 +204,7 @@ func (d *DirDriver) Get(req *volume.GetRequest) (*volume.GetResponse, error) {
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Did not find volume %s", req.Name)
- return nil, errors.Errorf("no volume with name %s found", req.Name)
+ return nil, fmt.Errorf("no volume with name %s found", req.Name)
}
logrus.Debugf("Found volume %s", req.Name)
@@ -221,19 +228,19 @@ func (d *DirDriver) Remove(req *volume.RemoveRequest) error {
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Did not find volume %s", req.Name)
- return errors.Errorf("no volume with name %s found", req.Name)
+ return fmt.Errorf("no volume with name %s found", req.Name)
}
logrus.Debugf("Found volume %s", req.Name)
if len(vol.mounts) > 0 {
logrus.Debugf("Cannot remove %s, is mounted", req.Name)
- return errors.Errorf("volume %s is mounted and cannot be removed", req.Name)
+ return fmt.Errorf("volume %s is mounted and cannot be removed", req.Name)
}
delete(d.volumes, req.Name)
if err := os.RemoveAll(vol.path); err != nil {
- return errors.Wrapf(err, "error removing mountpoint of volume %s", req.Name)
+ return fmt.Errorf("error removing mountpoint of volume %s: %w", req.Name, err)
}
logrus.Debugf("Removed volume %s", req.Name)
@@ -253,7 +260,7 @@ func (d *DirDriver) Path(req *volume.PathRequest) (*volume.PathResponse, error)
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Cannot locate volume %s", req.Name)
- return nil, errors.Errorf("no volume with name %s found", req.Name)
+ return nil, fmt.Errorf("no volume with name %s found", req.Name)
}
return &volume.PathResponse{
@@ -271,7 +278,7 @@ func (d *DirDriver) Mount(req *volume.MountRequest) (*volume.MountResponse, erro
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Cannot locate volume %s", req.Name)
- return nil, errors.Errorf("no volume with name %s found", req.Name)
+ return nil, fmt.Errorf("no volume with name %s found", req.Name)
}
vol.mounts[req.ID] = true
@@ -291,13 +298,13 @@ func (d *DirDriver) Unmount(req *volume.UnmountRequest) error {
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Cannot locate volume %s", req.Name)
- return errors.Errorf("no volume with name %s found", req.Name)
+ return fmt.Errorf("no volume with name %s found", req.Name)
}
mount := vol.mounts[req.ID]
if !mount {
logrus.Debugf("Volume %s is not mounted by %s", req.Name, req.ID)
- return errors.Errorf("volume %s is not mounted by %s", req.Name, req.ID)
+ return fmt.Errorf("volume %s is not mounted by %s", req.Name, req.ID)
}
delete(vol.mounts, req.ID)
diff --git a/test/testvol/remove.go b/test/testvol/remove.go
new file mode 100644
index 000000000..2839b0b50
--- /dev/null
+++ b/test/testvol/remove.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+ pluginapi "github.com/docker/go-plugins-helpers/volume"
+ "github.com/spf13/cobra"
+)
+
+var removeCmd = &cobra.Command{
+ Use: "remove NAME",
+ Short: "remove a volume",
+ Long: `Remove a volume in the volume plugin listening on --sock-name`,
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return removeVol(config.sockName, args[0])
+ },
+}
+
+func removeVol(sockName, volName string) error {
+ plugin, err := getPlugin(sockName)
+ if err != nil {
+ return err
+ }
+ removeReq := new(pluginapi.RemoveRequest)
+ removeReq.Name = volName
+ return plugin.RemoveVolume(removeReq)
+}
diff --git a/test/testvol/util.go b/test/testvol/util.go
new file mode 100644
index 000000000..b50bb3afb
--- /dev/null
+++ b/test/testvol/util.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/podman/v4/libpod/plugin"
+)
+
+const pluginSockDir = "/run/docker/plugins"
+
+func getSocketPath(pathOrName string) string {
+ if filepath.IsAbs(pathOrName) {
+ return pathOrName
+ }
+
+ // only a name; join it with the default path
+ return filepath.Join(pluginSockDir, pathOrName+".sock")
+}
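+// e.g. getSocketPath("testvol") returns "/run/docker/plugins/testvol.sock",
+// while an absolute path such as "/tmp/testvol.sock" is returned unchanged.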
+
+func getPluginName(pathOrName string) string {
+ return strings.TrimSuffix(filepath.Base(pathOrName), ".sock")
+}
+
+func getPlugin(sockNameOrPath string) (*plugin.VolumePlugin, error) {
+ path := getSocketPath(sockNameOrPath)
+ name := getPluginName(sockNameOrPath)
+ return plugin.GetVolumePlugin(name, path, 0)
+}
diff --git a/test/tools/go.mod b/test/tools/go.mod
index 79d1fabe8..1c2867b99 100644
--- a/test/tools/go.mod
+++ b/test/tools/go.mod
@@ -5,5 +5,5 @@ go 1.16
require (
github.com/cpuguy83/go-md2man/v2 v2.0.2
github.com/vbatts/git-validation v1.1.0
- golang.org/x/tools v0.1.10
+ golang.org/x/tools v0.1.11
)
diff --git a/test/tools/go.sum b/test/tools/go.sum
index 9b466cbea..d17c3e645 100644
--- a/test/tools/go.sum
+++ b/test/tools/go.sum
@@ -20,8 +20,8 @@ github.com/vbatts/git-validation v1.1.0/go.mod h1:QyK3uQnRYWGt/5ezd8kcpwPrm6zn9t
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -41,9 +41,6 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
+golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile
index 97dd390e8..437fc9997 100644
--- a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile
+++ b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile
@@ -32,3 +32,4 @@ check-mod: # verifies that module changes for go.mod and go.sum are checked in
.PHONY: vendor
vendor: mod
@go mod vendor -v
+
diff --git a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
index ae722d078..16d1133aa 100644
--- a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
+++ b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
@@ -20,3 +20,4 @@ go-md2man 1 "January 2015" go-md2man "User Manual"
# HISTORY
January 2015, Originally compiled by Brian Goff( cpuguy83@gmail.com )
+
diff --git a/test/tools/vendor/github.com/hashicorp/go-version/.travis.yml b/test/tools/vendor/github.com/hashicorp/go-version/.travis.yml
index b5f955d7e..01c5dc219 100644
--- a/test/tools/vendor/github.com/hashicorp/go-version/.travis.yml
+++ b/test/tools/vendor/github.com/hashicorp/go-version/.travis.yml
@@ -1,6 +1,6 @@
-language: go
+language: go
-go:
+go:
- 1.2
- 1.3
- 1.4
@@ -8,6 +8,6 @@ go:
- "1.10"
- 1.11
- 1.12
-
+
script:
- - go test
+ - go test
diff --git a/test/tools/vendor/github.com/hashicorp/go-version/LICENSE b/test/tools/vendor/github.com/hashicorp/go-version/LICENSE
index 82b4de97c..c33dcc7c9 100644
--- a/test/tools/vendor/github.com/hashicorp/go-version/LICENSE
+++ b/test/tools/vendor/github.com/hashicorp/go-version/LICENSE
@@ -351,3 +351,4 @@ Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.
+
diff --git a/test/tools/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/test/tools/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
index f8c9aa99e..949b77e30 100644
--- a/test/tools/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
+++ b/test/tools/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -9,7 +9,7 @@ See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/window
```go
import (
"syscall"
-
+
sequences "github.com/konsorten/go-windows-terminal-sequences"
)
diff --git a/test/tools/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/test/tools/vendor/github.com/sirupsen/logrus/CHANGELOG.md
index 95ffc62ce..f62cbd24a 100644
--- a/test/tools/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ b/test/tools/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -1,7 +1,7 @@
# 1.4.1
This new release introduces:
* Enhance TextFormatter to not print caller information when they are empty (#944)
- * Remove dependency on golang.org/x/crypto (#932, #943)
+ * Remove dependency on golang.org/x/crypto (#932, #943)
Fixes:
* Fix Entry.WithContext method to return a copy of the initial entry (#941)
diff --git a/test/tools/vendor/github.com/sirupsen/logrus/README.md b/test/tools/vendor/github.com/sirupsen/logrus/README.md
index 3bf033166..a4796eb07 100644
--- a/test/tools/vendor/github.com/sirupsen/logrus/README.md
+++ b/test/tools/vendor/github.com/sirupsen/logrus/README.md
@@ -84,7 +84,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr
```
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
-environment via benchmarks:
+environment via benchmarks:
```
go test -bench=.*CallerTracing
```
diff --git a/test/tools/vendor/github.com/sirupsen/logrus/appveyor.yml b/test/tools/vendor/github.com/sirupsen/logrus/appveyor.yml
index 1d4d64201..96c2ce15f 100644
--- a/test/tools/vendor/github.com/sirupsen/logrus/appveyor.yml
+++ b/test/tools/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -1,14 +1,14 @@
version: "{build}"
platform: x64
clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
+environment:
GOPATH: c:\gopath
-branches:
+branches:
only:
- master
-install:
+install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- go version
-build_script:
+build_script:
- go get -t
- go test
diff --git a/test/tools/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/test/tools/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
index ff6ff7b99..3c4f43f91 100644
--- a/test/tools/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
+++ b/test/tools/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -10,3 +10,4 @@ func isTerminal(fd int) bool {
_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
return err == nil
}
+
diff --git a/test/tools/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/test/tools/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
index 163c468d5..355dc966f 100644
--- a/test/tools/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ b/test/tools/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -10,3 +10,4 @@ func isTerminal(fd int) bool {
_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
return err == nil
}
+
diff --git a/test/tools/vendor/github.com/vbatts/git-validation/README.md b/test/tools/vendor/github.com/vbatts/git-validation/README.md
index 000116e89..354276e02 100644
--- a/test/tools/vendor/github.com/vbatts/git-validation/README.md
+++ b/test/tools/vendor/github.com/vbatts/git-validation/README.md
@@ -73,7 +73,7 @@ vbatts@valse ~/src/vb/git-validation (master) $ git-validation -v
Here's a failure:
```console
-vbatts@valse ~/src/vb/git-validation (master) $ git-validation
+vbatts@valse ~/src/vb/git-validation (master) $ git-validation
* 49f51a8 "README: adding install and usage" ... FAIL
- FAIL - does not have a valid DCO
* d614ccf "*: run tests in a runner" ... PASS
@@ -103,3 +103,4 @@ See [`./rules/`](./rules/).
Feel free to contribute more.
Otherwise, by using `validate` package API directly, rules can be handed directly to the `validate.Runner`.
+
diff --git a/test/tools/vendor/golang.org/x/mod/module/module.go b/test/tools/vendor/golang.org/x/mod/module/module.go
index 355b5a456..c26d1d29e 100644
--- a/test/tools/vendor/golang.org/x/mod/module/module.go
+++ b/test/tools/vendor/golang.org/x/mod/module/module.go
@@ -15,7 +15,7 @@
// but additional checking functions, most notably Check, verify that
// a particular path, version pair is valid.
//
-// Escaped Paths
+// # Escaped Paths
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
@@ -55,7 +55,7 @@
// Import paths have never allowed exclamation marks, so there is no
// need to define how to escape a literal !.
//
-// Unicode Restrictions
+// # Unicode Restrictions
//
// Today, paths are disallowed from using Unicode.
//
@@ -102,9 +102,9 @@ import (
"strings"
"unicode"
"unicode/utf8"
+ "errors"
"golang.org/x/mod/semver"
- errors "golang.org/x/xerrors"
)
// A Version (for clients, a module.Version) is defined by a module path and version pair.
diff --git a/test/tools/vendor/golang.org/x/tools/cmd/goimports/doc.go b/test/tools/vendor/golang.org/x/tools/cmd/goimports/doc.go
index 5a5b9005f..18a3ad448 100644
--- a/test/tools/vendor/golang.org/x/tools/cmd/goimports/doc.go
+++ b/test/tools/vendor/golang.org/x/tools/cmd/goimports/doc.go
@@ -3,29 +3,33 @@
// license that can be found in the LICENSE file.
/*
-
Command goimports updates your Go import lines,
adding missing ones and removing unreferenced ones.
- $ go install golang.org/x/tools/cmd/goimports@latest
+ $ go install golang.org/x/tools/cmd/goimports@latest
In addition to fixing imports, goimports also formats
your code in the same style as gofmt so it can be used
as a replacement for your editor's gofmt-on-save hook.
For emacs, make sure you have the latest go-mode.el:
- https://github.com/dominikh/go-mode.el
+
+ https://github.com/dominikh/go-mode.el
+
Then in your .emacs file:
- (setq gofmt-command "goimports")
- (add-hook 'before-save-hook 'gofmt-before-save)
+
+ (setq gofmt-command "goimports")
+ (add-hook 'before-save-hook 'gofmt-before-save)
For vim, set "gofmt_command" to "goimports":
- https://golang.org/change/39c724dd7f252
- https://golang.org/wiki/IDEsAndTextEditorPlugins
- etc
+
+ https://golang.org/change/39c724dd7f252
+ https://golang.org/wiki/IDEsAndTextEditorPlugins
+ etc
For GoSublime, follow the steps described here:
- http://michaelwhatcott.com/gosublime-goimports/
+
+ http://michaelwhatcott.com/gosublime-goimports/
For other editors, you probably know what to do.
@@ -39,9 +43,8 @@ working and see what goimports is doing.
File bugs or feature requests at:
- https://golang.org/issues/new?title=x/tools/cmd/goimports:+
+ https://golang.org/issues/new?title=x/tools/cmd/goimports:+
Happy hacking!
-
*/
package main // import "golang.org/x/tools/cmd/goimports"
diff --git a/test/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/test/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
index a5c6d6d4f..9fa5aa192 100644
--- a/test/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
+++ b/test/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -22,9 +22,9 @@ import (
// additional whitespace abutting a node to be enclosed by it.
// In this example:
//
-// z := x + y // add them
-// <-A->
-// <----B----->
+// z := x + y // add them
+// <-A->
+// <----B----->
//
// the ast.BinaryExpr(+) node is considered to enclose interval B
// even though its [Pos()..End()) is actually only interval A.
@@ -43,10 +43,10 @@ import (
// interior whitespace of path[0].
// In this example:
//
-// z := x + y // add them
-// <--C--> <---E-->
-// ^
-// D
+// z := x + y // add them
+// <--C--> <---E-->
+// ^
+// D
//
// intervals C, D and E are inexact. C is contained by the
// z-assignment statement, because it spans three of its children (:=,
@@ -54,12 +54,11 @@ import (
// interior whitespace of the assignment. E is considered interior
// whitespace of the BlockStmt containing the assignment.
//
-// Precondition: [start, end) both lie within the same file as root.
-// TODO(adonovan): return (nil, false) in this case and remove precond.
-// Requires FileSet; see loader.tokenFileContainsPos.
-//
-// Postcondition: path is never nil; it always contains at least 'root'.
-//
+// The resulting path is never empty; it always contains at least the
+// 'root' *ast.File. Ideally PathEnclosingInterval would reject
+// intervals that lie wholly or partially outside the range of the
+// file, but unfortunately ast.File records only the token.Pos of
+// the 'package' keyword, but not of the start of the file itself.
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
@@ -135,6 +134,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
return false // inexact: overlaps multiple children
}
+ // Ensure [start,end) is nondecreasing.
if start > end {
start, end = end, start
}
@@ -162,7 +162,6 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
// tokenNode is a dummy implementation of ast.Node for a single token.
// They are used transiently by PathEnclosingInterval but never escape
// this package.
-//
type tokenNode struct {
pos token.Pos
end token.Pos
@@ -183,7 +182,6 @@ func tok(pos token.Pos, len int) ast.Node {
// childrenOf returns the direct non-nil children of ast.Node n.
// It may include fake ast.Node implementations for bare tokens.
// it is not safe to call (e.g.) ast.Walk on such nodes.
-//
func childrenOf(n ast.Node) []ast.Node {
var children []ast.Node
@@ -488,7 +486,6 @@ func (sl byPos) Swap(i, j int) {
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
// StarExpr) we could be much more specific given the path to the AST
// root. Perhaps we should do that.
-//
func NodeDescription(n ast.Node) string {
switch n := n.(type) {
case *ast.ArrayType:
diff --git a/test/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/test/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go
index 2087ceec9..18d1adb05 100644
--- a/test/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go
+++ b/test/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go
@@ -22,8 +22,11 @@ func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
// If name is not empty, it is used to rename the import.
//
// For example, calling
+//
// AddNamedImport(fset, f, "pathpkg", "path")
+//
// adds
+//
// import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
if imports(f, name, path) {
@@ -270,8 +273,8 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
}
if j > 0 {
lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
- lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
- line := fset.Position(impspec.Path.ValuePos).Line
+ lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line
+ line := fset.PositionFor(impspec.Path.ValuePos, false).Line
// We deleted an entry but now there may be
// a blank line-sized hole where the import was.
diff --git a/test/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/test/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
index 6d9ca23e2..f430b21b9 100644
--- a/test/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
+++ b/test/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -41,7 +41,6 @@ type ApplyFunc func(*Cursor) bool
// Children are traversed in the order in which they appear in the
// respective node's struct definition. A package's files are
// traversed in the filenames' alphabetical order.
-//
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
parent := &struct{ ast.Node }{root}
defer func() {
@@ -65,8 +64,8 @@ var abort = new(int) // singleton, to signal termination of Apply
// c.Parent(), and f is the field identifier with name c.Name(),
// the following invariants hold:
//
-// p.f == c.Node() if c.Index() < 0
-// p.f[c.Index()] == c.Node() if c.Index() >= 0
+// p.f == c.Node() if c.Index() < 0
+// p.f[c.Index()] == c.Node() if c.Index() >= 0
//
// The methods Replace, Delete, InsertBefore, and InsertAfter
// can be used to change the AST without disrupting Apply.
@@ -294,6 +293,9 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.
a.apply(n, "Fields", nil, n.Fields)
case *ast.FuncType:
+ if tparams := typeparams.ForFuncType(n); tparams != nil {
+ a.apply(n, "TypeParams", nil, tparams)
+ }
a.apply(n, "Params", nil, n.Params)
a.apply(n, "Results", nil, n.Results)
@@ -406,6 +408,9 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.
case *ast.TypeSpec:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
+ if tparams := typeparams.ForTypeSpec(n); tparams != nil {
+ a.apply(n, "TypeParams", nil, tparams)
+ }
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Comment", nil, n.Comment)
diff --git a/test/tools/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/test/tools/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
index 9887f7e7a..798fe599b 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
@@ -40,12 +40,12 @@ var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
// If fastWalk returns filepath.SkipDir, the directory is skipped.
//
// Unlike filepath.Walk:
-// * file stat calls must be done by the user.
+// - file stat calls must be done by the user.
// The only provided metadata is the file type, which does not include
// any permission bits.
-// * multiple goroutines stat the filesystem concurrently. The provided
+// - multiple goroutines stat the filesystem concurrently. The provided
// walkFn must be safe for concurrent use.
-// * fastWalk can follow symlinks if walkFn returns the TraverseLink
+// - fastWalk can follow symlinks if walkFn returns the TraverseLink
// sentinel error. It is the walkFn's responsibility to prevent
// fastWalk from going into symlink cycles.
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
diff --git a/test/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/test/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index f75336834..67256dc39 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -264,8 +264,10 @@ func cmdDebugStr(cmd *exec.Cmd) string {
env := make(map[string]string)
for _, kv := range cmd.Env {
split := strings.SplitN(kv, "=", 2)
- k, v := split[0], split[1]
- env[k] = v
+ if len(split) == 2 {
+ k, v := split[0], split[1]
+ env[k] = v
+ }
}
var args []string
diff --git a/test/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/test/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
index 925ff5356..168405322 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
@@ -175,8 +175,8 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
// walk walks through the given path.
func (w *walker) walk(path string, typ os.FileMode) error {
- dir := filepath.Dir(path)
if typ.IsRegular() {
+ dir := filepath.Dir(path)
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
@@ -209,12 +209,7 @@ func (w *walker) walk(path string, typ os.FileMode) error {
// Emacs noise.
return nil
}
- fi, err := os.Lstat(path)
- if err != nil {
- // Just ignore it.
- return nil
- }
- if w.shouldTraverse(dir, fi) {
+ if w.shouldTraverse(path) {
return fastwalk.ErrTraverseLink
}
}
@@ -224,13 +219,8 @@ func (w *walker) walk(path string, typ os.FileMode) error {
// shouldTraverse reports whether the symlink fi, found in dir,
// should be followed. It makes sure symlinks were never visited
// before to avoid symlink loops.
-func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
- path := filepath.Join(dir, fi.Name())
- target, err := filepath.EvalSymlinks(path)
- if err != nil {
- return false
- }
- ts, err := os.Stat(target)
+func (w *walker) shouldTraverse(path string) bool {
+ ts, err := os.Stat(path)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return false
@@ -238,7 +228,7 @@ func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
if !ts.IsDir() {
return false
}
- if w.shouldSkipDir(ts, dir) {
+ if w.shouldSkipDir(ts, filepath.Dir(path)) {
return false
}
// Check for symlink loops by statting each directory component
diff --git a/test/tools/vendor/golang.org/x/tools/internal/imports/imports.go b/test/tools/vendor/golang.org/x/tools/internal/imports/imports.go
index 25973989e..95a88383a 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -103,12 +103,17 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
return formatFile(fileSet, file, src, nil, opt)
}
-func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
- mergeImports(fileSet, file)
- sortImports(opt.LocalPrefix, fileSet, file)
- imps := astutil.Imports(fileSet, file)
+// formatFile formats the file syntax tree.
+// It may mutate the token.FileSet.
+//
+// If an adjust function is provided, it is called after formatting
+// with the original source (formatFile's src parameter) and the
+// formatted file, and returns the postprocessed result.
+func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
+ mergeImports(file)
+ sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
var spacesBefore []string // import paths we need spaces before
- for _, impSection := range imps {
+ for _, impSection := range astutil.Imports(fset, file) {
// Within each block of contiguous imports, see if any
// import lines are in different group numbers. If so,
// we'll need to put a space between them so it's
@@ -132,7 +137,7 @@ func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
var buf bytes.Buffer
- err := printConfig.Fprint(&buf, fileSet, file)
+ err := printConfig.Fprint(&buf, fset, file)
if err != nil {
return nil, err
}
@@ -276,11 +281,11 @@ func cutSpace(b []byte) (before, middle, after []byte) {
}
// matchSpace reformats src to use the same space context as orig.
-// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
-// 2) matchSpace copies the indentation of the first non-blank line in orig
-// to every non-blank line in src.
-// 3) matchSpace copies the trailing space from orig and uses it in place
-// of src's trailing space.
+// 1. If orig begins with blank lines, matchSpace inserts them at the beginning of src.
+// 2. matchSpace copies the indentation of the first non-blank line in orig
+// to every non-blank line in src.
+// 3. matchSpace copies the trailing space from orig and uses it in place
+// of src's trailing space.
func matchSpace(orig []byte, src []byte) []byte {
before, _, after := cutSpace(orig)
i := bytes.LastIndex(before, []byte{'\n'})
diff --git a/test/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go b/test/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go
index dc52372e4..85144db1d 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// Hacked up copy of go/ast/import.go
+// Modified to use a single token.File in preference to a FileSet.
package imports
@@ -16,7 +17,9 @@ import (
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
-func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
+//
+// It may mutate the token.File.
+func sortImports(localPrefix string, tokFile *token.File, f *ast.File) {
for i, d := range f.Decls {
d, ok := d.(*ast.GenDecl)
if !ok || d.Tok != token.IMPORT {
@@ -39,21 +42,21 @@ func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
i := 0
specs := d.Specs[:0]
for j, s := range d.Specs {
- if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
+ if j > i && tokFile.Line(s.Pos()) > 1+tokFile.Line(d.Specs[j-1].End()) {
// j begins a new run. End this one.
- specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...)
+ specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:j])...)
i = j
}
}
- specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...)
+ specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:])...)
d.Specs = specs
// Deduping can leave a blank line before the rparen; clean that up.
if len(d.Specs) > 0 {
lastSpec := d.Specs[len(d.Specs)-1]
- lastLine := fset.Position(lastSpec.Pos()).Line
- if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
- fset.File(d.Rparen).MergeLine(rParenLine - 1)
+ lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line
+ if rParenLine := tokFile.PositionFor(d.Rparen, false).Line; rParenLine > lastLine+1 {
+ tokFile.MergeLine(rParenLine - 1) // has side effects!
}
}
}
@@ -62,7 +65,7 @@ func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
// mergeImports merges all the import declarations into the first one.
// Taken from golang.org/x/tools/ast/astutil.
// This does not adjust line numbers properly
-func mergeImports(fset *token.FileSet, f *ast.File) {
+func mergeImports(f *ast.File) {
if len(f.Decls) <= 1 {
return
}
@@ -144,7 +147,9 @@ type posSpan struct {
End token.Pos
}
-func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
+// sortSpecs sorts the import specs within each import decl.
+// It may mutate the token.File.
+func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast.Spec) []ast.Spec {
// Can't short-circuit here even if specs are already sorted,
// since they might yet need deduplication.
// A lone import, however, may be safely ignored.
@@ -160,7 +165,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
// Identify comments in this range.
// Any comment from pos[0].Start to the final line counts.
- lastLine := fset.Position(pos[len(pos)-1].End).Line
+ lastLine := tokFile.Line(pos[len(pos)-1].End)
cstart := len(f.Comments)
cend := len(f.Comments)
for i, g := range f.Comments {
@@ -170,7 +175,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
if i < cstart {
cstart = i
}
- if fset.Position(g.End()).Line > lastLine {
+ if tokFile.Line(g.End()) > lastLine {
cend = i
break
}
@@ -203,7 +208,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
deduped = append(deduped, s)
} else {
p := s.Pos()
- fset.File(p).MergeLine(fset.Position(p).Line)
+ tokFile.MergeLine(tokFile.Line(p)) // has side effects!
}
}
specs = deduped
@@ -234,21 +239,21 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
// Fixup comments can insert blank lines, because import specs are on different lines.
// We remove those blank lines here by merging import spec to the first import spec line.
- firstSpecLine := fset.Position(specs[0].Pos()).Line
+ firstSpecLine := tokFile.Line(specs[0].Pos())
for _, s := range specs[1:] {
p := s.Pos()
- line := fset.File(p).Line(p)
+ line := tokFile.Line(p)
for previousLine := line - 1; previousLine >= firstSpecLine; {
// MergeLine can panic. Avoid the panic at the cost of not removing the blank line
// golang/go#50329
- if previousLine > 0 && previousLine < fset.File(p).LineCount() {
- fset.File(p).MergeLine(previousLine)
+ if previousLine > 0 && previousLine < tokFile.LineCount() {
+ tokFile.MergeLine(previousLine) // has side effects!
previousLine--
} else {
// try to gather some data to diagnose how this could happen
req := "Please report what the imports section of your go file looked like."
log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. %s",
- firstSpecLine, line, previousLine, fset.File(p).LineCount(), req)
+ firstSpecLine, line, previousLine, tokFile.LineCount(), req)
}
}
}
diff --git a/test/tools/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/test/tools/vendor/golang.org/x/tools/internal/imports/zstdlib.go
index 7de2be9b4..437fbb78d 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/imports/zstdlib.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/imports/zstdlib.go
@@ -88,6 +88,7 @@ var stdlib = map[string][]string{
"ContainsAny",
"ContainsRune",
"Count",
+ "Cut",
"Equal",
"EqualFold",
"ErrTooLarge",
@@ -711,6 +712,11 @@ var stdlib = map[string][]string{
"ValueConverter",
"Valuer",
},
+ "debug/buildinfo": []string{
+ "BuildInfo",
+ "Read",
+ "ReadFile",
+ },
"debug/dwarf": []string{
"AddrType",
"ArrayType",
@@ -1944,6 +1950,7 @@ var stdlib = map[string][]string{
"R_PPC64_REL24_NOTOC",
"R_PPC64_REL32",
"R_PPC64_REL64",
+ "R_PPC64_RELATIVE",
"R_PPC64_SECTOFF_DS",
"R_PPC64_SECTOFF_LO_DS",
"R_PPC64_TLS",
@@ -2547,6 +2554,7 @@ var stdlib = map[string][]string{
"Symbol",
},
"debug/plan9obj": []string{
+ "ErrNoSymbols",
"File",
"FileHeader",
"Magic386",
@@ -2906,6 +2914,7 @@ var stdlib = map[string][]string{
"Importer",
"IncDecStmt",
"IndexExpr",
+ "IndexListExpr",
"Inspect",
"InterfaceType",
"IsExported",
@@ -3179,6 +3188,7 @@ var stdlib = map[string][]string{
"SUB",
"SUB_ASSIGN",
"SWITCH",
+ "TILDE",
"TYPE",
"Token",
"UnaryPrec",
@@ -3187,6 +3197,7 @@ var stdlib = map[string][]string{
"XOR_ASSIGN",
},
"go/types": []string{
+ "ArgumentError",
"Array",
"AssertableTo",
"AssignableTo",
@@ -3205,6 +3216,7 @@ var stdlib = map[string][]string{
"Complex64",
"Config",
"Const",
+ "Context",
"ConvertibleTo",
"DefPredeclaredTestFuncs",
"Default",
@@ -3224,6 +3236,8 @@ var stdlib = map[string][]string{
"ImporterFrom",
"Info",
"Initializer",
+ "Instance",
+ "Instantiate",
"Int",
"Int16",
"Int32",
@@ -3254,6 +3268,7 @@ var stdlib = map[string][]string{
"NewChan",
"NewChecker",
"NewConst",
+ "NewContext",
"NewField",
"NewFunc",
"NewInterface",
@@ -3268,10 +3283,14 @@ var stdlib = map[string][]string{
"NewPointer",
"NewScope",
"NewSignature",
+ "NewSignatureType",
"NewSlice",
"NewStruct",
+ "NewTerm",
"NewTuple",
"NewTypeName",
+ "NewTypeParam",
+ "NewUnion",
"NewVar",
"Nil",
"Object",
@@ -3296,11 +3315,15 @@ var stdlib = map[string][]string{
"StdSizes",
"String",
"Struct",
+ "Term",
"Tuple",
"Typ",
"Type",
"TypeAndValue",
+ "TypeList",
"TypeName",
+ "TypeParam",
+ "TypeParamList",
"TypeString",
"Uint",
"Uint16",
@@ -3308,6 +3331,7 @@ var stdlib = map[string][]string{
"Uint64",
"Uint8",
"Uintptr",
+ "Union",
"Universe",
"Unsafe",
"UnsafePointer",
@@ -4080,9 +4104,11 @@ var stdlib = map[string][]string{
"SRV",
"SplitHostPort",
"TCPAddr",
+ "TCPAddrFromAddrPort",
"TCPConn",
"TCPListener",
"UDPAddr",
+ "UDPAddrFromAddrPort",
"UDPConn",
"UnixAddr",
"UnixConn",
@@ -4142,6 +4168,7 @@ var stdlib = map[string][]string{
"ListenAndServe",
"ListenAndServeTLS",
"LocalAddrContextKey",
+ "MaxBytesHandler",
"MaxBytesReader",
"MethodConnect",
"MethodDelete",
@@ -4338,6 +4365,25 @@ var stdlib = map[string][]string{
"ParseDate",
"ReadMessage",
},
+ "net/netip": []string{
+ "Addr",
+ "AddrFrom16",
+ "AddrFrom4",
+ "AddrFromSlice",
+ "AddrPort",
+ "AddrPortFrom",
+ "IPv4Unspecified",
+ "IPv6LinkLocalAllNodes",
+ "IPv6Unspecified",
+ "MustParseAddr",
+ "MustParseAddrPort",
+ "MustParsePrefix",
+ "ParseAddr",
+ "ParseAddrPort",
+ "ParsePrefix",
+ "Prefix",
+ "PrefixFrom",
+ },
"net/rpc": []string{
"Accept",
"Call",
@@ -4641,6 +4687,8 @@ var stdlib = map[string][]string{
"Method",
"New",
"NewAt",
+ "Pointer",
+ "PointerTo",
"Ptr",
"PtrTo",
"RecvDir",
@@ -4819,9 +4867,11 @@ var stdlib = map[string][]string{
},
"runtime/debug": []string{
"BuildInfo",
+ "BuildSetting",
"FreeOSMemory",
"GCStats",
"Module",
+ "ParseBuildInfo",
"PrintStack",
"ReadBuildInfo",
"ReadGCStats",
@@ -4939,11 +4989,13 @@ var stdlib = map[string][]string{
},
"strings": []string{
"Builder",
+ "Clone",
"Compare",
"Contains",
"ContainsAny",
"ContainsRune",
"Count",
+ "Cut",
"EqualFold",
"Fields",
"FieldsFunc",
@@ -9793,6 +9845,7 @@ var stdlib = map[string][]string{
"Syscall18",
"Syscall6",
"Syscall9",
+ "SyscallN",
"Sysctl",
"SysctlUint32",
"Sysctlnode",
@@ -10202,7 +10255,6 @@ var stdlib = map[string][]string{
"Value",
"ValueError",
"ValueOf",
- "Wrapper",
},
"testing": []string{
"AllocsPerRun",
@@ -10213,9 +10265,11 @@ var stdlib = map[string][]string{
"CoverBlock",
"CoverMode",
"Coverage",
+ "F",
"Init",
"InternalBenchmark",
"InternalExample",
+ "InternalFuzzTarget",
"InternalTest",
"M",
"Main",
@@ -10313,9 +10367,11 @@ var stdlib = map[string][]string{
"ActionNode",
"BoolNode",
"BranchNode",
+ "BreakNode",
"ChainNode",
"CommandNode",
"CommentNode",
+ "ContinueNode",
"DotNode",
"FieldNode",
"IdentifierNode",
@@ -10329,9 +10385,11 @@ var stdlib = map[string][]string{
"Node",
"NodeAction",
"NodeBool",
+ "NodeBreak",
"NodeChain",
"NodeCommand",
"NodeComment",
+ "NodeContinue",
"NodeDot",
"NodeField",
"NodeIdentifier",
@@ -10727,6 +10785,7 @@ var stdlib = map[string][]string{
"IsSurrogate",
},
"unicode/utf8": []string{
+ "AppendRune",
"DecodeLastRune",
"DecodeLastRuneInString",
"DecodeRune",
diff --git a/test/tools/vendor/golang.org/x/tools/internal/typeparams/common.go b/test/tools/vendor/golang.org/x/tools/internal/typeparams/common.go
index ab6b30b83..25a1426d3 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -16,11 +16,10 @@
// Additionally, this package contains common utilities for working with the
// new generic constructs, to supplement the standard library APIs. Notably,
// the StructuralTerms API computes a minimal representation of the structural
-// restrictions on a type parameter. In the future, this API may be available
-// from go/types.
+// restrictions on a type parameter.
//
-// See the example/README.md for a more detailed guide on how to update tools
-// to support generics.
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
package typeparams
import (
@@ -121,15 +120,15 @@ func OriginMethod(fn *types.Func) *types.Func {
//
// For example, consider the following type declarations:
//
-// type Interface[T any] interface {
-// Accept(T)
-// }
+// type Interface[T any] interface {
+// Accept(T)
+// }
//
-// type Container[T any] struct {
-// Element T
-// }
+// type Container[T any] struct {
+// Element T
+// }
//
-// func (c Container[T]) Accept(t T) { c.Element = t }
+// func (c Container[T]) Accept(t T) { c.Element = t }
//
// In this case, GenericAssignableTo reports that instantiations of Container
// are assignable to the corresponding instantiation of Interface.
diff --git a/test/tools/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/test/tools/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 000000000..993135ec9
--- /dev/null
+++ b/test/tools/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types,
+ }
+
+ terms, err := _NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) -> empty type set of interface.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ }
+ // https://go.dev/ref/spec#Core_types
+ // "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+ // <-chan E depending on the direction of the directional channels present."
+ for chans := identical; chans < len(terms); chans++ {
+ curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+ if !ok {
+ return nil
+ }
+ if !types.Identical(ch.Elem(), curr.Elem()) {
+ return nil // channel elements are not identical.
+ }
+ if ch.Dir() == types.SendRecv {
+ // ch is bidirectional. We can safely always use curr's direction.
+ ch = curr
+ } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+ // ch and curr are not bidirectional and not the same direction.
+ return nil
+ }
+ }
+ return ch
+}
+
+// _NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// _NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, _NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, _NormalTerms returns ErrEmptyTypeSet.
+//
+// _NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func _NormalTerms(typ types.Type) ([]*Term, error) {
+ switch typ := typ.(type) {
+ case *TypeParam:
+ return StructuralTerms(typ)
+ case *Union:
+ return UnionTermSet(typ)
+ case *types.Interface:
+ return InterfaceTermSet(typ)
+ default:
+ return []*Term{NewTerm(false, typ)}, nil
+ }
+}
diff --git a/test/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/test/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go
index 090f142a5..9c631b651 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -24,20 +24,22 @@ var ErrEmptyTypeSet = errors.New("empty type set")
// Structural type restrictions of a type parameter are created via
// non-interface types embedded in its constraint interface (directly, or via a
// chain of interface embeddings). For example, in the declaration
-// type T[P interface{~int; m()}] int
+//
+// type T[P interface{~int; m()}] int
+//
// the structural restriction of the type parameter P is ~int.
//
// With interface embedding and unions, the specification of structural type
// restrictions may be arbitrarily complex. For example, consider the
// following:
//
-// type A interface{ ~string|~[]byte }
+// type A interface{ ~string|~[]byte }
//
-// type B interface{ int|string }
+// type B interface{ int|string }
//
-// type C interface { ~string|~int }
+// type C interface { ~string|~int }
//
-// type T[P interface{ A|B; C }] int
+// type T[P interface{ A|B; C }] int
//
// In this example, the structural type restriction of P is ~string|int: A|B
// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
diff --git a/test/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/test/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go
index 10857d504..933106a23 100644
--- a/test/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go
+++ b/test/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -97,15 +97,6 @@ func (xl termlist) norm() termlist {
return rl
}
-// If the type set represented by xl is specified by a single (non-𝓤) term,
-// structuralType returns that type. Otherwise it returns nil.
-func (xl termlist) structuralType() types.Type {
- if nl := xl.norm(); len(nl) == 1 {
- return nl[0].typ // if nl.isAll() then typ is nil, which is ok
- }
- return nil
-}
-
// union returns the union xl ∪ yl.
func (xl termlist) union(yl termlist) termlist {
return append(xl, yl...).norm()
diff --git a/test/tools/vendor/golang.org/x/xerrors/LICENSE b/test/tools/vendor/golang.org/x/xerrors/LICENSE
deleted file mode 100644
index e4a47e17f..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2019 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/test/tools/vendor/golang.org/x/xerrors/PATENTS b/test/tools/vendor/golang.org/x/xerrors/PATENTS
deleted file mode 100644
index 733099041..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/test/tools/vendor/golang.org/x/xerrors/README b/test/tools/vendor/golang.org/x/xerrors/README
deleted file mode 100644
index aac7867a5..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/README
+++ /dev/null
@@ -1,2 +0,0 @@
-This repository holds the transition packages for the new Go 1.13 error values.
-See golang.org/design/29934-error-values.
diff --git a/test/tools/vendor/golang.org/x/xerrors/adaptor.go b/test/tools/vendor/golang.org/x/xerrors/adaptor.go
deleted file mode 100644
index 4317f2483..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/adaptor.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "strconv"
-)
-
-// FormatError calls the FormatError method of f with an errors.Printer
-// configured according to s and verb, and writes the result to s.
-func FormatError(f Formatter, s fmt.State, verb rune) {
- // Assuming this function is only called from the Format method, and given
- // that FormatError takes precedence over Format, it cannot be called from
- // any package that supports errors.Formatter. It is therefore safe to
- // disregard that State may be a specific printer implementation and use one
- // of our choice instead.
-
- // limitations: does not support printing error as Go struct.
-
- var (
- sep = " " // separator before next error
- p = &state{State: s}
- direct = true
- )
-
- var err error = f
-
- switch verb {
- // Note that this switch must match the preference order
- // for ordinary string printing (%#v before %+v, and so on).
-
- case 'v':
- if s.Flag('#') {
- if stringer, ok := err.(fmt.GoStringer); ok {
- io.WriteString(&p.buf, stringer.GoString())
- goto exit
- }
- // proceed as if it were %v
- } else if s.Flag('+') {
- p.printDetail = true
- sep = "\n - "
- }
- case 's':
- case 'q', 'x', 'X':
- // Use an intermediate buffer in the rare cases that precision,
- // truncation, or one of the alternative verbs (q, x, and X) are
- // specified.
- direct = false
-
- default:
- p.buf.WriteString("%!")
- p.buf.WriteRune(verb)
- p.buf.WriteByte('(')
- switch {
- case err != nil:
- p.buf.WriteString(reflect.TypeOf(f).String())
- default:
- p.buf.WriteString("<nil>")
- }
- p.buf.WriteByte(')')
- io.Copy(s, &p.buf)
- return
- }
-
-loop:
- for {
- switch v := err.(type) {
- case Formatter:
- err = v.FormatError((*printer)(p))
- case fmt.Formatter:
- v.Format(p, 'v')
- break loop
- default:
- io.WriteString(&p.buf, v.Error())
- break loop
- }
- if err == nil {
- break
- }
- if p.needColon || !p.printDetail {
- p.buf.WriteByte(':')
- p.needColon = false
- }
- p.buf.WriteString(sep)
- p.inDetail = false
- p.needNewline = false
- }
-
-exit:
- width, okW := s.Width()
- prec, okP := s.Precision()
-
- if !direct || (okW && width > 0) || okP {
- // Construct format string from State s.
- format := []byte{'%'}
- if s.Flag('-') {
- format = append(format, '-')
- }
- if s.Flag('+') {
- format = append(format, '+')
- }
- if s.Flag(' ') {
- format = append(format, ' ')
- }
- if okW {
- format = strconv.AppendInt(format, int64(width), 10)
- }
- if okP {
- format = append(format, '.')
- format = strconv.AppendInt(format, int64(prec), 10)
- }
- format = append(format, string(verb)...)
- fmt.Fprintf(s, string(format), p.buf.String())
- } else {
- io.Copy(s, &p.buf)
- }
-}
-
-var detailSep = []byte("\n ")
-
-// state tracks error printing state. It implements fmt.State.
-type state struct {
- fmt.State
- buf bytes.Buffer
-
- printDetail bool
- inDetail bool
- needColon bool
- needNewline bool
-}
-
-func (s *state) Write(b []byte) (n int, err error) {
- if s.printDetail {
- if len(b) == 0 {
- return 0, nil
- }
- if s.inDetail && s.needColon {
- s.needNewline = true
- if b[0] == '\n' {
- b = b[1:]
- }
- }
- k := 0
- for i, c := range b {
- if s.needNewline {
- if s.inDetail && s.needColon {
- s.buf.WriteByte(':')
- s.needColon = false
- }
- s.buf.Write(detailSep)
- s.needNewline = false
- }
- if c == '\n' {
- s.buf.Write(b[k:i])
- k = i + 1
- s.needNewline = true
- }
- }
- s.buf.Write(b[k:])
- if !s.inDetail {
- s.needColon = true
- }
- } else if !s.inDetail {
- s.buf.Write(b)
- }
- return len(b), nil
-}
-
-// printer wraps a state to implement an xerrors.Printer.
-type printer state
-
-func (s *printer) Print(args ...interface{}) {
- if !s.inDetail || s.printDetail {
- fmt.Fprint((*state)(s), args...)
- }
-}
-
-func (s *printer) Printf(format string, args ...interface{}) {
- if !s.inDetail || s.printDetail {
- fmt.Fprintf((*state)(s), format, args...)
- }
-}
-
-func (s *printer) Detail() bool {
- s.inDetail = true
- return s.printDetail
-}
diff --git a/test/tools/vendor/golang.org/x/xerrors/codereview.cfg b/test/tools/vendor/golang.org/x/xerrors/codereview.cfg
deleted file mode 100644
index 3f8b14b64..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/codereview.cfg
+++ /dev/null
@@ -1 +0,0 @@
-issuerepo: golang/go
diff --git a/test/tools/vendor/golang.org/x/xerrors/doc.go b/test/tools/vendor/golang.org/x/xerrors/doc.go
deleted file mode 100644
index eef99d9d5..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package xerrors implements functions to manipulate errors.
-//
-// This package is based on the Go 2 proposal for error values:
-// https://golang.org/design/29934-error-values
-//
-// These functions were incorporated into the standard library's errors package
-// in Go 1.13:
-// - Is
-// - As
-// - Unwrap
-//
-// Also, Errorf's %w verb was incorporated into fmt.Errorf.
-//
-// Use this package to get equivalent behavior in all supported Go versions.
-//
-// No other features of this package were included in Go 1.13, and at present
-// there are no plans to include any of them.
-package xerrors // import "golang.org/x/xerrors"
diff --git a/test/tools/vendor/golang.org/x/xerrors/errors.go b/test/tools/vendor/golang.org/x/xerrors/errors.go
deleted file mode 100644
index e88d3772d..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/errors.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import "fmt"
-
-// errorString is a trivial implementation of error.
-type errorString struct {
- s string
- frame Frame
-}
-
-// New returns an error that formats as the given text.
-//
-// The returned error contains a Frame set to the caller's location and
-// implements Formatter to show this information when printed with details.
-func New(text string) error {
- return &errorString{text, Caller(1)}
-}
-
-func (e *errorString) Error() string {
- return e.s
-}
-
-func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) }
-
-func (e *errorString) FormatError(p Printer) (next error) {
- p.Print(e.s)
- e.frame.Format(p)
- return nil
-}
diff --git a/test/tools/vendor/golang.org/x/xerrors/fmt.go b/test/tools/vendor/golang.org/x/xerrors/fmt.go
deleted file mode 100644
index 829862ddf..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/fmt.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "golang.org/x/xerrors/internal"
-)
-
-const percentBangString = "%!"
-
-// Errorf formats according to a format specifier and returns the string as a
-// value that satisfies error.
-//
-// The returned error includes the file and line number of the caller when
-// formatted with additional detail enabled. If the last argument is an error
-// the returned error's Format method will return it if the format string ends
-// with ": %s", ": %v", or ": %w". If the last argument is an error and the
-// format string ends with ": %w", the returned error implements an Unwrap
-// method returning it.
-//
-// If the format specifier includes a %w verb with an error operand in a
-// position other than at the end, the returned error will still implement an
-// Unwrap method returning the operand, but the error's Format method will not
-// return the wrapped error.
-//
-// It is invalid to include more than one %w verb or to supply it with an
-// operand that does not implement the error interface. The %w verb is otherwise
-// a synonym for %v.
-func Errorf(format string, a ...interface{}) error {
- format = formatPlusW(format)
- // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter.
- wrap := strings.HasSuffix(format, ": %w")
- idx, format2, ok := parsePercentW(format)
- percentWElsewhere := !wrap && idx >= 0
- if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) {
- err := errorAt(a, len(a)-1)
- if err == nil {
- return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)}
- }
- // TODO: this is not entirely correct. The error value could be
- // printed elsewhere in format if it mixes numbered with unnumbered
- // substitutions. With relatively small changes to doPrintf we can
- // have it optionally ignore extra arguments and pass the argument
- // list in its entirety.
- msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...)
- frame := Frame{}
- if internal.EnableTrace {
- frame = Caller(1)
- }
- if wrap {
- return &wrapError{msg, err, frame}
- }
- return &noWrapError{msg, err, frame}
- }
- // Support %w anywhere.
- // TODO: don't repeat the wrapped error's message when %w occurs in the middle.
- msg := fmt.Sprintf(format2, a...)
- if idx < 0 {
- return &noWrapError{msg, nil, Caller(1)}
- }
- err := errorAt(a, idx)
- if !ok || err == nil {
- // Too many %ws or argument of %w is not an error. Approximate the Go
- // 1.13 fmt.Errorf message.
- return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)}
- }
- frame := Frame{}
- if internal.EnableTrace {
- frame = Caller(1)
- }
- return &wrapError{msg, err, frame}
-}
-
-func errorAt(args []interface{}, i int) error {
- if i < 0 || i >= len(args) {
- return nil
- }
- err, ok := args[i].(error)
- if !ok {
- return nil
- }
- return err
-}
-
-// formatPlusW is used to avoid the vet check that will barf at %w.
-func formatPlusW(s string) string {
- return s
-}
-
-// Return the index of the only %w in format, or -1 if none.
-// Also return a rewritten format string with %w replaced by %v, and
-// false if there is more than one %w.
-// TODO: handle "%[N]w".
-func parsePercentW(format string) (idx int, newFormat string, ok bool) {
- // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go.
- idx = -1
- ok = true
- n := 0
- sz := 0
- var isW bool
- for i := 0; i < len(format); i += sz {
- if format[i] != '%' {
- sz = 1
- continue
- }
- // "%%" is not a format directive.
- if i+1 < len(format) && format[i+1] == '%' {
- sz = 2
- continue
- }
- sz, isW = parsePrintfVerb(format[i:])
- if isW {
- if idx >= 0 {
- ok = false
- } else {
- idx = n
- }
- // "Replace" the last character, the 'w', with a 'v'.
- p := i + sz - 1
- format = format[:p] + "v" + format[p+1:]
- }
- n++
- }
- return idx, format, ok
-}
-
-// Parse the printf verb starting with a % at s[0].
-// Return how many bytes it occupies and whether the verb is 'w'.
-func parsePrintfVerb(s string) (int, bool) {
- // Assume only that the directive is a sequence of non-letters followed by a single letter.
- sz := 0
- var r rune
- for i := 1; i < len(s); i += sz {
- r, sz = utf8.DecodeRuneInString(s[i:])
- if unicode.IsLetter(r) {
- return i + sz, r == 'w'
- }
- }
- return len(s), false
-}
-
-type noWrapError struct {
- msg string
- err error
- frame Frame
-}
-
-func (e *noWrapError) Error() string {
- return fmt.Sprint(e)
-}
-
-func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) }
-
-func (e *noWrapError) FormatError(p Printer) (next error) {
- p.Print(e.msg)
- e.frame.Format(p)
- return e.err
-}
-
-type wrapError struct {
- msg string
- err error
- frame Frame
-}
-
-func (e *wrapError) Error() string {
- return fmt.Sprint(e)
-}
-
-func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) }
-
-func (e *wrapError) FormatError(p Printer) (next error) {
- p.Print(e.msg)
- e.frame.Format(p)
- return e.err
-}
-
-func (e *wrapError) Unwrap() error {
- return e.err
-}
diff --git a/test/tools/vendor/golang.org/x/xerrors/format.go b/test/tools/vendor/golang.org/x/xerrors/format.go
deleted file mode 100644
index 1bc9c26b9..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/format.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-// A Formatter formats error messages.
-type Formatter interface {
- error
-
- // FormatError prints the receiver's first error and returns the next error in
- // the error chain, if any.
- FormatError(p Printer) (next error)
-}
-
-// A Printer formats error messages.
-//
-// The most common implementation of Printer is the one provided by package fmt
-// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message
-// typically provide their own implementations.
-type Printer interface {
- // Print appends args to the message output.
- Print(args ...interface{})
-
- // Printf writes a formatted string.
- Printf(format string, args ...interface{})
-
- // Detail reports whether error detail is requested.
- // After the first call to Detail, all text written to the Printer
- // is formatted as additional detail, or ignored when
- // detail has not been requested.
- // If Detail returns false, the caller can avoid printing the detail at all.
- Detail() bool
-}
diff --git a/test/tools/vendor/golang.org/x/xerrors/frame.go b/test/tools/vendor/golang.org/x/xerrors/frame.go
deleted file mode 100644
index 0de628ec5..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/frame.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import (
- "runtime"
-)
-
-// A Frame contains part of a call stack.
-type Frame struct {
- // Make room for three PCs: the one we were asked for, what it called,
- // and possibly a PC for skipPleaseUseCallersFrames. See:
- // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169
- frames [3]uintptr
-}
-
-// Caller returns a Frame that describes a frame on the caller's stack.
-// The argument skip is the number of frames to skip over.
-// Caller(0) returns the frame for the caller of Caller.
-func Caller(skip int) Frame {
- var s Frame
- runtime.Callers(skip+1, s.frames[:])
- return s
-}
-
-// location reports the file, line, and function of a frame.
-//
-// The returned function may be "" even if file and line are not.
-func (f Frame) location() (function, file string, line int) {
- frames := runtime.CallersFrames(f.frames[:])
- if _, ok := frames.Next(); !ok {
- return "", "", 0
- }
- fr, ok := frames.Next()
- if !ok {
- return "", "", 0
- }
- return fr.Function, fr.File, fr.Line
-}
-
-// Format prints the stack as error detail.
-// It should be called from an error's Format implementation
-// after printing any other error detail.
-func (f Frame) Format(p Printer) {
- if p.Detail() {
- function, file, line := f.location()
- if function != "" {
- p.Printf("%s\n ", function)
- }
- if file != "" {
- p.Printf("%s:%d\n", file, line)
- }
- }
-}
diff --git a/test/tools/vendor/golang.org/x/xerrors/go.mod b/test/tools/vendor/golang.org/x/xerrors/go.mod
deleted file mode 100644
index 870d4f612..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module golang.org/x/xerrors
-
-go 1.11
diff --git a/test/tools/vendor/golang.org/x/xerrors/internal/internal.go b/test/tools/vendor/golang.org/x/xerrors/internal/internal.go
deleted file mode 100644
index 89f4eca5d..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/internal/internal.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package internal
-
-// EnableTrace indicates whether stack information should be recorded in errors.
-var EnableTrace = true
diff --git a/test/tools/vendor/golang.org/x/xerrors/wrap.go b/test/tools/vendor/golang.org/x/xerrors/wrap.go
deleted file mode 100644
index 9a3b51037..000000000
--- a/test/tools/vendor/golang.org/x/xerrors/wrap.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import (
- "reflect"
-)
-
-// A Wrapper provides context around another error.
-type Wrapper interface {
- // Unwrap returns the next error in the error chain.
- // If there is no next error, Unwrap returns nil.
- Unwrap() error
-}
-
-// Opaque returns an error with the same error formatting as err
-// but that does not match err and cannot be unwrapped.
-func Opaque(err error) error {
- return noWrapper{err}
-}
-
-type noWrapper struct {
- error
-}
-
-func (e noWrapper) FormatError(p Printer) (next error) {
- if f, ok := e.error.(Formatter); ok {
- return f.FormatError(p)
- }
- p.Print(e.error)
- return nil
-}
-
-// Unwrap returns the result of calling the Unwrap method on err, if err implements
-// Unwrap. Otherwise, Unwrap returns nil.
-func Unwrap(err error) error {
- u, ok := err.(Wrapper)
- if !ok {
- return nil
- }
- return u.Unwrap()
-}
-
-// Is reports whether any error in err's chain matches target.
-//
-// An error is considered to match a target if it is equal to that target or if
-// it implements a method Is(error) bool such that Is(target) returns true.
-func Is(err, target error) bool {
- if target == nil {
- return err == target
- }
-
- isComparable := reflect.TypeOf(target).Comparable()
- for {
- if isComparable && err == target {
- return true
- }
- if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
- return true
- }
- // TODO: consider supporing target.Is(err). This would allow
- // user-definable predicates, but also may allow for coping with sloppy
- // APIs, thereby making it easier to get away with them.
- if err = Unwrap(err); err == nil {
- return false
- }
- }
-}
-
-// As finds the first error in err's chain that matches the type to which target
-// points, and if so, sets the target to its value and returns true. An error
-// matches a type if it is assignable to the target type, or if it has a method
-// As(interface{}) bool such that As(target) returns true. As will panic if target
-// is not a non-nil pointer to a type which implements error or is of interface type.
-//
-// The As method should set the target to its value and return true if err
-// matches the type to which target points.
-func As(err error, target interface{}) bool {
- if target == nil {
- panic("errors: target cannot be nil")
- }
- val := reflect.ValueOf(target)
- typ := val.Type()
- if typ.Kind() != reflect.Ptr || val.IsNil() {
- panic("errors: target must be a non-nil pointer")
- }
- if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) {
- panic("errors: *target must be interface or implement error")
- }
- targetType := typ.Elem()
- for err != nil {
- if reflect.TypeOf(err).AssignableTo(targetType) {
- val.Elem().Set(reflect.ValueOf(err))
- return true
- }
- if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) {
- return true
- }
- err = Unwrap(err)
- }
- return false
-}
-
-var errorType = reflect.TypeOf((*error)(nil)).Elem()
diff --git a/test/tools/vendor/modules.txt b/test/tools/vendor/modules.txt
index 462abe617..5b5cc4112 100644
--- a/test/tools/vendor/modules.txt
+++ b/test/tools/vendor/modules.txt
@@ -19,7 +19,7 @@ github.com/vbatts/git-validation/rules/dco
github.com/vbatts/git-validation/rules/messageregexp
github.com/vbatts/git-validation/rules/shortsubject
github.com/vbatts/git-validation/validate
-# golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3
+# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
@@ -27,7 +27,7 @@ golang.org/x/mod/semver
golang.org/x/sys/execabs
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
-# golang.org/x/tools v0.1.10
+# golang.org/x/tools v0.1.11
## explicit
golang.org/x/tools/cmd/goimports
golang.org/x/tools/go/ast/astutil
@@ -40,6 +40,3 @@ golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
golang.org/x/tools/internal/typeparams
-# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
-golang.org/x/xerrors
-golang.org/x/xerrors/internal
diff --git a/troubleshooting.md b/troubleshooting.md
index cf554654b..1fa044fe9 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -321,7 +321,7 @@ under `/var/lib/containers/storage`.
# restorecon -R -v /srv/containers
```
-The semanage command above tells SELinux to setup the default labeling of
+The semanage command above tells SELinux to set up the default labeling of
`/srv/containers` to match `/var/lib/containers`. The `restorecon` command
tells SELinux to apply the labels to the actual content.
@@ -387,7 +387,7 @@ error creating build container: Error committing the finished image: error addin
#### Solution
Choose one of the following:
- * Setup containers/storage in a different directory, not on an NFS share.
+ * Set up containers/storage in a different directory, not on an NFS share.
* Create a directory on a local file system.
* Edit `~/.config/containers/containers.conf` and point the `volume_path` option to that local directory. (Copy `/usr/share/containers/containers.conf` if `~/.config/containers/containers.conf` does not exist)
* Otherwise just run Podman as root, via `sudo podman`
@@ -663,7 +663,7 @@ $ podman run --rm --rootfs /path/to/rootfs true
The command above will create all the missing directories needed to run the container.
-After that, it can be used in read only mode, by multiple containers at the same time:
+After that, it can be used in read-only mode, by multiple containers at the same time:
```console
$ podman run --read-only --rootfs /path/to/rootfs ....
@@ -1217,3 +1217,72 @@ WARN[0000] Can't stat lower layer "/var/lib/containers/storage/overlay/l/7HS76F2
It is the user responsibility to make sure images in an additional
store are not deleted while being used by containers in another
store.
+
+### 36) Syncing bugfixes for podman-remote or setups using Podman API
+
+After upgrading Podman to a newer version, an issue from the earlier version of Podman still presents itself when using podman-remote.
+
+#### Symptom
+
+While running podman remote commands with the most up-to-date Podman, issues that were fixed in a prior version of Podman can still arise on either the Podman client side or the Podman server side.
+
+#### Solution
+
+When upgrading Podman to a particular version for the required fixes, users often make the mistake of only upgrading the Podman client. However, if a setup uses `podman-remote`, or a client that communicates with the Podman server on a remote machine via the REST API, then both the Podman client and the Podman server running on the remote machine must be upgraded to the same version.
+
+Example: If a particular bug was fixed in `v4.1.0`, then both the Podman client and the Podman server must be at version `v4.1.0`.
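+
+To verify that both sides match, check the version from the client; for remote setups `podman version` (or `podman --remote version`) typically reports both a Client and a Server block. The output below is abbreviated and the exact fields vary by release:
+
+```
+$ podman --remote version
+Client:       Podman Engine
+Version:      4.1.0
+
+Server:       Podman Engine
+Version:      4.1.0
+```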
+
+### 37) Unexpected carriage returns are outputted on the terminal
+
+When using the __--tty__ (__-t__) flag, unexpected carriage returns are outputted on the terminal.
+
+#### Symptom
+
+The container program prints a newline (`\n`) but the terminal outputs a carriage return and a newline (`\r\n`).
+
+```
+$ podman run --rm -t fedora echo abc | od -c
+0000000 a b c \r \n
+0000005
+```
+
+When run directly on the host, the result is as expected.
+
+```
+$ echo abc | od -c
+0000000 a b c \n
+0000004
+```
+
+Extra carriage returns can also shift the prompt to the right.
+
+```
+$ podman run --rm -t fedora sh -c "echo 1; echo 2; echo 3" | cat -A
+1^M$
+ 2^M$
+ 3^M$
+ $
+```
+
+#### Solution
+
+Run Podman without the __--tty__ (__-t__) flag.
+
+```
+$ podman run --rm fedora echo abc | od -c
+0000000 a b c \n
+0000004
+```
+
+The __--tty__ (__-t__) flag should only be used when the program requires user interaction in the terminal, for instance when expecting
+the user to type an answer to a question.
+
+Where does the extra carriage return `\r` come from?
+
+The extra `\r` is not written by Podman but by the terminal device, which by default translates `\n` to `\r\n` (the `onlcr` setting). In fact, reconfiguring the terminal can make the extra `\r` go away.
+
+```
+$ podman run --rm -t fedora /bin/sh -c "stty -onlcr && echo abc" | od -c
+0000000 a b c \n
+0000004
+```
diff --git a/utils/ports.go b/utils/ports.go
index 57a6f8275..eea060433 100644
--- a/utils/ports.go
+++ b/utils/ports.go
@@ -1,26 +1,25 @@
package utils
import (
+ "fmt"
"net"
"strconv"
-
- "github.com/pkg/errors"
)
// Find a random, open port on the host.
func GetRandomPort() (int, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
- return 0, errors.Wrapf(err, "unable to get free TCP port")
+ return 0, fmt.Errorf("unable to get free TCP port: %w", err)
}
defer l.Close()
_, randomPort, err := net.SplitHostPort(l.Addr().String())
if err != nil {
- return 0, errors.Wrapf(err, "unable to determine free port")
+ return 0, fmt.Errorf("unable to determine free port: %w", err)
}
rp, err := strconv.Atoi(randomPort)
if err != nil {
- return 0, errors.Wrapf(err, "unable to convert random port to int")
+ return 0, fmt.Errorf("unable to convert random port to int: %w", err)
}
return rp, nil
}
diff --git a/utils/testdata/cgroup.empty b/utils/testdata/cgroup.empty
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/testdata/cgroup.empty
diff --git a/utils/testdata/cgroup.other b/utils/testdata/cgroup.other
new file mode 100644
index 000000000..239a7cded
--- /dev/null
+++ b/utils/testdata/cgroup.other
@@ -0,0 +1 @@
+0::/other
diff --git a/utils/testdata/cgroup.root b/utils/testdata/cgroup.root
new file mode 100644
index 000000000..1e027b2a3
--- /dev/null
+++ b/utils/testdata/cgroup.root
@@ -0,0 +1 @@
+0::/
diff --git a/utils/utils.go b/utils/utils.go
index fd66ac2ed..997de150d 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -13,10 +13,8 @@ import (
"sync"
"github.com/containers/common/pkg/cgroups"
- "github.com/containers/podman/v4/libpod/define"
"github.com/containers/storage/pkg/archive"
"github.com/godbus/dbus/v5"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -53,57 +51,6 @@ func ExecCmdWithStdStreams(stdin io.Reader, stdout, stderr io.Writer, env []stri
return nil
}
-// ErrDetach is an error indicating that the user manually detached from the
-// container.
-var ErrDetach = define.ErrDetach
-
-// CopyDetachable is similar to io.Copy but support a detach key sequence to break out.
-func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) {
- buf := make([]byte, 32*1024)
- for {
- nr, er := src.Read(buf)
- if nr > 0 {
- preservBuf := []byte{}
- for i, key := range keys {
- preservBuf = append(preservBuf, buf[0:nr]...)
- if nr != 1 || buf[0] != key {
- break
- }
- if i == len(keys)-1 {
- return 0, ErrDetach
- }
- nr, er = src.Read(buf)
- }
- var nw int
- var ew error
- if len(preservBuf) > 0 {
- nw, ew = dst.Write(preservBuf)
- nr = len(preservBuf)
- } else {
- nw, ew = dst.Write(buf[0:nr])
- }
- if nw > 0 {
- written += int64(nw)
- }
- if ew != nil {
- err = ew
- break
- }
- if nr != nw {
- err = io.ErrShortWrite
- break
- }
- }
- if er != nil {
- if er != io.EOF {
- err = er
- }
- break
- }
- }
- return written, err
-}
-
// UntarToFileSystem untars an os.file of a tarball to a destination in the filesystem
func UntarToFileSystem(dest string, tarball *os.File, options *archive.TarOptions) error {
logrus.Debugf("untarring %s", tarball.Name())
@@ -114,7 +61,7 @@ func UntarToFileSystem(dest string, tarball *os.File, options *archive.TarOption
func CreateTarFromSrc(source string, dest string) error {
file, err := os.Create(dest)
if err != nil {
- return errors.Wrapf(err, "Could not create tarball file '%s'", dest)
+ return fmt.Errorf("could not create tarball file '%s': %w", dest, err)
}
defer file.Close()
return TarToFilesystem(source, file)
@@ -154,7 +101,7 @@ func RemoveScientificNotationFromFloat(x float64) (float64, error) {
}
result, err := strconv.ParseFloat(bigNum, 64)
if err != nil {
- return x, errors.Wrapf(err, "unable to remove scientific number from calculations")
+ return x, fmt.Errorf("unable to remove scientific number from calculations: %w", err)
}
return result, nil
}
@@ -181,11 +128,11 @@ func moveProcessPIDFileToScope(pidPath, slice, scope string) error {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrapf(err, "cannot read pid file %s", pidPath)
+ return fmt.Errorf("cannot read pid file %s: %w", pidPath, err)
}
pid, err := strconv.ParseUint(string(data), 10, 0)
if err != nil {
- return errors.Wrapf(err, "cannot parse pid file %s", pidPath)
+ return fmt.Errorf("cannot parse pid file %s: %w", pidPath, err)
}
return moveProcessToScope(int(pid), slice, scope)
@@ -243,27 +190,3 @@ func MovePauseProcessToScope(pausePidPath string) {
}
}
}
-
-// CreateSCPCommand takes an existing command, appends the given arguments and returns a configured podman command for image scp
-func CreateSCPCommand(cmd *exec.Cmd, command []string) *exec.Cmd {
- cmd.Args = append(cmd.Args, command...)
- cmd.Env = os.Environ()
- cmd.Stderr = os.Stderr
- cmd.Stdout = os.Stdout
- return cmd
-}
-
-// LoginUser starts the user process on the host so that image scp can use systemd-run
-func LoginUser(user string) (*exec.Cmd, error) {
- sleep, err := exec.LookPath("sleep")
- if err != nil {
- return nil, err
- }
- machinectl, err := exec.LookPath("machinectl")
- if err != nil {
- return nil, err
- }
- cmd := exec.Command(machinectl, "shell", "-q", user+"@.host", sleep, "inf")
- err = cmd.Start()
- return cmd, err
-}
diff --git a/utils/utils_supported.go b/utils/utils_supported.go
index 493ea61ce..d7d47b2bc 100644
--- a/utils/utils_supported.go
+++ b/utils/utils_supported.go
@@ -17,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
"github.com/godbus/dbus/v5"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -64,7 +63,7 @@ func RunUnderSystemdScope(pid int, slice string, unitName string) error {
return nil
}
-func getCgroupProcess(procFile string) (string, error) {
+func getCgroupProcess(procFile string, allowRoot bool) (string, error) {
f, err := os.Open(procFile)
if err != nil {
return "", err
@@ -72,12 +71,12 @@ func getCgroupProcess(procFile string) (string, error) {
defer f.Close()
scanner := bufio.NewScanner(f)
- cgroup := "/"
+ cgroup := ""
for scanner.Scan() {
line := scanner.Text()
parts := strings.SplitN(line, ":", 3)
if len(parts) != 3 {
- return "", errors.Errorf("cannot parse cgroup line %q", line)
+ return "", fmt.Errorf("cannot parse cgroup line %q", line)
}
if strings.HasPrefix(line, "0::") {
cgroup = line[3:]
@@ -87,20 +86,24 @@ func getCgroupProcess(procFile string) (string, error) {
cgroup = parts[2]
}
}
- if cgroup == "/" {
- return "", errors.Errorf("could not find cgroup mount in %q", procFile)
+ if len(cgroup) == 0 || (!allowRoot && cgroup == "/") {
+ return "", fmt.Errorf("could not find cgroup mount in %q", procFile)
}
return cgroup, nil
}
// GetOwnCgroup returns the cgroup for the current process.
func GetOwnCgroup() (string, error) {
- return getCgroupProcess("/proc/self/cgroup")
+ return getCgroupProcess("/proc/self/cgroup", true)
+}
+
+func GetOwnCgroupDisallowRoot() (string, error) {
+ return getCgroupProcess("/proc/self/cgroup", false)
}
// GetCgroupProcess returns the cgroup for the specified process process.
func GetCgroupProcess(pid int) (string, error) {
- return getCgroupProcess(fmt.Sprintf("/proc/%d/cgroup", pid))
+ return getCgroupProcess(fmt.Sprintf("/proc/%d/cgroup", pid), true)
}
// MoveUnderCgroupSubtree moves the PID under a cgroup subtree.
@@ -129,11 +132,11 @@ func moveUnderCgroup(cgroup, subtree string, processes []uint32) error {
line := scanner.Text()
parts := strings.SplitN(line, ":", 3)
if len(parts) != 3 {
- return errors.Errorf("cannot parse cgroup line %q", line)
+ return fmt.Errorf("cannot parse cgroup line %q", line)
}
// root cgroup, skip it
- if parts[2] == "/" {
+ if parts[2] == "/" && !(unifiedMode && parts[1] == "") {
continue
}
diff --git a/utils/utils_test.go b/utils/utils_test.go
new file mode 100644
index 000000000..f34dbdd7e
--- /dev/null
+++ b/utils/utils_test.go
@@ -0,0 +1,26 @@
+//go:build linux || darwin
+// +build linux darwin
+
+package utils
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCgroupProcess(t *testing.T) {
+ val, err := getCgroupProcess("testdata/cgroup.root", true)
+ assert.Nil(t, err)
+ assert.Equal(t, "/", val)
+
+ _, err = getCgroupProcess("testdata/cgroup.root", false)
+ assert.NotNil(t, err)
+
+ val, err = getCgroupProcess("testdata/cgroup.other", true)
+ assert.Nil(t, err)
+ assert.Equal(t, "/other", val)
+
+ _, err = getCgroupProcess("testdata/cgroup.empty", true)
+ assert.NotNil(t, err)
+}
diff --git a/utils/utils_windows.go b/utils/utils_windows.go
index 2c159ab06..18f232116 100644
--- a/utils/utils_windows.go
+++ b/utils/utils_windows.go
@@ -3,7 +3,7 @@
package utils
-import "github.com/pkg/errors"
+import "errors"
func RunUnderSystemdScope(pid int, slice string, unitName string) error {
return errors.New("not implemented for windows")
@@ -17,6 +17,10 @@ func GetOwnCgroup() (string, error) {
return "", errors.New("not implemented for windows")
}
+func GetOwnCgroupDisallowRoot() (string, error) {
+ return "", errors.New("not implemented for windows")
+}
+
func GetCgroupProcess(pid int) (string, error) {
return "", errors.New("not implemented for windows")
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 75499c967..1d45a703b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -4,17 +4,22 @@ import (
"context"
"encoding/json"
"errors"
+ "fmt"
"strings"
"sync"
"syscall"
+ "time"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/hcs/schema1"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+ "github.com/Microsoft/hcsshim/internal/jobobject"
"github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/Microsoft/hcsshim/internal/vmcompute"
+ "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -28,7 +33,8 @@ type System struct {
waitBlock chan struct{}
waitError error
exitError error
- os, typ string
+ os, typ, owner string
+ startTime time.Time
}
func newSystem(id string) *System {
@@ -38,6 +44,11 @@ func newSystem(id string) *System {
}
}
+// Implementation detail for silo naming, this should NOT be relied upon very heavily.
+func siloNameFmt(containerID string) string {
+ return fmt.Sprintf(`\Container_%s`, containerID)
+}
+
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
operation := "hcs::CreateComputeSystem"
@@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error {
}
computeSystem.typ = strings.ToLower(props.SystemType)
computeSystem.os = strings.ToLower(props.RuntimeOSType)
+ computeSystem.owner = strings.ToLower(props.Owner)
if computeSystem.os == "" && computeSystem.typ == "container" {
// Pre-RS5 HCS did not return the OS, but it only supported containers
// that ran Windows.
@@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
if err != nil {
return makeSystemError(computeSystem, operation, err, events)
}
-
+ computeSystem.startTime = time.Now()
return nil
}
@@ -324,11 +336,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
return properties, nil
}
-// PropertiesV2 returns the requested container properties targeting a V2 schema container.
-func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
- computeSystem.handleLock.RLock()
- defer computeSystem.handleLock.RUnlock()
+// queryInProc handles querying for container properties without reaching out to HCS. `props`
+// will be updated to contain any data returned from the queries present in `types`. If any properties
+// failed to be queried they will be tallied up and returned in as the first return value. Failures on
+// query are NOT considered errors; the only failure case for this method is if the containers job object
+// cannot be opened.
+func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) {
+ // In the future we can make use of some new functionality in the HCS that allows you
+ // to pass a job object for HCS to use for the container. Currently, the only way we'll
+ // be able to open the job/silo is if we're running as SYSTEM.
+ jobOptions := &jobobject.Options{
+ UseNTVariant: true,
+ Name: siloNameFmt(computeSystem.id),
+ }
+ job, err := jobobject.Open(ctx, jobOptions)
+ if err != nil {
+ return nil, err
+ }
+ defer job.Close()
+
+ var fallbackQueryTypes []hcsschema.PropertyType
+ for _, propType := range types {
+ switch propType {
+ case hcsschema.PTStatistics:
+ // Handle a bad caller asking for the same type twice. No use in re-querying if this is
+ // filled in already.
+ if props.Statistics == nil {
+ props.Statistics, err = computeSystem.statisticsInProc(job)
+ if err != nil {
+ log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")
+
+ fallbackQueryTypes = append(fallbackQueryTypes, propType)
+ }
+ }
+ default:
+ fallbackQueryTypes = append(fallbackQueryTypes, propType)
+ }
+ }
+
+ return fallbackQueryTypes, nil
+}
+
+// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
+// change to make grabbing the private working set total much more efficient.
+func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
+ // Start timestamp for these stats before we grab them to match HCS
+ timestamp := time.Now()
+
+ memInfo, err := job.QueryMemoryStats()
+ if err != nil {
+ return nil, err
+ }
+
+ processorInfo, err := job.QueryProcessorStats()
+ if err != nil {
+ return nil, err
+ }
+
+ storageInfo, err := job.QueryStorageStats()
+ if err != nil {
+ return nil, err
+ }
+
+ // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
+ // with the class SystemProcessInformation which returns an array containing system information for *every*
+ // process running on the machine. They then grab the pids that are running in the container and filter down
+ // the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
+ // work well as performance should get worse if more processess are running on the machine in general and not
+ // just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
+ // as well which isn't great and is wasted work to fetch.
+ //
+ // HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private
+ // working set ourselves and ask for everything else seperately. The optimization we can make here is
+ // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
+ // the private working set in a more efficient manner by:
+ //
+ // 1. Find the pids running in the silo
+ // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
+ // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
+ // 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
+ privateWorkingSet, err := job.QueryPrivateWorkingSet()
+ if err != nil {
+ return nil, err
+ }
+ return &hcsschema.Statistics{
+ Timestamp: timestamp,
+ ContainerStartTime: computeSystem.startTime,
+ Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
+ Memory: &hcsschema.MemoryStats{
+ MemoryUsageCommitBytes: memInfo.JobMemory,
+ MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed,
+ MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
+ },
+ Processor: &hcsschema.ProcessorStats{
+ RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
+ RuntimeUser100ns: uint64(processorInfo.TotalUserTime),
+ TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
+ },
+ Storage: &hcsschema.StorageStats{
+ ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount),
+ ReadSizeBytes: storageInfo.ReadStats.TotalSize,
+ WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
+ WriteSizeBytes: storageInfo.WriteStats.TotalSize,
+ },
+ }, nil
+}
+
+// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
+func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
operation := "hcs::System::PropertiesV2"
queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
@@ -345,12 +461,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem
if propertiesJSON == "" {
return nil, ErrUnexpectedValue
}
- properties := &hcsschema.Properties{}
- if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+ props := &hcsschema.Properties{}
+ if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
return nil, makeSystemError(computeSystem, operation, err, nil)
}
- return properties, nil
+ return props, nil
+}
+
+// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system.
+func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
+ computeSystem.handleLock.RLock()
+ defer computeSystem.handleLock.RUnlock()
+
+ // Let HCS tally up the total for VM based queries instead of querying ourselves.
+ if computeSystem.typ != "container" {
+ return computeSystem.hcsPropertiesV2Query(ctx, types)
+ }
+
+ // Define a starter Properties struct with the default fields returned from every
+ // query. Owner is only returned from Statistics but it's harmless to include.
+ properties := &hcsschema.Properties{
+ Id: computeSystem.id,
+ SystemType: computeSystem.typ,
+ RuntimeOsType: computeSystem.os,
+ Owner: computeSystem.owner,
+ }
+
+ logEntry := log.G(ctx)
+ // First lets try and query ourselves without reaching to HCS. If any of the queries fail
+ // we'll take note and fallback to querying HCS for any of the failed types.
+ fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
+ if err == nil && len(fallbackTypes) == 0 {
+ return properties, nil
+ } else if err != nil {
+ logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
+ fallbackTypes = types
+ }
+
+ logEntry.WithFields(logrus.Fields{
+ logfields.ContainerID: computeSystem.id,
+ "propertyTypes": fallbackTypes,
+ }).Info("falling back to HCS for property type queries")
+
+ hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Now add in anything that we might have successfully queried in process.
+ if properties.Statistics != nil {
+ hcsProperties.Statistics = properties.Statistics
+ hcsProperties.Owner = properties.Owner
+ }
+
+ // For future support for querying processlist in-proc as well.
+ if properties.ProcessList != nil {
+ hcsProperties.ProcessList = properties.ProcessList
+ }
+
+ return hcsProperties, nil
}
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
index 591a2631e..84b368218 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
@@ -21,10 +21,11 @@ const (
)
type NatPolicy struct {
- Type PolicyType `json:"Type"`
- Protocol string `json:",omitempty"`
- InternalPort uint16 `json:",omitempty"`
- ExternalPort uint16 `json:",omitempty"`
+ Type PolicyType `json:"Type"`
+ Protocol string `json:",omitempty"`
+ InternalPort uint16 `json:",omitempty"`
+ ExternalPort uint16 `json:",omitempty"`
+ ExternalPortReserved bool `json:",omitempty"`
}
type QosPolicy struct {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
new file mode 100644
index 000000000..3d640ac7b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
@@ -0,0 +1,111 @@
+package jobobject
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/queue"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ ioInitOnce sync.Once
+ initIOErr error
+ // Global iocp handle that will be re-used for every job object
+ ioCompletionPort windows.Handle
+ // Mapping of job handle to queue to place notifications in.
+ jobMap sync.Map
+)
+
+// MsgAllProcessesExited is a type representing a message that every process in a job has exited.
+type MsgAllProcessesExited struct{}
+
+// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently.
+// This should not be treated as an error.
+type MsgUnimplemented struct{}
+
+// pollIOCP polls the io completion port forever.
+func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
+ var (
+ overlapped uintptr
+ code uint32
+ key uintptr
+ )
+
+ for {
+ err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed to poll for job object message")
+ continue
+ }
+ if val, ok := jobMap.Load(key); ok {
+ msq, ok := val.(*queue.MessageQueue)
+ if !ok {
+ log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map")
+ continue
+ }
+ notification, err := parseMessage(code, overlapped)
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "code": code,
+ "overlapped": overlapped,
+ }).Warn("failed to parse job object message")
+ continue
+ }
+ if err := msq.Write(notification); err == queue.ErrQueueClosed {
+ // Write will only return an error when the queue is closed.
+ // The only time a queue would ever be closed is when we call `Close` on
+ // the job it belongs to which also removes it from the jobMap, so something
+ // went wrong here. We can't return as this is reading messages for all jobs
+ // so just log it and move on.
+ log.G(ctx).WithFields(logrus.Fields{
+ "code": code,
+ "overlapped": overlapped,
+ }).Warn("tried to write to a closed queue")
+ continue
+ }
+ } else {
+ log.G(ctx).Warn("received a message for a job not present in the mapping")
+ }
+ }
+}
+
+func parseMessage(code uint32, overlapped uintptr) (interface{}, error) {
+ // Check code and parse out relevant information related to that notification
+ // that we care about. For now all we handle is the message that all processes
+ // in the job have exited.
+ switch code {
+ case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
+ return MsgAllProcessesExited{}, nil
+ // Other messages for completeness and a check to make sure that if we fall
+ // into the default case that this is a code we don't know how to handle.
+ case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME:
+ case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME:
+ case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT:
+ case winapi.JOB_OBJECT_MSG_NEW_PROCESS:
+ case winapi.JOB_OBJECT_MSG_EXIT_PROCESS:
+ case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
+ case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT:
+ case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT:
+ case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT:
+ default:
+ return nil, fmt.Errorf("unknown job notification type: %d", code)
+ }
+ return MsgUnimplemented{}, nil
+}
+
+// Assigns an IO completion port to get notified of events for the registered job
+// object.
+func attachIOCP(job windows.Handle, iocp windows.Handle) error {
+ info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{
+ CompletionKey: job,
+ CompletionPort: iocp,
+ }
+ _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
+ return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
new file mode 100644
index 000000000..9c2726416
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
@@ -0,0 +1,499 @@
+package jobobject
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/queue"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "golang.org/x/sys/windows"
+)
+
+// This file provides higher level constructs for the win32 job object API.
+// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
+// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
+// structs and associated limit flags. Whatever is not present from the job object API
+// in golang.org/x/sys/windows is located in /internal/winapi.
+//
+// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects
+
+// JobObject is a high level wrapper around a Windows job object. Holds a handle to
+// the job, a queue to receive iocp notifications about the lifecycle
+// of the job and a mutex for synchronized handle access.
+type JobObject struct {
+ handle windows.Handle
+ mq *queue.MessageQueue
+ handleLock sync.RWMutex
+}
+
+// JobLimits represents the resource constraints that can be applied to a job object.
+type JobLimits struct {
+ CPULimit uint32
+ CPUWeight uint32
+ MemoryLimitInBytes uint64
+ MaxIOPS int64
+ MaxBandwidth int64
+}
+
+type CPURateControlType uint32
+
+const (
+ WeightBased CPURateControlType = iota
+ RateBased
+)
+
+// Processor resource controls
+const (
+ cpuLimitMin = 1
+ cpuLimitMax = 10000
+ cpuWeightMin = 1
+ cpuWeightMax = 9
+)
+
+var (
+ ErrAlreadyClosed = errors.New("the handle has already been closed")
+ ErrNotRegistered = errors.New("job is not registered to receive notifications")
+)
+
+// Options represents the set of configurable options when making or opening a job object.
+type Options struct {
+ // `Name` specifies the name of the job object if a named job object is desired.
+ Name string
+ // `Notifications` specifies if the job will be registered to receive notifications.
+ // Defaults to false.
+ Notifications bool
+ // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
+ // Defaults to false.
+ UseNTVariant bool
+}
+
+// Create creates a job object.
+//
+// If options.Name is an empty string, the job will not be assigned a name.
+//
+// If options.Notifications are not enabled `PollNotifications` will return immediately with error `errNotRegistered`.
+//
+// If `options` is nil, use default option values.
+//
+// Returns a JobObject structure and an error if there is one.
+func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
+ if options == nil {
+ options = &Options{}
+ }
+
+ var jobName *winapi.UnicodeString
+ if options.Name != "" {
+ jobName, err = winapi.NewUnicodeString(options.Name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var jobHandle windows.Handle
+ if options.UseNTVariant {
+ oa := winapi.ObjectAttributes{
+ Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
+ ObjectName: jobName,
+ Attributes: 0,
+ }
+ status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
+ if status != 0 {
+ return nil, winapi.RtlNtStatusToDosError(status)
+ }
+ } else {
+ var jobNameBuf *uint16
+ if jobName != nil && jobName.Buffer != nil {
+ jobNameBuf = jobName.Buffer
+ }
+ jobHandle, err = windows.CreateJobObject(nil, jobNameBuf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ windows.Close(jobHandle)
+ }
+ }()
+
+ job := &JobObject{
+ handle: jobHandle,
+ }
+
+ // If the IOCP we'll be using to receive messages for all jobs hasn't been
+ // created, create it and start polling.
+ if options.Notifications {
+ mq, err := setupNotifications(ctx, job)
+ if err != nil {
+ return nil, err
+ }
+ job.mq = mq
+ }
+
+ return job, nil
+}
+
+// Open opens an existing job object with name provided in `options`. If no name is provided
+// return an error since we need to know what job object to open.
+//
+// If options.Notifications is false `PollNotifications` will return immediately with error `errNotRegistered`.
+//
+// Returns a JobObject structure and an error if there is one.
+func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
+ if options == nil || (options != nil && options.Name == "") {
+ return nil, errors.New("no job object name specified to open")
+ }
+
+ unicodeJobName, err := winapi.NewUnicodeString(options.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ var jobHandle windows.Handle
+ if options != nil && options.UseNTVariant {
+ oa := winapi.ObjectAttributes{
+ Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
+ ObjectName: unicodeJobName,
+ Attributes: 0,
+ }
+ status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
+ if status != 0 {
+ return nil, winapi.RtlNtStatusToDosError(status)
+ }
+ } else {
+ jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ windows.Close(jobHandle)
+ }
+ }()
+
+ job := &JobObject{
+ handle: jobHandle,
+ }
+
+ // If the IOCP we'll be using to receive messages for all jobs hasn't been
+ // created, create it and start polling.
+ if options != nil && options.Notifications {
+ mq, err := setupNotifications(ctx, job)
+ if err != nil {
+ return nil, err
+ }
+ job.mq = mq
+ }
+
+ return job, nil
+}
+
+// helper function to setup notifications for creating/opening a job object
+func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ ioInitOnce.Do(func() {
+ h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
+ if err != nil {
+ initIOErr = err
+ return
+ }
+ ioCompletionPort = h
+ go pollIOCP(ctx, h)
+ })
+
+ if initIOErr != nil {
+ return nil, initIOErr
+ }
+
+ mq := queue.NewMessageQueue()
+ jobMap.Store(uintptr(job.handle), mq)
+ if err := attachIOCP(job.handle, ioCompletionPort); err != nil {
+ jobMap.Delete(uintptr(job.handle))
+ return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err)
+ }
+ return mq, nil
+}
+
+// PollNotification will poll for a job object notification. This call should only be called once
+// per job (ideally in a goroutine loop) and will block if there is not a notification ready.
+// This call will return immediately with error `ErrNotRegistered` if the job was not registered
+// to receive notifications during `Create`. Internally, messages will be queued and there
+// is no worry of messages being dropped.
+func (job *JobObject) PollNotification() (interface{}, error) {
+ if job.mq == nil {
+ return nil, ErrNotRegistered
+ }
+ return job.mq.ReadOrWait()
+}
+
+// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
+// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process
+// has already started running.
+func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if err := attrList.Update(
+ winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST,
+ unsafe.Pointer(&job.handle),
+ unsafe.Sizeof(job.handle),
+ ); err != nil {
+ return fmt.Errorf("failed to update proc thread attributes for job object: %w", err)
+ }
+
+ return nil
+}
+
+// Close closes the job object handle.
+func (job *JobObject) Close() error {
+ job.handleLock.Lock()
+ defer job.handleLock.Unlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if err := windows.Close(job.handle); err != nil {
+ return err
+ }
+
+ if job.mq != nil {
+ job.mq.Close()
+ }
+ // Handles now invalid so if the map entry to receive notifications for this job still
+ // exists remove it so we can stop receiving notifications.
+ if _, ok := jobMap.Load(uintptr(job.handle)); ok {
+ jobMap.Delete(uintptr(job.handle))
+ }
+
+ job.handle = 0
+ return nil
+}
+
+// Assign assigns a process to the job object.
+func (job *JobObject) Assign(pid uint32) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if pid == 0 {
+ return errors.New("invalid pid: 0")
+ }
+ hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid)
+ if err != nil {
+ return err
+ }
+ defer windows.Close(hProc)
+ return windows.AssignProcessToJobObject(job.handle, hProc)
+}
+
+// Terminate terminates the job, essentially calls TerminateProcess on every process in the
+// job.
+func (job *JobObject) Terminate(exitCode uint32) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+ return windows.TerminateJobObject(job.handle, exitCode)
+}
+
+// Pids returns all of the process IDs in the job object.
+func (job *JobObject) Pids() ([]uint32, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{}
+ err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicProcessIdList,
+ uintptr(unsafe.Pointer(&info)),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ )
+
+ // This is either the case where there is only one process or no processes in
+ // the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList
+ // is 1 and just return this, otherwise return an empty slice.
+ if err == nil {
+ if info.NumberOfProcessIdsInList == 1 {
+ return []uint32{uint32(info.ProcessIdList[0])}, nil
+ }
+ // Return empty slice instead of nil to play well with the caller of this.
+ // Do not return an error if no processes are running inside the job
+ return []uint32{}, nil
+ }
+
+ if err != winapi.ERROR_MORE_DATA {
+ return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
+ }
+
+ jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1))
+ buf := make([]byte, jobBasicProcessIDListSize)
+ if err = winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicProcessIdList,
+ uintptr(unsafe.Pointer(&buf[0])),
+ uint32(len(buf)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err)
+ }
+
+ bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
+ pids := make([]uint32, bufInfo.NumberOfProcessIdsInList)
+ for i, bufPid := range bufInfo.AllPids() {
+ pids[i] = uint32(bufPid)
+ }
+ return pids, nil
+}
+
+// QueryMemoryStats gets the memory stats for the job object.
+func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectMemoryUsageInformation,
+ uintptr(unsafe.Pointer(&info)),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object memory stats: %w", err)
+ }
+ return &info, nil
+}
+
+// QueryProcessorStats gets the processor stats for the job object.
+func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicAccountingInformation,
+ uintptr(unsafe.Pointer(&info)),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object process stats: %w", err)
+ }
+ return &info, nil
+}
+
+// QueryStorageStats gets the storage (I/O) stats for the job object.
+func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
+ ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
+ }
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectIoAttribution,
+ uintptr(unsafe.Pointer(&info)),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object storage stats: %w", err)
+ }
+ return &info, nil
+}
+
+// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
+// private working set for every process running in the job.
+func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
+ pids, err := job.Pids()
+ if err != nil {
+ return 0, err
+ }
+
+ openAndQueryWorkingSet := func(pid uint32) (uint64, error) {
+ h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
+ if err != nil {
+ // Continue to the next if OpenProcess doesn't return a valid handle (fails). Handles a
+ // case where one of the pids in the job exited before we open.
+ return 0, nil
+ }
+ defer func() {
+ _ = windows.Close(h)
+ }()
+ // Check if the process is actually running in the job still. There's a small chance
+ // that the process could have exited and had its pid re-used between grabbing the pids
+ // in the job and opening the handle to it above.
+ var inJob int32
+ if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil {
+ // This shouldn't fail unless we have incorrect access rights which we control
+ // here so probably best to error out if this failed.
+ return 0, err
+ }
+ // Don't report stats for this process as it's not running in the job. This shouldn't be
+ // an error condition though.
+ if inJob == 0 {
+ return 0, nil
+ }
+
+ var vmCounters winapi.VM_COUNTERS_EX2
+ status := winapi.NtQueryInformationProcess(
+ h,
+ winapi.ProcessVmCounters,
+ uintptr(unsafe.Pointer(&vmCounters)),
+ uint32(unsafe.Sizeof(vmCounters)),
+ nil,
+ )
+ if !winapi.NTSuccess(status) {
+ return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status))
+ }
+ return uint64(vmCounters.PrivateWorkingSetSize), nil
+ }
+
+ var jobWorkingSetSize uint64
+ for _, pid := range pids {
+ workingSet, err := openAndQueryWorkingSet(pid)
+ if err != nil {
+ return 0, err
+ }
+ jobWorkingSetSize += workingSet
+ }
+
+ return jobWorkingSetSize, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
new file mode 100644
index 000000000..4be297788
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
@@ -0,0 +1,315 @@
+package jobobject
+
+import (
+ "errors"
+ "fmt"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "golang.org/x/sys/windows"
+)
+
+const (
+ memoryLimitMax uint64 = 0xffffffffffffffff
+)
+
+func isFlagSet(flag, controlFlags uint32) bool {
+ return (flag & controlFlags) == flag
+}
+
+// SetResourceLimits sets resource limits on the job object (cpu, memory, storage).
+func (job *JobObject) SetResourceLimits(limits *JobLimits) error {
+ // Go through and check what limits were specified and apply them to the job.
+ if limits.MemoryLimitInBytes != 0 {
+ if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil {
+ return fmt.Errorf("failed to set job object memory limit: %w", err)
+ }
+ }
+
+ if limits.CPULimit != 0 {
+ if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil {
+ return fmt.Errorf("failed to set job object cpu limit: %w", err)
+ }
+ } else if limits.CPUWeight != 0 {
+ if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil {
+ return fmt.Errorf("failed to set job object cpu limit: %w", err)
+ }
+ }
+
+ if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 {
+ if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil {
+ return fmt.Errorf("failed to set io limit on job object: %w", err)
+ }
+ }
+ return nil
+}
+
+// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate
+// all processes in the job on the last open handle being closed.
+func (job *JobObject) SetTerminateOnLastHandleClose() error {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+ info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ return job.setExtendedInformation(info)
+}
+
+// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`.
+func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error {
+ if memoryLimitInBytes >= memoryLimitMax {
+ return errors.New("memory limit specified exceeds the max size")
+ }
+
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+
+ info.JobMemoryLimit = uintptr(memoryLimitInBytes)
+ info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY
+ return job.setExtendedInformation(info)
+}
+
+// GetMemoryLimit gets the memory limit in bytes of the job object.
+func (job *JobObject) GetMemoryLimit() (uint64, error) {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return 0, err
+ }
+ return uint64(info.JobMemoryLimit), nil
+}
+
+// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to
+// `rateControlValue` for the job object.
+func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error {
+ cpuInfo, err := job.getCPURateControlInformation()
+ if err != nil {
+ return err
+ }
+ switch rateControlType {
+ case WeightBased:
+ if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax {
+ return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue)
+ }
+ cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
+ cpuInfo.Value = rateControlValue
+ case RateBased:
+ if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax {
+ return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue)
+ }
+ cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
+ cpuInfo.Value = rateControlValue
+ default:
+ return errors.New("invalid job object cpu rate control type")
+ }
+ return job.setCPURateControlInfo(cpuInfo)
+}
+
+// GetCPULimit gets the cpu limits for the job object.
+// `rateControlType` is used to indicate what type of cpu limit to query for.
+func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) {
+ info, err := job.getCPURateControlInformation()
+ if err != nil {
+ return 0, err
+ }
+
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) {
+ return 0, errors.New("the job does not have cpu rate control enabled")
+ }
+
+ switch rateControlType {
+ case WeightBased:
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) {
+ return 0, errors.New("cannot get cpu weight for job object without cpu weight option set")
+ }
+ case RateBased:
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) {
+ return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set")
+ }
+ default:
+ return 0, errors.New("invalid job object cpu rate control type")
+ }
+ return info.Value, nil
+}
+
+// SetCPUAffinity sets the processor affinity for the job object.
+// The affinity is passed in as a bitmask.
+func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+ info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY)
+ info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
+ return job.setExtendedInformation(info)
+}
+
+// GetCPUAffinity gets the processor affinity for the job object.
+// The returned affinity is a bitmask.
+func (job *JobObject) GetCPUAffinity() (uint64, error) {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return 0, err
+ }
+ return uint64(info.BasicLimitInformation.Affinity), nil
+}
+
+// SetIOLimit sets the IO limits specified on the job object.
+func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error {
+ ioInfo, err := job.getIOLimit()
+ if err != nil {
+ return err
+ }
+ ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE
+ if maxBandwidth != 0 {
+ ioInfo.MaxBandwidth = maxBandwidth
+ }
+ if maxIOPS != 0 {
+ ioInfo.MaxIops = maxIOPS
+ }
+ return job.setIORateControlInfo(ioInfo)
+}
+
+// GetIOMaxBandwidthLimit gets the max bandwidth for the job object.
+func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) {
+ info, err := job.getIOLimit()
+ if err != nil {
+ return 0, err
+ }
+ return info.MaxBandwidth, nil
+}
+
+// GetIOMaxIopsLimit gets the max iops for the job object.
+func (job *JobObject) GetIOMaxIopsLimit() (int64, error) {
+ info, err := job.getIOLimit()
+ if err != nil {
+ return 0, err
+ }
+ return info.MaxIops, nil
+}
+
+// Helper function for getting a job object's extended information.
+func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ windows.JobObjectExtendedLimitInformation,
+ uintptr(unsafe.Pointer(&info)),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", info, err)
+ }
+ return &info, nil
+}
+
+// Helper function for getting a job object's CPU rate control information.
+func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ windows.JobObjectCpuRateControlInformation,
+ uintptr(unsafe.Pointer(&info)),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", info, err)
+ }
+ return &info, nil
+}
+
+// Helper function for setting a job object's extended information.
+func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if _, err := windows.SetInformationJobObject(
+ job.handle,
+ windows.JobObjectExtendedLimitInformation,
+ uintptr(unsafe.Pointer(info)),
+ uint32(unsafe.Sizeof(*info)),
+ ); err != nil {
+ return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err)
+ }
+ return nil
+}
+
+// Helper function for querying job handle for IO limit information.
+func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{}
+ var blockCount uint32 = 1
+
+ if _, err := winapi.QueryIoRateControlInformationJobObject(
+ job.handle,
+ nil,
+ &ioInfo,
+ &blockCount,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err)
+ }
+
+ if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) {
+ return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo)
+ }
+ return ioInfo, nil
+}
+
+// Helper function for setting a job object's IO rate control information.
+func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil {
+ return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err)
+ }
+ return nil
+}
+
+// Helper function for setting a job object's CPU rate control information.
+func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+ if _, err := windows.SetInformationJobObject(
+ job.handle,
+ windows.JobObjectCpuRateControlInformation,
+ uintptr(unsafe.Pointer(cpuInfo)),
+ uint32(unsafe.Sizeof(*cpuInfo)),
+ ); err != nil {
+ return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
+ }
+ return nil
+}
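For orientation, here is a minimal sketch of how the limit helpers added above compose. It is illustrative only: jobobject is an internal hcsshim package, so nothing outside hcsshim can import it, and how the *jobobject.JobObject is created or opened is out of scope for this sketch.

//go:build windows

package limitsdemo

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/jobobject"
)

// configureJob applies an affinity mask and an IO bandwidth cap to an existing
// job object, then reads limits back with the getters defined above.
func configureJob(job *jobobject.JobObject) error {
	// Restrict the job's processes to CPUs 0 and 1 (bits 0 and 1 of the mask).
	if err := job.SetCPUAffinity(0b11); err != nil {
		return err
	}
	// Cap IO bandwidth at roughly 100 MB/s; passing 0 for maxIOPS leaves that
	// limit unchanged.
	if err := job.SetIOLimit(100*1024*1024, 0); err != nil {
		return err
	}
	// GetCPULimit fails unless CPU rate control was enabled earlier through
	// SetCPULimit, so the error is treated as informational here.
	if weight, err := job.GetCPULimit(jobobject.WeightBased); err == nil {
		fmt.Println("cpu weight:", weight)
	}
	bw, err := job.GetIOMaxBandwidthLimit()
	if err != nil {
		return err
	}
	fmt.Println("io max bandwidth:", bw)
	return nil
}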
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
new file mode 100644
index 000000000..e177c9a62
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
@@ -0,0 +1,111 @@
+package queue
+
+import (
+ "errors"
+ "sync"
+)
+
+var (
+ ErrQueueClosed = errors.New("the queue is closed for reading and writing")
+ ErrQueueEmpty = errors.New("the queue is empty")
+)
+
+// MessageQueue represents a thread-safe queue that messages can be written to
+// and read from.
+type MessageQueue struct {
+ m *sync.RWMutex
+ c *sync.Cond
+ messages []interface{}
+ closed bool
+}
+
+// NewMessageQueue returns a new MessageQueue.
+func NewMessageQueue() *MessageQueue {
+ m := &sync.RWMutex{}
+ return &MessageQueue{
+ m: m,
+ c: sync.NewCond(m),
+ messages: []interface{}{},
+ }
+}
+
+// Write writes `msg` to the queue.
+func (mq *MessageQueue) Write(msg interface{}) error {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+
+ if mq.closed {
+ return ErrQueueClosed
+ }
+ mq.messages = append(mq.messages, msg)
+ // Signal a waiter that there is now a value available in the queue.
+ mq.c.Signal()
+ return nil
+}
+
+// Read will read a value from the queue if available, otherwise return an error.
+func (mq *MessageQueue) Read() (interface{}, error) {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+ if mq.closed {
+ return nil, ErrQueueClosed
+ }
+ if mq.isEmpty() {
+ return nil, ErrQueueEmpty
+ }
+ val := mq.messages[0]
+ mq.messages[0] = nil
+ mq.messages = mq.messages[1:]
+ return val, nil
+}
+
+// ReadOrWait reads a value from the queue if one is available; otherwise it
+// waits for a value to be written. It blocks until a value arrives or the
+// queue is closed.
+func (mq *MessageQueue) ReadOrWait() (interface{}, error) {
+ mq.m.Lock()
+ if mq.closed {
+ mq.m.Unlock()
+ return nil, ErrQueueClosed
+ }
+ if mq.isEmpty() {
+ for !mq.closed && mq.isEmpty() {
+ mq.c.Wait()
+ }
+ mq.m.Unlock()
+ return mq.Read()
+ }
+ val := mq.messages[0]
+ mq.messages[0] = nil
+ mq.messages = mq.messages[1:]
+ mq.m.Unlock()
+ return val, nil
+}
+
+// IsEmpty reports whether the queue is empty.
+func (mq *MessageQueue) IsEmpty() bool {
+ mq.m.RLock()
+ defer mq.m.RUnlock()
+ return len(mq.messages) == 0
+}
+
+// Non-exported empty check that doesn't take the lock, so it can be called from Read and ReadOrWait while the lock is already held.
+func (mq *MessageQueue) isEmpty() bool {
+ return len(mq.messages) == 0
+}
+
+// Close closes the queue for future writes or reads. Any attempts to read or write from the
+// queue after close will return ErrQueueClosed. This is safe to call multiple times.
+func (mq *MessageQueue) Close() {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+ // Already closed
+ if mq.closed {
+ return
+ }
+ mq.messages = nil
+ mq.closed = true
+ // If there's anybody currently waiting on a value from ReadOrWait, we need to
+ // broadcast so the read(s) can return ErrQueueClosed.
+ mq.c.Broadcast()
+}
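The queue above is a small producer/consumer primitive; a sketch of its intended use follows (illustrative only, since internal/queue cannot be imported from outside hcsshim):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/queue"
)

func main() {
	mq := queue.NewMessageQueue()

	done := make(chan struct{})
	go func() {
		defer close(done)
		for i := 0; i < 3; i++ {
			// ReadOrWait blocks until a value is written or the queue is closed.
			msg, err := mq.ReadOrWait()
			if err != nil {
				return // ErrQueueClosed
			}
			fmt.Println("got:", msg)
		}
	}()

	for i := 0; i < 3; i++ {
		if err := mq.Write(i); err != nil {
			panic(err)
		}
	}
	<-done
	// Close drops any still-queued messages and wakes blocked readers, which
	// then return ErrQueueClosed.
	mq.Close()
}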
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
deleted file mode 100644
index 4e609cbf1..000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package winapi
-
-//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
index ba12b1ad9..479649db3 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
@@ -24,7 +24,10 @@ const (
// Access rights for creating or opening job objects.
//
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
-const JOB_OBJECT_ALL_ACCESS = 0x1F001F
+const (
+ JOB_OBJECT_QUERY = 0x0004
+ JOB_OBJECT_ALL_ACCESS = 0x1F001F
+)
// IO limit flags
//
@@ -93,7 +96,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
// AllPids returns all the process Ids in the job object.
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
- return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList]
+ return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList]
}
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
@@ -162,7 +165,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// PBOOL Result
// );
//
-//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob
+//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob
// BOOL QueryInformationJobObject(
// HANDLE hJob,
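Two of the changes above are easy to miss: AllPids now uses a full slice expression so the returned slice's capacity equals its length, and IsProcessInJob takes a *int32 result (a Win32 BOOL is a 4-byte int, which a Go bool is not). The slicing change keeps callers from appending past the PID list; a standalone illustration of the idiom:

package main

import "fmt"

func main() {
	backing := []uintptr{1, 2, 3, 4}

	uncapped := backing[:2]  // len 2, cap 4: append writes into backing[2]
	capped := backing[:2:2]  // len 2, cap 2: append copies to a fresh array

	_ = append(uncapped, 99)
	fmt.Println(backing) // [1 2 99 4] -- the shared backing array was modified

	backing[2] = 3
	_ = append(capped, 99)
	fmt.Println(backing) // [1 2 3 4] -- untouched
}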
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
index 37839435b..5f9e03fd2 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
@@ -6,3 +6,60 @@ const (
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
)
+
+// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures.
+const ProcessVmCounters = 3
+
+// __kernel_entry NTSTATUS NtQueryInformationProcess(
+// [in] HANDLE ProcessHandle,
+// [in] PROCESSINFOCLASS ProcessInformationClass,
+// [out] PVOID ProcessInformation,
+// [in] ULONG ProcessInformationLength,
+// [out, optional] PULONG ReturnLength
+// );
+//
+//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
+
+// typedef struct _VM_COUNTERS_EX
+// {
+// SIZE_T PeakVirtualSize;
+// SIZE_T VirtualSize;
+// ULONG PageFaultCount;
+// SIZE_T PeakWorkingSetSize;
+// SIZE_T WorkingSetSize;
+// SIZE_T QuotaPeakPagedPoolUsage;
+// SIZE_T QuotaPagedPoolUsage;
+// SIZE_T QuotaPeakNonPagedPoolUsage;
+// SIZE_T QuotaNonPagedPoolUsage;
+// SIZE_T PagefileUsage;
+// SIZE_T PeakPagefileUsage;
+// SIZE_T PrivateUsage;
+// } VM_COUNTERS_EX, *PVM_COUNTERS_EX;
+//
+type VM_COUNTERS_EX struct {
+ PeakVirtualSize uintptr
+ VirtualSize uintptr
+ PageFaultCount uint32
+ PeakWorkingSetSize uintptr
+ WorkingSetSize uintptr
+ QuotaPeakPagedPoolUsage uintptr
+ QuotaPagedPoolUsage uintptr
+ QuotaPeakNonPagedPoolUsage uintptr
+ QuotaNonPagedPoolUsage uintptr
+ PagefileUsage uintptr
+ PeakPagefileUsage uintptr
+ PrivateUsage uintptr
+}
+
+// typedef struct _VM_COUNTERS_EX2
+// {
+// VM_COUNTERS_EX CountersEx;
+// SIZE_T PrivateWorkingSetSize;
+// SIZE_T SharedCommitUsage;
+// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2;
+//
+type VM_COUNTERS_EX2 struct {
+ CountersEx VM_COUNTERS_EX
+ PrivateWorkingSetSize uintptr
+ SharedCommitUsage uintptr
+}
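A sketch of how the new NtQueryInformationProcess wrapper and VM_COUNTERS_EX2 might be used to read a process's memory counters. Again illustrative only (winapi is internal to hcsshim), and treating every nonzero NTSTATUS as failure is a simplification.

//go:build windows

package vmcountersdemo

import (
	"fmt"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

// queryVMCounters queries the ProcessVmCounters information class for the
// given process handle and returns the private working set size in bytes.
func queryVMCounters(h windows.Handle) (uint64, error) {
	var counters winapi.VM_COUNTERS_EX2
	var retLen uint32
	status := winapi.NtQueryInformationProcess(
		h,
		winapi.ProcessVmCounters,
		uintptr(unsafe.Pointer(&counters)),
		uint32(unsafe.Sizeof(counters)),
		&retLen,
	)
	if status != 0 {
		return 0, fmt.Errorf("NtQueryInformationProcess failed: 0x%x", status)
	}
	return uint64(counters.PrivateWorkingSetSize), nil
}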
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
index 1d4ba3c4f..d2cc9d9fb 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
@@ -2,4 +2,4 @@
// be thought of as an extension to golang.org/x/sys/windows.
package winapi
-//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
+//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
index 4eb64b4c0..39fb3e1ad 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
@@ -50,7 +50,6 @@ var (
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
procSearchPathW = modkernel32.NewProc("SearchPathW")
procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread")
- procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procIsProcessInJob = modkernel32.NewProc("IsProcessInJob")
procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW")
@@ -61,6 +60,7 @@ var (
procLogonUserW = modadvapi32.NewProc("LogonUserW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
+ procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess")
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@@ -140,19 +140,7 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes,
return
}
-func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
-func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
+func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
if r1 == 0 {
if e1 != 0 {
@@ -256,6 +244,12 @@ func LocalFree(ptr uintptr) {
return
}
+func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) {
+ r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
+ status = uint32(r0)
+ return
+}
+
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
amount = uint32(r0)
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
index e79bffe63..55ed392a0 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
@@ -16,6 +16,7 @@ package invoke
import (
"context"
+ "encoding/json"
"fmt"
"os"
@@ -33,6 +34,43 @@ type Exec interface {
Decode(jsonBytes []byte) (version.PluginInfo, error)
}
+// A plugin must return its result in the same version as specified in the netconf,
+// but for backwards-compatibility reasons, if the result version is empty we use
+// the config version (rather than the technically correct 0.1.0).
+// https://github.com/containernetworking/cni/issues/895
+func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
+ versionDecoder := &version.ConfigDecoder{}
+ confVersion, err := versionDecoder.Decode(netconf)
+ if err != nil {
+ return "", nil, err
+ }
+
+ var rawResult map[string]interface{}
+ if err := json.Unmarshal(result, &rawResult); err != nil {
+ return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err)
+ }
+
+ // Manually decode Result version; we need to know whether its cniVersion
+ // is empty, while built-in decoders (correctly) substitute 0.1.0 for an
+ // empty version per the CNI spec.
+ if resultVerRaw, ok := rawResult["cniVersion"]; ok {
+ resultVer, ok := resultVerRaw.(string)
+ if ok && resultVer != "" {
+ return resultVer, result, nil
+ }
+ }
+
+ // If the cniVersion is not present or empty, assume the result is
+ // the same CNI spec version as the config
+ rawResult["cniVersion"] = confVersion
+ newBytes, err := json.Marshal(rawResult)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err)
+ }
+
+ return confVersion, newBytes, nil
+}
+
// For example, a testcase could pass an instance of the following fakeExec
// object to ExecPluginWithResult() to verify the incoming stdin and environment
// and provide a tailored response:
@@ -84,7 +122,12 @@ func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte
return nil, err
}
- return create.CreateFromBytes(stdoutBytes)
+ resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return create.Create(resultVersion, fixedBytes)
}
func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
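The effect of the fixup is easiest to see on concrete JSON. fixupResultVersion itself is unexported, so the snippet below simply re-traces its logic: a plugin result that omits cniVersion inherits the version from the network config instead of being decoded as 0.1.0.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	netconf := []byte(`{"cniVersion":"0.4.0","name":"mynet","type":"bridge"}`)
	result := []byte(`{"ips":[{"address":"10.1.2.3/24"}]}`)

	var conf, res map[string]interface{}
	_ = json.Unmarshal(netconf, &conf)
	_ = json.Unmarshal(result, &res)

	// Empty or missing cniVersion in the result: fall back to the config's version.
	if v, ok := res["cniVersion"].(string); !ok || v == "" {
		res["cniVersion"] = conf["cniVersion"]
	}

	fixed, _ := json.Marshal(res)
	fmt.Println(string(fixed)) // the result now carries cniVersion "0.4.0"
}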
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index ccf7be53a..1b25b190c 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -29,9 +29,9 @@ env:
IMAGE_PROJECT: "libpod-218412"
FEDORA_NAME: "fedora-36"
PRIOR_FEDORA_NAME: "fedora-35"
- UBUNTU_NAME: "ubuntu-2110"
+ UBUNTU_NAME: "ubuntu-2204"
- IMAGE_SUFFIX: "c4955393725038592"
+ IMAGE_SUFFIX: "c6193881921355776"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -119,7 +119,7 @@ vendor_task:
# Runs within Cirrus's "community cluster"
container:
- image: docker.io/library/golang:1.16
+ image: docker.io/library/golang:1.17
cpu: 1
memory: 1
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index 36fa66893..46e5dc9d6 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,11 +2,6 @@
# Changelog
-## v1.26.1 (2022-05-04)
-
- Make `buildah build --label foo` create an empty "foo" label again
- Bump to v1.27.0-dev
-
## v1.26.0 (2022-05-04)
imagebuildah,build: move deepcopy of args before we spawn goroutine
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index 95ce322b8..c9121cc87 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -26,7 +26,8 @@ export GO_TEST=$(GO) test
endif
RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
-GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed"))
+COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
+GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
STATIC_STORAGETAGS = "containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)"
@@ -41,11 +42,11 @@ SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go doc
LINTFLAGS ?=
-ifeq ($(DEBUG), 1)
+ifeq ($(BUILDDEBUG), 1)
override GOGCFLAGS += -N -l
endif
-# make all DEBUG=1
+# make all BUILDDEBUG=1
# Note: Uses the -N -l go compiler options to disable compiler optimizations
# and inlining. Using these build options allows you to subsequently
# use source debugging tools like delve.
@@ -177,7 +178,7 @@ test-unit: tests/testreport/testreport
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf
vendor-in-container:
- podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor
+ podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.17 make vendor
.PHONY: vendor
vendor:
diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go
index 0e45d12c2..83ca2933f 100644
--- a/vendor/github.com/containers/buildah/bind/mount.go
+++ b/vendor/github.com/containers/buildah/bind/mount.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package bind
@@ -9,6 +10,7 @@ import (
"syscall"
"github.com/containers/buildah/util"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -190,11 +192,11 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// Decide if the mount should not be redirected to an intermediate location first.
func leaveBindMountAlone(mount specs.Mount) bool {
// If we know we shouldn't do a redirection for this mount, skip it.
- if util.StringInSlice(NoBindOption, mount.Options) {
+ if cutil.StringInSlice(NoBindOption, mount.Options) {
return true
}
// If we're not bind mounting it in, we don't need to do anything for it.
- if mount.Type != "bind" && !util.StringInSlice("bind", mount.Options) && !util.StringInSlice("rbind", mount.Options) {
+ if mount.Type != "bind" && !cutil.StringInSlice("bind", mount.Options) && !cutil.StringInSlice("rbind", mount.Options) {
return true
}
return false
@@ -289,7 +291,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
}
}
// if we're also supposed to remove this thing, do that, too
- if util.StringInSlice(mount.Mountpoint, mountpointsToRemove) {
+ if cutil.StringInSlice(mount.Mountpoint, mountpointsToRemove) {
if err := os.Remove(mount.Mountpoint); err != nil {
return errors.Wrapf(err, "error removing %q", mount.Mountpoint)
}
diff --git a/vendor/github.com/containers/buildah/bind/util.go b/vendor/github.com/containers/buildah/bind/util.go
index 5115368d7..3f77f3e51 100644
--- a/vendor/github.com/containers/buildah/bind/util.go
+++ b/vendor/github.com/containers/buildah/bind/util.go
@@ -1,7 +1,7 @@
package bind
import (
- "github.com/containers/buildah/util"
+ "github.com/containers/common/pkg/util"
"github.com/opencontainers/runtime-spec/specs-go"
)
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 127c674bf..a8a010bcd 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,7 +1,3 @@
-- Changelog for v1.26.1 (2022-05-04)
- * Make `buildah build --label foo` create an empty "foo" label again
- * Bump to v1.27.0-dev
-
- Changelog for v1.26.0 (2022-05-04)
* imagebuildah,build: move deepcopy of args before we spawn goroutine
* Vendor in containers/storage v1.40.2
diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go
index f130f7a22..f36359e34 100644
--- a/vendor/github.com/containers/buildah/chroot/seccomp.go
+++ b/vendor/github.com/containers/buildah/chroot/seccomp.go
@@ -1,3 +1,4 @@
+//go:build linux && seccomp
// +build linux,seccomp
package chroot
@@ -21,7 +22,7 @@ func setSeccomp(spec *specs.Spec) error {
mapAction := func(specAction specs.LinuxSeccompAction, errnoRet *uint) libseccomp.ScmpAction {
switch specAction {
case specs.ActKill:
- return libseccomp.ActKill
+ return libseccomp.ActKillThread
case specs.ActTrap:
return libseccomp.ActTrap
case specs.ActErrno:
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index ca597e222..6122a6696 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -69,6 +69,10 @@ type CommitOptions struct {
// Squash tells the builder to produce an image with a single layer
// instead of with possibly more than one layer.
Squash bool
+ // OmitHistory tells the builder to ignore the history of the build layers and
+ // the base image while preparing the image spec; setting this to true ensures
+ // that no history is added to the image spec. (default false)
+ OmitHistory bool
// BlobDirectory is the name of a directory in which we'll look for
// prebuilt copies of layer blobs that we might otherwise need to
// regenerate from on-disk layers. If blobs are available, the
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index e009ed763..0b6cf4e45 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -11,7 +11,7 @@ import (
"github.com/containerd/containerd/platforms"
"github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
- "github.com/containers/buildah/util"
+ "github.com/containers/common/pkg/util"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports"
diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go
index 00aa29ccc..23bf0fb45 100644
--- a/vendor/github.com/containers/buildah/copier/copier.go
+++ b/vendor/github.com/containers/buildah/copier/copier.go
@@ -40,14 +40,6 @@ const (
func init() {
reexec.Register(copierCommand, copierMain)
- // Attempt a user and host lookup to force libc (glibc, and possibly others that use dynamic
- // modules to handle looking up user and host information) to load modules that match the libc
- // our binary is currently using. Hopefully they're loaded on first use, so that they won't
- // need to be loaded after we've chrooted into the rootfs, which could include modules that
- // don't match our libc and which can't be loaded, or modules which we don't want to execute
- // because we don't trust their code.
- _, _ = user.Lookup("buildah")
- _, _ = net.LookupHost("localhost")
}
// isArchivePath returns true if the specified path can be read like a (possibly
@@ -352,6 +344,7 @@ type PutOptions struct {
IgnoreXattrErrors bool // ignore any errors encountered when attempting to set extended attributes
IgnoreDevices bool // ignore items which are character or block devices
NoOverwriteDirNonDir bool // instead of quietly overwriting directories with non-directories, return an error
+ NoOverwriteNonDirDir bool // instead of quietly overwriting non-directories with directories, return an error
Rename map[string]string // rename items with the specified names, or under the specified names
}
@@ -712,6 +705,15 @@ func copierMain() {
encoder := json.NewEncoder(os.Stdout)
previousRequestRoot := ""
+ // Attempt a user and host lookup to force libc (glibc, and possibly others that use dynamic
+ // modules to handle looking up user and host information) to load modules that match the libc
+ // our binary is currently using. Hopefully they're loaded on first use, so that they won't
+ // need to be loaded after we've chrooted into the rootfs, which could include modules that
+ // don't match our libc and which can't be loaded, or modules which we don't want to execute
+ // because we don't trust their code.
+ _, _ = user.Lookup("buildah")
+ _, _ = net.LookupHost("localhost")
+
// Set logging.
if level := os.Getenv("LOGLEVEL"); level != "" {
if ll, err := strconv.Atoi(level); err == nil {
@@ -1793,12 +1795,15 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
}
case tar.TypeDir:
if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) {
- var st os.FileInfo
- if st, err = os.Lstat(path); err == nil && !st.IsDir() {
- // it's not a directory, so remove it and mkdir
+ if st, stErr := os.Lstat(path); stErr == nil && !st.IsDir() {
+ if req.PutOptions.NoOverwriteNonDirDir {
+ break
+ }
if err = os.Remove(path); err == nil {
err = os.Mkdir(path, 0700)
}
+ } else {
+ err = stErr
}
// either we removed it and retried, or it was a directory,
// in which case we want to just add the new stuff under it
diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go
index 568be203c..a31ff0309 100644
--- a/vendor/github.com/containers/buildah/define/build.go
+++ b/vendor/github.com/containers/buildah/define/build.go
@@ -11,10 +11,29 @@ import (
"golang.org/x/sync/semaphore"
)
+// AdditionalBuildContext contains verbose details about a parsed build context from --build-context
+type AdditionalBuildContext struct {
+ // IsURL is true when Value is the URL of an external tar archive.
+ IsURL bool
+ // IsImage is true when Value is the name of an image (possibly not yet pulled).
+ IsImage bool
+ // Value holds a URL, an image name, or an absolute filesystem path.
+ Value string
+ // DownloadedCache is the absolute filesystem path of the build context that
+ // was downloaded and exported from an external tar archive. It is populated
+ // only when the build context came from a URL (IsURL) and has already been
+ // downloaded by an earlier RUN step.
+ DownloadedCache string
+}
+
// CommonBuildOptions are resources that can be defined by flags for both buildah from and build
type CommonBuildOptions struct {
// AddHost is the list of hostnames to add to the build container's /etc/hosts.
AddHost []string
+ // OmitHistory tells the builder to ignore the history of the build layers and
+ // the base image while preparing the image spec; setting this to true ensures
+ // that no history is added to the image spec. (default false)
+ OmitHistory bool
// CgroupParent is the path to cgroups under which the cgroup for the container will be created.
CgroupParent string
// CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
@@ -121,6 +140,8 @@ type BuildOptions struct {
Compression archive.Compression
// Arguments which can be interpolated into Dockerfiles
Args map[string]string
+ // Map of external additional build contexts
+ AdditionalBuildContexts map[string]*AdditionalBuildContext
// Name of the image to write to.
Output string
// BuildOutput specifies if any custom build output is selected for following build.
@@ -130,6 +151,12 @@ type BuildOptions struct {
// Additional tags to add to the image that we write, if we know of a
// way to add them.
AdditionalTags []string
+ // LogFile specifies a file to which log output should be redirected instead
+ // of stdout and stderr.
+ LogFile string
+ // LogSplitByPlatform tells imagebuildah to write a separate log file for each
+ // platform when logging to an external file was selected.
+ LogSplitByPlatform bool
// Log is a callback that will print a progress message. If no value
// is supplied, the message will be sent to Err (or os.Stderr, if Err
// is nil) by default.
@@ -187,6 +214,8 @@ type BuildOptions struct {
DropCapabilities []string
// CommonBuildOpts is *required*.
CommonBuildOpts *CommonBuildOptions
+ // CPPFlags are additional arguments to pass to the C Preprocessor (cpp).
+ CPPFlags []string
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
DefaultMountsFilePath string
// IIDFile tells the builder to write the image ID to the specified file
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
index 459a161cd..985558140 100644
--- a/vendor/github.com/containers/buildah/define/types.go
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -29,7 +29,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.26.1"
+ Version = "1.27.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"
@@ -127,13 +127,18 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
return "", "", errors.Wrapf(err, "error parsing url %q", url)
}
if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") {
- combinedOutput, err := cloneToDirectory(url, name)
+ combinedOutput, gitSubDir, err := cloneToDirectory(url, name)
if err != nil {
if err2 := os.RemoveAll(name); err2 != nil {
logrus.Debugf("error removing temporary directory %q: %v", name, err2)
}
return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput))
}
+ // Check whether the git url specifies a subdir;
+ // if it does, switch into that subdir.
+ if gitSubDir != "" {
+ name = filepath.Join(name, gitSubDir)
+ }
return name, "", nil
}
if strings.HasPrefix(url, "github.com/") {
@@ -170,17 +175,29 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
return "", "", errors.Errorf("unreachable code reached")
}
-func cloneToDirectory(url, dir string) ([]byte, error) {
- gitBranch := strings.Split(url, "#")
+func cloneToDirectory(url, dir string) ([]byte, string, error) {
+ gitSubdir := ""
+ gitBranch := ""
+ gitBranchPart := strings.Split(url, "#")
var cmd *exec.Cmd
- if len(gitBranch) < 2 {
- logrus.Debugf("cloning %q to %q", url, dir)
- cmd = exec.Command("git", "clone", url, dir)
+ if len(gitBranchPart) > 1 {
+ // check if string contains path to a subdir
+ gitSubDirPart := strings.Split(gitBranchPart[1], ":")
+ if len(gitSubDirPart) > 1 {
+ gitSubdir = gitSubDirPart[1]
+ }
+ gitBranch = gitSubDirPart[0]
+ }
+ if gitBranch == "" {
+ logrus.Debugf("cloning %q to %q", gitBranchPart[0], dir)
+ cmd = exec.Command("git", "clone", "--recurse-submodules", gitBranchPart[0], dir)
} else {
- logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir)
- cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch[1], gitBranch[0], dir)
+ logrus.Debugf("cloning repo %q and branch %q to %q", gitBranchPart[0], gitBranch, dir)
+ cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch, gitBranchPart[0], dir)
}
- return cmd.CombinedOutput()
+
+ combinedOutput, err := cmd.CombinedOutput()
+ return combinedOutput, gitSubdir, err
}
func downloadToDirectory(url, dir string) error {
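The build-context URL form handled above is "<repo>#<branch>:<subdir>", with branch and subdir both optional. A standalone re-trace of the parsing, for illustration only:

package main

import (
	"fmt"
	"strings"
)

func main() {
	url := "https://github.com/containers/buildah.git#main:docs"

	repo, branch, subdir := url, "", ""
	if parts := strings.SplitN(url, "#", 2); len(parts) == 2 {
		repo = parts[0]
		branchAndSubdir := strings.SplitN(parts[1], ":", 2)
		branch = branchAndSubdir[0]
		if len(branchAndSubdir) == 2 {
			subdir = branchAndSubdir[1]
		}
	}

	// Equivalent clone: git clone --recurse-submodules -b main <repo> <tmpdir>,
	// after which the build switches into <tmpdir>/docs.
	fmt.Println(repo, branch, subdir)
}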
diff --git a/vendor/github.com/containers/buildah/define/types_unix.go b/vendor/github.com/containers/buildah/define/types_unix.go
index aedadad36..c57e29d97 100644
--- a/vendor/github.com/containers/buildah/define/types_unix.go
+++ b/vendor/github.com/containers/buildah/define/types_unix.go
@@ -6,4 +6,13 @@ import (
"github.com/opencontainers/runc/libcontainer/devices"
)
-type ContainerDevices = []devices.Device
+// BuildahDevice is a wrapper around devices.Device
+// with additional support for renaming a device
+// using bind-mount in rootless environments.
+type BuildahDevice struct {
+ devices.Device
+ Source string
+ Destination string
+}
+
+type ContainerDevices = []BuildahDevice
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index cd3d63a0f..1fc8c6016 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -1,47 +1,122 @@
module github.com/containers/buildah
-go 1.16
+go 1.17
require (
- github.com/containerd/containerd v1.6.4
- github.com/containernetworking/cni v1.1.0
- github.com/containers/common v0.48.0
- github.com/containers/image/v5 v5.21.1
+ github.com/containerd/containerd v1.6.6
+ github.com/containernetworking/cni v1.1.1
+ github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9
+ github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
- github.com/containers/storage v1.40.2
+ github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6
github.com/docker/distribution v2.8.1+incompatible
- github.com/docker/docker v20.10.14+incompatible
+ github.com/docker/docker v20.10.17+incompatible
github.com/docker/go-units v0.4.0
- github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
- github.com/fsouza/go-dockerclient v1.7.11
+ github.com/fsouza/go-dockerclient v1.8.1
github.com/ghodss/yaml v1.0.0
github.com/hashicorp/go-multierror v1.1.1
- github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
- github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/mattn/go-shellwords v1.0.12
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.19.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
- github.com/opencontainers/runc v1.1.1
+ github.com/opencontainers/runc v1.1.3
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.10.1
github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805
github.com/pkg/errors v0.9.1
- github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921
+ github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.4.0
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.7.1
+ github.com/stretchr/testify v1.7.2
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
+ golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
)
-replace github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2
+require (
+ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ github.com/BurntSushi/toml v1.1.0 // indirect
+ github.com/Microsoft/go-winio v0.5.2 // indirect
+ github.com/Microsoft/hcsshim v0.9.3 // indirect
+ github.com/VividCortex/ewma v1.2.0 // indirect
+ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blang/semver v3.5.1+incompatible // indirect
+ github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
+ github.com/containerd/cgroups v1.0.3 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
+ github.com/containernetworking/plugins v1.1.1 // indirect
+ github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
+ github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/disiqueira/gotree/v3 v3.0.2 // indirect
+ github.com/docker/docker-credential-helpers v0.6.4 // indirect
+ github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
+ github.com/docker/go-metrics v0.0.1 // indirect
+ github.com/fsnotify/fsnotify v1.4.9 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.2 // indirect
+ github.com/google/go-intervals v0.0.2 // indirect
+ github.com/google/uuid v1.3.0 // indirect
+ github.com/gorilla/mux v1.8.0 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/imdario/mergo v0.3.12 // indirect
+ github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/jinzhu/copier v0.3.5 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/klauspost/compress v1.15.6 // indirect
+ github.com/klauspost/pgzip v1.2.5 // indirect
+ github.com/manifoldco/promptui v0.9.0 // indirect
+ github.com/mattn/go-runewidth v0.0.13 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/miekg/pkcs11 v1.1.1 // indirect
+ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
+ github.com/moby/sys/mount v0.3.3 // indirect
+ github.com/moby/sys/mountinfo v0.6.2 // indirect
+ github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/morikuni/aec v1.0.0 // indirect
+ github.com/nxadm/tail v1.4.8 // indirect
+ github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/proglottis/gpgme v0.1.2 // indirect
+ github.com/prometheus/client_golang v1.11.1 // indirect
+ github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/common v0.30.0 // indirect
+ github.com/prometheus/procfs v0.7.3 // indirect
+ github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
+ github.com/sylabs/sif/v2 v2.7.0 // indirect
+ github.com/tchap/go-patricia v2.3.0+incompatible // indirect
+ github.com/ulikunitz/xz v0.5.10 // indirect
+ github.com/vbatts/tar-split v0.11.2 // indirect
+ github.com/vbauerster/mpb/v7 v7.4.1 // indirect
+ github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 // indirect
+ github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
+ github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+ github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+ go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
+ go.opencensus.io v0.23.0 // indirect
+ golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+ golang.org/x/text v0.3.7 // indirect
+ google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 // indirect
+ google.golang.org/grpc v1.44.0 // indirect
+ google.golang.org/protobuf v1.28.0 // indirect
+ gopkg.in/square/go-jose.v2 v2.5.1 // indirect
+ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/klog v1.0.0 // indirect
+)
replace github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.2-0.20211123152302-43a7dee1ec31
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index b73dc0069..61bffdb64 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -88,8 +88,9 @@ github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo=
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -164,6 +165,7 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -217,9 +219,8 @@ github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
-github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
-github.com/containerd/containerd v1.6.4 h1:SEDZBp10mhCp+hkO3Njz/YhGrI7ah3edNcUlRdUPOgg=
-github.com/containerd/containerd v1.6.4/go.mod h1:oWOqbuJUZmOVafhA0lj2NAXbiO1u7F0K5l1bUgdyo94=
+github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0=
+github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -239,8 +240,7 @@ github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZH
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.5/go.mod h1:Rf2ZrMycr1El589IyuRzn7RkfdRZVKaFGaxSDHVAjj0=
+github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -278,17 +278,18 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
-github.com/containernetworking/cni v1.1.0 h1:T00oIz4hef+/p9gpRZa57SnIN+QnbmAHBjbxaOSFo9U=
-github.com/containernetworking/cni v1.1.0/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k=
+github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw=
-github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
-github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q=
-github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
+github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9 h1:sK+TNC8oUBkruZTIqwYJrENetSLQnk+goBVyLiqsJq8=
+github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9/go.mod h1:WBLwq+i7bicCpH54V70HM6s7jqDAESTlYnd05XXp0ac=
+github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4/go.mod h1:OsX9sFexyGF0FCNAjfcVFv3IwMqDyLyV/WQY/roLPcE=
+github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471 h1:2mm1jEFATvpdFfp8lUB/yc237OqwruMvfIPiVn1Wpgg=
+github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471/go.mod h1:KntCBNQn3qOuZmQuJ38ORyTozmWXiuo05Vef2S0Sm5M=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -299,9 +300,10 @@ github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pA
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f h1:hffElEaoDQfREHltc2wtFPd68BqDmzW6KkEDpuSRBjs=
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
-github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
-github.com/containers/storage v1.40.2 h1:GUlHaGnrs1JOEwv6YEvkQdgYXOXZdU1Angy4wgWNgF8=
github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
+github.com/containers/storage v1.41.0/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s=
+github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6 h1:AWGEIiqWFIfzTIv4Q3k6vJt/EYyo8dh35ny7WhnOd0s=
+github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6/go.mod h1:6XQ68cEG8ojfP/m3HIupFV1rZsnqeFmaE8N1ctBP94Y=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -351,10 +353,11 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.14+incompatible h1:+T9/PRYWNDo5SZl5qS1r9Mo/0Q8AwxKKPtu9S1yxM0w=
-github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.15+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
@@ -368,8 +371,6 @@ github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQ
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
@@ -396,12 +397,13 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y=
-github.com/fsouza/go-dockerclient v1.7.11 h1:pRmGMANAl+tmr+IYNYq8IWWcSbiKQMSRumYLv8H5sfk=
-github.com/fsouza/go-dockerclient v1.7.11/go.mod h1:zvYxutUNOK853i1s7VywZxQgxSHbm7A6en/q9MHBN6k=
+github.com/fsouza/go-dockerclient v1.8.1 h1:a27vHYqNSZz88nUAurI1o6W5PgEt63nAWilOI+j63RE=
+github.com/fsouza/go-dockerclient v1.8.1/go.mod h1:zmA2ogSxRnXmbZcy0Aq7yhRoCdP/bDns/qghCK9SWtM=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
@@ -599,8 +601,6 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
@@ -631,19 +631,21 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -691,13 +693,15 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
+github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
+github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc=
github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
@@ -773,8 +777,11 @@ github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
-github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU=
+github.com/opencontainers/runc v1.1.1-0.20220607072441-a7a45d7d2721/go.mod h1:QvA0UNe48mC1JxcXq0sENIR38+/LdJMLNxuAvtFBhxA=
github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -811,8 +818,9 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ=
github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
+github.com/proglottis/gpgme v0.1.2 h1:dKlhDqJ0kdEt+YHCD8FQEUdF9cJj/+mbJUNyUGNAEzY=
+github.com/proglottis/gpgme v0.1.2/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -856,6 +864,8 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -868,14 +878,22 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y=
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
@@ -914,8 +932,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk=
github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
@@ -938,6 +957,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vbauerster/mpb/v7 v7.4.1 h1:NhLMWQ3gNg2KJR8oeA9lO8Xvq+eNPmixDmB6JEQOUdA=
@@ -1027,6 +1047,7 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -1169,6 +1190,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cO
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1268,14 +1290,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1488,8 +1513,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1525,8 +1551,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index e3668bd0d..3c7bea432 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -70,6 +70,7 @@ type containerImageRef struct {
annotations map[string]string
preferredManifestType string
squash bool
+ omitHistory bool
emptyLayer bool
idMappingOptions *define.IDMappingOptions
parent string
@@ -221,7 +222,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
oimage.RootFS.DiffIDs = []digest.Digest{}
// Only clear the history if we're squashing, otherwise leave it be so that we can append
// entries to it.
- if i.squash {
+ if i.squash || i.omitHistory {
oimage.History = []v1.History{}
}
@@ -244,7 +245,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
// Only clear the history if we're squashing, otherwise leave it be so
// that we can append entries to it. Clear the parent, too, we no
// longer include its layers and history.
- if i.squash {
+ if i.squash || i.omitHistory {
dimage.Parent = ""
dimage.History = []docker.V2S2History{}
}
@@ -530,43 +531,56 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
dimage.History = append(dimage.History, dnews)
}
}
- appendHistory(i.preEmptyLayers)
- created := time.Now().UTC()
- if i.created != nil {
- created = (*i.created).UTC()
- }
- comment := i.historyComment
- // Add a comment for which base image is being used
- if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
- comment += "FROM " + i.fromImageName
- }
- onews := v1.History{
- Created: &created,
- CreatedBy: i.createdBy,
- Author: oimage.Author,
- Comment: comment,
- EmptyLayer: i.emptyLayer,
- }
- oimage.History = append(oimage.History, onews)
- dnews := docker.V2S2History{
- Created: created,
- CreatedBy: i.createdBy,
- Author: dimage.Author,
- Comment: comment,
- EmptyLayer: i.emptyLayer,
- }
- dimage.History = append(dimage.History, dnews)
- appendHistory(i.postEmptyLayers)
-
- // Sanity check that we didn't just create a mismatch between non-empty layers in the
- // history and the number of diffIDs.
- expectedDiffIDs := expectedOCIDiffIDs(oimage)
- if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
- return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
- }
- expectedDiffIDs = expectedDockerDiffIDs(dimage)
- if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
- return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
+
+	// Calculate the base image history length for the special scenario
+	// in which the base layers do not contain any history.
+	// We skip the sanity checks if the base image history is empty,
+	// but still add new history entries for docker parity.
+ baseImageHistoryLen := len(oimage.History)
+ // Only attempt to append history if history was not disabled explicitly.
+ if !i.omitHistory {
+ appendHistory(i.preEmptyLayers)
+ created := time.Now().UTC()
+ if i.created != nil {
+ created = (*i.created).UTC()
+ }
+ comment := i.historyComment
+ // Add a comment for which base image is being used
+ if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
+ comment += "FROM " + i.fromImageName
+ }
+ onews := v1.History{
+ Created: &created,
+ CreatedBy: i.createdBy,
+ Author: oimage.Author,
+ Comment: comment,
+ EmptyLayer: i.emptyLayer,
+ }
+ oimage.History = append(oimage.History, onews)
+ dnews := docker.V2S2History{
+ Created: created,
+ CreatedBy: i.createdBy,
+ Author: dimage.Author,
+ Comment: comment,
+ EmptyLayer: i.emptyLayer,
+ }
+ dimage.History = append(dimage.History, dnews)
+ appendHistory(i.postEmptyLayers)
+
+ // Sanity check that we didn't just create a mismatch between non-empty layers in the
+		// history and the number of diffIDs. The following sanity check is skipped if build
+		// history has been disabled explicitly by the user, and also when the base image
+		// history is empty, for docker parity.
+ if baseImageHistoryLen != 0 {
+ expectedDiffIDs := expectedOCIDiffIDs(oimage)
+ if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
+ return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
+ }
+ expectedDiffIDs = expectedDockerDiffIDs(dimage)
+ if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
+ return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
+ }
+ }
}
// Encode the image configuration blob.
@@ -819,6 +833,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
annotations: b.Annotations(),
preferredManifestType: manifestType,
squash: options.Squash,
+ omitHistory: options.OmitHistory,
emptyLayer: options.EmptyLayer && !options.Squash,
idMappingOptions: &b.IDMappingOptions,
parent: parent,
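
For illustration, a minimal standalone sketch of how the new omitHistory gate affects the history entries written to the OCI config, using the image-spec types the vendored code already imports; the helper name and the sample values below are illustrative assumptions, not code from the vendored package.

```go
package main

import (
	"fmt"
	"time"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// appendHistoryEntry mirrors the gating above: when omitHistory is set the
// config keeps an empty history, otherwise a new entry is appended.
func appendHistoryEntry(img *v1.Image, createdBy, comment string, emptyLayer, omitHistory bool) {
	if omitHistory {
		img.History = nil
		return
	}
	created := time.Now().UTC()
	img.History = append(img.History, v1.History{
		Created:    &created,
		CreatedBy:  createdBy,
		Comment:    comment,
		EmptyLayer: emptyLayer,
	})
}

func main() {
	var img v1.Image
	appendHistoryEntry(&img, "/bin/sh -c #(nop) LABEL demo=1", "FROM docker.io/library/alpine", true, false)
	fmt.Println("history entries:", len(img.History))
}
```
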
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index cf0a7cfba..95bdc54ed 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -28,6 +28,7 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/hashicorp/go-multierror"
+ "github.com/mattn/go-shellwords"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
@@ -157,7 +158,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
// pre-process Dockerfiles with ".in" suffix
if strings.HasSuffix(dfile, ".in") {
- pData, err := preprocessContainerfileContents(logger, dfile, data, options.ContextDirectory)
+ pData, err := preprocessContainerfileContents(logger, dfile, data, options.ContextDirectory, options.CPPFlags)
if err != nil {
return "", nil, err
}
@@ -211,7 +212,10 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
if options.AllPlatforms {
- options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.SystemContext)
+ if options.AdditionalBuildContexts == nil {
+ options.AdditionalBuildContexts = make(map[string]*define.AdditionalBuildContext)
+ }
+ options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.AdditionalBuildContexts, options.SystemContext)
if err != nil {
return "", nil, err
}
@@ -249,7 +253,25 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
platformOptions.Args = argsCopy
builds.Go(func() error {
- thisID, thisRef, err := buildDockerfilesOnce(ctx, store, logger, logPrefix, platformOptions, paths, files)
+ loggerPerPlatform := logger
+ if platformOptions.LogFile != "" && platformOptions.LogSplitByPlatform {
+ logFile := platformOptions.LogFile + "_" + platformOptions.OS + "_" + platformOptions.Architecture
+ f, err := os.OpenFile(logFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+ if err != nil {
+ return errors.Wrapf(err, "opening logfile: %q", logFile)
+ }
+ defer f.Close()
+ loggerPerPlatform = logrus.New()
+ loggerPerPlatform.SetOutput(f)
+ loggerPerPlatform.SetLevel(logrus.GetLevel())
+ stdout := f
+ stderr := f
+ reporter := f
+ platformOptions.Out = stdout
+ platformOptions.ReportWriter = reporter
+ platformOptions.Err = stderr
+ }
+ thisID, thisRef, err := buildDockerfilesOnce(ctx, store, loggerPerPlatform, logPrefix, platformOptions, paths, files)
if err != nil {
return err
}
@@ -467,7 +489,7 @@ func warnOnUnsetBuildArgs(logger *logrus.Logger, node *parser.Node, args map[str
// preprocessContainerfileContents runs CPP(1) in preprocess-only mode on the input
// dockerfile content and will use ctxDir as the base include path.
-func preprocessContainerfileContents(logger *logrus.Logger, containerfile string, r io.Reader, ctxDir string) (stdout io.Reader, err error) {
+func preprocessContainerfileContents(logger *logrus.Logger, containerfile string, r io.Reader, ctxDir string, cppFlags []string) (stdout io.Reader, err error) {
cppCommand := "cpp"
cppPath, err := exec.LookPath(cppCommand)
if err != nil {
@@ -480,7 +502,16 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
stdoutBuffer := bytes.Buffer{}
stderrBuffer := bytes.Buffer{}
- cmd := exec.Command(cppPath, "-E", "-iquote", ctxDir, "-traditional", "-undef", "-")
+ cppArgs := []string{"-E", "-iquote", ctxDir, "-traditional", "-undef", "-"}
+ if flags, ok := os.LookupEnv("BUILDAH_CPPFLAGS"); ok {
+ args, err := shellwords.Parse(flags)
+ if err != nil {
+ return nil, errors.Errorf("error parsing BUILDAH_CPPFLAGS %q: %v", flags, err)
+ }
+ cppArgs = append(cppArgs, args...)
+ }
+ cppArgs = append(cppArgs, cppFlags...)
+ cmd := exec.Command(cppPath, cppArgs...)
cmd.Stdin = r
cmd.Stdout = &stdoutBuffer
cmd.Stderr = &stderrBuffer
@@ -502,8 +533,8 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
// platformsForBaseImages resolves the names of base images from the
// dockerfiles, and if they are all valid references to manifest lists, returns
// the list of platforms that are supported by all of the base images.
-func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) {
- baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args)
+func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) {
+ baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args, additionalBuildContext)
if err != nil {
return nil, errors.Wrapf(err, "determining list of base images")
}
@@ -631,7 +662,7 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
// stage's base image with FROM, and returns the list of base images as
// provided. Each entry in the dockerfilenames slice corresponds to a slice in
// dockerfilecontents.
-func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string) ([]string, error) {
+func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext) ([]string, error) {
mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
if err != nil {
return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0])
@@ -670,6 +701,13 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
child.Next.Value = from
from = ""
}
+ if replaceBuildContext, ok := additionalBuildContext[child.Next.Value]; ok {
+ if replaceBuildContext.IsImage {
+ child.Next.Value = replaceBuildContext.Value
+ } else {
+ return nil, fmt.Errorf("build context %q is not an image, can not be used for FROM %q", child.Next.Value, child.Next.Value)
+ }
+ }
base := child.Next.Value
if base != "scratch" && !nicknames[base] {
// TODO: this didn't undergo variable and arg
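
For reference, a standalone sketch of how the cpp argument list is assembled from the BUILDAH_CPPFLAGS environment variable and the new per-build CPPFlags option; the sample flag values and the error wording are illustrative assumptions, not taken from the vendored code.

```go
package main

import (
	"fmt"
	"os"

	shellwords "github.com/mattn/go-shellwords"
)

// buildCppArgs mirrors the argument assembly in preprocessContainerfileContents:
// base flags first, then anything parsed out of BUILDAH_CPPFLAGS, then the
// per-build cppFlags passed through the new CPPFlags option.
func buildCppArgs(ctxDir string, cppFlags []string) ([]string, error) {
	args := []string{"-E", "-iquote", ctxDir, "-traditional", "-undef", "-"}
	if flags, ok := os.LookupEnv("BUILDAH_CPPFLAGS"); ok {
		extra, err := shellwords.Parse(flags)
		if err != nil {
			return nil, fmt.Errorf("parsing BUILDAH_CPPFLAGS %q: %w", flags, err)
		}
		args = append(args, extra...)
	}
	return append(args, cppFlags...), nil
}

func main() {
	os.Setenv("BUILDAH_CPPFLAGS", "-DTESTFLAG=1")
	args, err := buildCppArgs("/tmp/ctx", []string{"-DDEBUG"})
	if err != nil {
		panic(err)
	}
	fmt.Println(args)
}
```
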
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index 6b63b5162..a33e1ffdd 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -126,6 +126,7 @@ type Executor struct {
imageInfoLock sync.Mutex
imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs
fromOverride string
+ additionalBuildContexts map[string]*define.AdditionalBuildContext
manifest string
secrets map[string]define.Secret
sshsources map[string]*sshagent.Source
@@ -275,6 +276,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
rusageLogFile: rusageLogFile,
imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs),
fromOverride: options.From,
+ additionalBuildContexts: options.AdditionalBuildContexts,
manifest: options.Manifest,
secrets: secrets,
sshsources: sshsources,
@@ -609,6 +611,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
base := child.Next.Value
if base != "scratch" {
+ if replaceBuildContext, ok := b.additionalBuildContexts[child.Next.Value]; ok {
+ if replaceBuildContext.IsImage {
+ child.Next.Value = replaceBuildContext.Value
+ base = child.Next.Value
+ }
+ }
userArgs := argsMapToSlice(stage.Builder.Args)
baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 01b70369b..576ae5ed9 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -369,18 +369,73 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
if fromErr != nil {
return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
}
- if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
- return err
- }
- if other, ok := s.executor.stages[from]; ok && other.index < s.index {
- contextDir = other.mountPoint
- idMappingOptions = &other.builder.IDMappingOptions
- } else if builder, ok := s.executor.containerMap[copy.From]; ok {
- contextDir = builder.MountPoint
- idMappingOptions = &builder.IDMappingOptions
+ var additionalBuildContext *define.AdditionalBuildContext
+ if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
+ additionalBuildContext = foundContext
} else {
- return errors.Errorf("the stage %q has not been built", copy.From)
+			// The --from argument to COPY may be a stage index.
+			// If that is the case, check whether the provided index
+			// exists and whether the stage's short name matches any
+			// additional build context; if so, replace the stage with
+			// the additional build context.
+ if _, err := strconv.Atoi(from); err == nil {
+ if stage, ok := s.executor.stages[from]; ok {
+ if foundContext, ok := s.executor.additionalBuildContexts[stage.name]; ok {
+ additionalBuildContext = foundContext
+ }
+ }
+ }
+ }
+ if additionalBuildContext != nil {
+ if !additionalBuildContext.IsImage {
+ contextDir = additionalBuildContext.Value
+ if additionalBuildContext.IsURL {
+					// Check whether this build context was already
+					// downloaded by an earlier step. If not,
+					// download it and populate the DownloadedCache
+					// field for future steps.
+ if additionalBuildContext.DownloadedCache == "" {
+					// The additional context contains a tar file,
+					// so download and extract the tar into buildah's
+					// temp dir and point the context at that.
+ path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
+ if err != nil {
+ return errors.Wrapf(err, "unable to download context from external source %q", additionalBuildContext.Value)
+ }
+ // point context dir to the extracted path
+ contextDir = filepath.Join(path, subdir)
+ // populate cache for next RUN step
+ additionalBuildContext.DownloadedCache = contextDir
+ } else {
+ contextDir = additionalBuildContext.DownloadedCache
+ }
+ }
+ } else {
+ copy.From = additionalBuildContext.Value
+ }
}
+ if additionalBuildContext == nil {
+ if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
+ return err
+ }
+ if other, ok := s.executor.stages[from]; ok && other.index < s.index {
+ contextDir = other.mountPoint
+ idMappingOptions = &other.builder.IDMappingOptions
+ } else if builder, ok := s.executor.containerMap[copy.From]; ok {
+ contextDir = builder.MountPoint
+ idMappingOptions = &builder.IDMappingOptions
+ } else {
+ return errors.Errorf("the stage %q has not been built", copy.From)
+ }
+ } else if additionalBuildContext.IsImage {
+ // Image was selected as additionalContext so only process image.
+ mountPoint, err := s.getImageRootfs(s.ctx, copy.From)
+ if err != nil {
+ return err
+ }
+ contextDir = mountPoint
+ }
+		// The original behaviour of buildah still holds for COPY, irrespective of any additional context.
preserveOwnership = true
copyExcludes = excludes
} else {
@@ -446,6 +501,55 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
if fromErr != nil {
return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
}
+ // If additional buildContext contains this
+ // give priority to that and break if additional
+ // is not an external image.
+ if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok {
+ if additionalBuildContext.IsImage {
+ mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value)
+ if err != nil {
+ return nil, errors.Errorf("%s from=%s: image found with that name", flag, from)
+ }
+ // The `from` in stageMountPoints should point
+ // to `mountPoint` replaced from additional
+ // build-context. Reason: Parser will use this
+ // `from` to refer from stageMountPoints map later.
+ stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
+ break
+ } else {
+				// Most likely this points to a path on the filesystem
+				// or to an external tar archive. Treat it like a stage;
+				// nothing is different in this case. Point mountPoint at
+				// the path on the host and it will be handled correctly,
+				// since GetBindMount honors the IsStage setting while
+				// processing stageMountPoints.
+ mountPoint := additionalBuildContext.Value
+ if additionalBuildContext.IsURL {
+						// Check whether this build context was already
+						// downloaded by an earlier RUN step. If not,
+						// download it and populate the DownloadedCache
+						// field for future RUN steps.
+ if additionalBuildContext.DownloadedCache == "" {
+						// The additional context contains a tar file,
+						// so download and extract the tar into buildah's
+						// temp dir and point the context at that.
+ path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to download context from external source %q", additionalBuildContext.Value)
+ }
+ // point context dir to the extracted path
+ mountPoint = filepath.Join(path, subdir)
+ // populate cache for next RUN step
+ additionalBuildContext.DownloadedCache = mountPoint
+ } else {
+ mountPoint = additionalBuildContext.DownloadedCache
+ }
+ }
+ stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: mountPoint}
+ break
+ }
+ }
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
@@ -493,30 +597,30 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
stdin = devNull
}
options := buildah.RunOptions{
- Logger: s.executor.logger,
- Hostname: config.Hostname,
- Runtime: s.executor.runtime,
Args: s.executor.runtimeArgs,
+ Cmd: config.Cmd,
+ ContextDir: s.executor.contextDir,
+ Entrypoint: config.Entrypoint,
+ Env: config.Env,
+ Hostname: config.Hostname,
+ Logger: s.executor.logger,
+ Mounts: append([]Mount{}, s.executor.transientMounts...),
+ NamespaceOptions: s.executor.namespaceOptions,
NoHosts: s.executor.noHosts,
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "",
- Mounts: append([]Mount{}, s.executor.transientMounts...),
- Env: config.Env,
- User: config.User,
- WorkingDir: config.WorkingDir,
- Entrypoint: config.Entrypoint,
- ContextDir: s.executor.contextDir,
- Cmd: config.Cmd,
- Stdin: stdin,
- Stdout: s.executor.out,
- Stderr: s.executor.err,
Quiet: s.executor.quiet,
- NamespaceOptions: s.executor.namespaceOptions,
- Terminal: buildah.WithoutTerminal,
+ RunMounts: run.Mounts,
+ Runtime: s.executor.runtime,
Secrets: s.executor.secrets,
SSHSources: s.executor.sshsources,
- RunMounts: run.Mounts,
StageMountPoints: stageMountPoints,
+ Stderr: s.executor.err,
+ Stdin: stdin,
+ Stdout: s.executor.out,
SystemContext: s.executor.systemContext,
+ Terminal: buildah.WithoutTerminal,
+ User: config.User,
+ WorkingDir: config.WorkingDir,
}
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
@@ -865,14 +969,14 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// squash the contents of the base image. Whichever is
// the case, we need to commit() to create a new image.
logCommit(s.output, -1)
- if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output); err != nil {
+ if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil {
return "", nil, errors.Wrapf(err, "error committing base container")
}
} else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 {
// The image would be modified by the labels passed
// via the command line, so we need to commit.
logCommit(s.output, -1)
- if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output); err != nil {
+ if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash); err != nil {
return "", nil, err
}
} else {
@@ -923,6 +1027,25 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if fromErr != nil {
return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
}
+ // If additional buildContext contains this
+ // give priority to that and break if additional
+ // is not an external image.
+ if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok {
+ if !additionalBuildContext.IsImage {
+ // We don't need to pull this
+ // since this additional context
+ // is not an image.
+ break
+ } else {
+ // replace with image set in build context
+ from = additionalBuildContext.Value
+ if _, err := s.getImageRootfs(ctx, from); err != nil {
+ return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
+ }
+ break
+ }
+ }
+
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
@@ -984,7 +1107,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
- imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output)
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
@@ -1018,7 +1141,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// we need to call ib.Run() to correctly put the args together before
// determining if a cached layer with the same build args already exists
// and that is done in the if block below.
- if checkForLayers && step.Command != "arg" {
+ if checkForLayers && step.Command != "arg" && !(s.executor.squash && lastInstruction && lastStage) {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
@@ -1071,10 +1194,6 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
}
- // We want to save history for other layers during a squashed build.
- // Toggle flag allows executor to treat other instruction and layers
- // as regular builds and only perform squashing at last
- squashToggle := false
// Note: If the build has squash, we must try to re-use as many layers as possible if cache is found.
// So only perform commit if its the lastInstruction of lastStage.
if cacheID != "" {
@@ -1091,30 +1210,27 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
}
} else {
- if s.executor.squash {
- // We want to save history for other layers during a squashed build.
- // squashToggle flag allows executor to treat other instruction and layers
- // as regular builds and only perform squashing at last
- s.executor.squash = false
- squashToggle = true
- }
// We're not going to find any more cache hits, so we
// can stop looking for them.
checkForLayers = false
// Create a new image, maybe with a new layer, with the
// name for this stage if it's the last instruction.
logCommit(s.output, i)
- imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
+			// While committing we always set squash to false here,
+			// because at this point we want to save history for
+			// layers even if it's a squashed build, so that they
+			// can be part of the build cache.
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
}
- // Perform final squash for this build as we are one the,
- // last instruction of last stage
- if (s.executor.squash || squashToggle) && lastInstruction && lastStage {
- s.executor.squash = true
- imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
+ // Create a squashed version of this image
+ // if we're supposed to create one and this
+ // is the last instruction of the last stage.
+ if s.executor.squash && lastInstruction && lastStage {
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
}
@@ -1450,7 +1566,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
// commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise.
// or commit via any custom exporter if specified.
-func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
+func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash bool) (string, reference.Canonical, error) {
ib := s.stage.Builder
var buildOutputOption define.BuildOutputOption
if s.executor.buildOutput != "" {
@@ -1591,7 +1707,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
ReportWriter: writer,
PreferredManifestType: s.executor.outputFormat,
SystemContext: s.executor.systemContext,
- Squash: s.executor.squash,
+ Squash: squash,
+ OmitHistory: s.executor.commonBuildOptions.OmitHistory,
EmptyLayer: emptyLayer,
BlobDirectory: s.executor.blobDirectory,
SignBy: s.executor.signBy,
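
A simplified, self-contained sketch of the --from resolution order introduced above: an additional build context wins over a stage of the same name, a numeric --from is mapped back to the stage's short name, and URL contexts are downloaded only once. The types, the download stub, and the sample values are illustrative stand-ins, not buildah's own definitions.

```go
package main

import (
	"fmt"
	"strconv"
)

// buildContext is a stand-in for define.AdditionalBuildContext.
type buildContext struct {
	IsImage, IsURL  bool
	Value           string
	DownloadedCache string
}

// resolveFrom mirrors the lookup order in StageExecutor.Copy: named contexts
// first, then a numeric index translated through the stage list, then stages.
func resolveFrom(from string, contexts map[string]*buildContext, stageNames map[string]string) (string, bool) {
	if ctx, ok := contexts[from]; ok {
		return useContext(ctx), true
	}
	if _, err := strconv.Atoi(from); err == nil {
		if name, ok := stageNames[from]; ok {
			if ctx, ok := contexts[name]; ok {
				return useContext(ctx), true
			}
		}
	}
	return "", false // fall back to the stage/container lookup
}

// useContext returns the directory (or image name) for a context, downloading
// URL contexts at most once and caching the extracted path.
func useContext(ctx *buildContext) string {
	if ctx.IsImage {
		return ctx.Value // mounted later via the image rootfs
	}
	if ctx.IsURL {
		if ctx.DownloadedCache == "" {
			// placeholder for define.TempDirForURL in the real code
			ctx.DownloadedCache = "/var/tmp/buildah-external-artifacts/extracted"
		}
		return ctx.DownloadedCache
	}
	return ctx.Value
}

func main() {
	contexts := map[string]*buildContext{"project2": {IsURL: true, Value: "https://example.com/ctx.tar"}}
	stageNames := map[string]string{"0": "project2"}
	dir, ok := resolveFrom("0", contexts, stageNames)
	fmt.Println(dir, ok)
}
```
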
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
index 02a81be6f..4bd6aa821 100644
--- a/vendor/github.com/containers/buildah/install.md
+++ b/vendor/github.com/containers/buildah/install.md
@@ -52,9 +52,9 @@ rpm-ostree install buildah
Note: [`podman`](https://podman.io) build is available by default.
### [Gentoo](https://www.gentoo.org)
-
+[app-containers/podman](https://packages.gentoo.org/packages/app-containers/podman)
```bash
-sudo emerge app-emulation/libpod
+sudo emerge app-containers/podman
```
### [openSUSE](https://www.opensuse.org)
@@ -396,9 +396,9 @@ cat /etc/containers/policy.json
## Debug with Delve and the like
-To make a source debug build without optimizations use `DEBUG=1`, like:
+To make a source debug build without optimizations use `BUILDDEBUG=1`, like:
```
-make all DEBUG=1
+make all BUILDDEBUG=1
```
## Vendoring
diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go
index ec4638215..1c736cdf1 100644
--- a/vendor/github.com/containers/buildah/internal/parse/parse.go
+++ b/vendor/github.com/containers/buildah/internal/parse/parse.go
@@ -309,7 +309,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
// add subdirectory if specified
// cache parent directory
- cacheParent := filepath.Join(getTempDir(), BuildahCacheDir)
+ cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir)
// create cache on host if not present
err = os.MkdirAll(cacheParent, os.FileMode(0755))
if err != nil {
@@ -597,12 +597,3 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
return newMount, nil
}
-
-/* This is internal function and could be changed at any time */
-/* for external usage please refer to buildah/pkg/parse.GetTempDir() */
-func getTempDir() string {
- if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
- return tmpdir
- }
- return "/var/tmp"
-}
diff --git a/vendor/github.com/containers/buildah/internal/types.go b/vendor/github.com/containers/buildah/internal/types.go
index 8ddff99fb..3b1c10623 100644
--- a/vendor/github.com/containers/buildah/internal/types.go
+++ b/vendor/github.com/containers/buildah/internal/types.go
@@ -1,5 +1,11 @@
package internal
+const (
+	// Temp directory which stores external artifacts that are downloaded for a build.
+ // Example: tar files from external sources.
+ BuildahExternalArtifactsDir = "buildah-external-artifacts"
+)
+
 // Types in internal packages are subject to change with releases; avoid using these outside of buildah
// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor
diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go
index 691d89d65..abaadc616 100644
--- a/vendor/github.com/containers/buildah/internal/util/util.go
+++ b/vendor/github.com/containers/buildah/internal/util/util.go
@@ -8,6 +8,8 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/common/libimage"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ enchelpers "github.com/containers/ocicrypt/helpers"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
@@ -32,6 +34,14 @@ func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (*
return localImage, nil
}
+// GetTempDir returns base for a temporary directory on host.
+func GetTempDir() string {
+ if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
+ return tmpdir
+ }
+ return "/var/tmp"
+}
+
// ExportFromReader reads bytes from given reader and exports to external tar, directory or stdout.
func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
var err error
@@ -79,3 +89,49 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
}
return nil
}
+
+// DecryptConfig translates decryptionKeys into a DecryptConfig structure
+func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) {
+ decryptConfig := &encconfig.DecryptConfig{}
+ if len(decryptionKeys) > 0 {
+ // decryption
+ dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid decryption keys")
+ }
+ cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
+ decryptConfig = cc.DecryptConfig
+ }
+
+ return decryptConfig, nil
+}
+
+// EncryptConfig translates encryptionKeys into an EncryptConfig structure
+func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) {
+ var encLayers *[]int
+ var encConfig *encconfig.EncryptConfig
+
+ if len(encryptionKeys) > 0 {
+ // encryption
+ encLayers = &encryptLayers
+ ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "invalid encryption keys")
+ }
+ cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
+ encConfig = cc.EncryptConfig
+ }
+ return encConfig, encLayers, nil
+}
+
+// GetFormat translates format string into either docker or OCI format constant
+func GetFormat(format string) (string, error) {
+ switch format {
+ case define.OCI:
+ return define.OCIv1ImageManifest, nil
+ case define.DOCKER:
+ return define.Dockerv2ImageManifest, nil
+ default:
+ return "", errors.Errorf("unrecognized image type %q", format)
+ }
+}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/build.go b/vendor/github.com/containers/buildah/pkg/cli/build.go
new file mode 100644
index 000000000..396a9e74e
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/cli/build.go
@@ -0,0 +1,373 @@
+package cli
+
+// the cli package contains urfave/cli related structs that help make up
+// the command line for buildah commands. it resides here so other projects
+// that vendor in this code can use them too.
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/define"
+ iutil "github.com/containers/buildah/internal/util"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/pkg/util"
+ "github.com/containers/common/pkg/auth"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+type BuildOptions struct {
+ *LayerResults
+ *BudResults
+ *UserNSResults
+ *FromAndBudResults
+ *NameSpaceResults
+ Logwriter *os.File
+}
+
+const (
+ MaxPullPushRetries = 3
+ PullPushRetryDelay = 2 * time.Second
+)
+
+// GenBuildOptions translates command line flags into a BuildOptions structure
+func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (define.BuildOptions, []string, []string, error) {
+ options := define.BuildOptions{}
+
+ var removeAll []string
+
+ output := ""
+ cleanTmpFile := false
+ tags := []string{}
+ if c.Flag("tag").Changed {
+ tags = iopts.Tag
+ if len(tags) > 0 {
+ output = tags[0]
+ tags = tags[1:]
+ }
+ if c.Flag("manifest").Changed {
+ for _, tag := range tags {
+ if tag == iopts.Manifest {
+ return options, nil, nil, errors.New("the same name must not be specified for both '--tag' and '--manifest'")
+ }
+ }
+ }
+ }
+ if err := auth.CheckAuthFile(iopts.BudResults.Authfile); err != nil {
+ return options, nil, nil, err
+ }
+
+ if c.Flag("logsplit").Changed {
+ if !c.Flag("logfile").Changed {
+ return options, nil, nil, errors.Errorf("cannot use --logsplit without --logfile")
+ }
+ }
+
+ iopts.BudResults.Authfile, cleanTmpFile = util.MirrorToTempFileIfPathIsDescriptor(iopts.BudResults.Authfile)
+ if cleanTmpFile {
+ removeAll = append(removeAll, iopts.BudResults.Authfile)
+ }
+
+ // Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always
+ // --pull-always and --pull-never. The --pull-never and --pull-always options
+ // will not be documented.
+ pullPolicy := define.PullIfMissing
+ if strings.EqualFold(strings.TrimSpace(iopts.Pull), "true") {
+ pullPolicy = define.PullIfNewer
+ }
+ if iopts.PullAlways || strings.EqualFold(strings.TrimSpace(iopts.Pull), "always") {
+ pullPolicy = define.PullAlways
+ }
+ if iopts.PullNever || strings.EqualFold(strings.TrimSpace(iopts.Pull), "never") {
+ pullPolicy = define.PullNever
+ }
+ logrus.Debugf("Pull Policy for pull [%v]", pullPolicy)
+
+ args := make(map[string]string)
+ if c.Flag("build-arg").Changed {
+ for _, arg := range iopts.BuildArg {
+ av := strings.SplitN(arg, "=", 2)
+ if len(av) > 1 {
+ args[av[0]] = av[1]
+ } else {
+ // check if the env is set in the local environment and use that value if it is
+ if val, present := os.LookupEnv(av[0]); present {
+ args[av[0]] = val
+ } else {
+ delete(args, av[0])
+ }
+ }
+ }
+ }
+
+ additionalBuildContext := make(map[string]*define.AdditionalBuildContext)
+ if c.Flag("build-context").Changed {
+ for _, contextString := range iopts.BuildContext {
+ av := strings.SplitN(contextString, "=", 2)
+ if len(av) > 1 {
+ parseAdditionalBuildContext, err := parse.GetAdditionalBuildContext(av[1])
+ if err != nil {
+ return options, nil, nil, errors.Wrapf(err, "while parsing additional build context")
+ }
+ additionalBuildContext[av[0]] = &parseAdditionalBuildContext
+ } else {
+ return options, nil, nil, fmt.Errorf("while parsing additional build context: %q, accepts value in the form of key=value", av)
+ }
+ }
+ }
+
+ containerfiles := getContainerfiles(iopts.File)
+ format, err := iutil.GetFormat(iopts.Format)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ layers := UseLayers()
+ if c.Flag("layers").Changed {
+ layers = iopts.Layers
+ }
+ contextDir := ""
+ cliArgs := inputArgs
+
+ // Nothing provided, we assume the current working directory as build
+ // context
+ if len(cliArgs) == 0 {
+ contextDir, err = os.Getwd()
+ if err != nil {
+ return options, nil, nil, errors.Wrapf(err, "unable to choose current working directory as build context")
+ }
+ } else {
+ // The context directory could be a URL. Try to handle that.
+ tempDir, subDir, err := define.TempDirForURL("", "buildah", cliArgs[0])
+ if err != nil {
+ return options, nil, nil, errors.Wrapf(err, "error prepping temporary context directory")
+ }
+ if tempDir != "" {
+ // We had to download it to a temporary directory.
+ // Delete it later.
+ removeAll = append(removeAll, tempDir)
+ contextDir = filepath.Join(tempDir, subDir)
+ } else {
+ // Nope, it was local. Use it as is.
+ absDir, err := filepath.Abs(cliArgs[0])
+ if err != nil {
+ return options, nil, nil, errors.Wrapf(err, "error determining path to directory")
+ }
+ contextDir = absDir
+ }
+ }
+
+ if len(containerfiles) == 0 {
+ // Try to find the Containerfile/Dockerfile within the contextDir
+ containerfile, err := util.DiscoverContainerfile(contextDir)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ containerfiles = append(containerfiles, containerfile)
+ contextDir = filepath.Dir(containerfile)
+ }
+
+ contextDir, err = filepath.EvalSymlinks(contextDir)
+ if err != nil {
+ return options, nil, nil, errors.Wrapf(err, "error evaluating symlinks in build context path")
+ }
+
+ var stdin io.Reader
+ if iopts.Stdin {
+ stdin = os.Stdin
+ }
+
+ var stdout, stderr, reporter *os.File
+ stdout = os.Stdout
+ stderr = os.Stderr
+ reporter = os.Stderr
+ if iopts.Logwriter != nil {
+ logrus.SetOutput(iopts.Logwriter)
+ stdout = iopts.Logwriter
+ stderr = iopts.Logwriter
+ reporter = iopts.Logwriter
+ }
+
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return options, nil, nil, errors.Wrapf(err, "error building system context")
+ }
+
+ isolation, err := parse.IsolationOption(iopts.Isolation)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ runtimeFlags := []string{}
+ for _, arg := range iopts.RuntimeFlags {
+ runtimeFlags = append(runtimeFlags, "--"+arg)
+ }
+
+ commonOpts, err := parse.CommonBuildOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ pullFlagsCount := 0
+ if c.Flag("pull").Changed {
+ pullFlagsCount++
+ }
+ if c.Flag("pull-always").Changed {
+ pullFlagsCount++
+ }
+ if c.Flag("pull-never").Changed {
+ pullFlagsCount++
+ }
+
+ if pullFlagsCount > 1 {
+ return options, nil, nil, errors.Errorf("can only set one of 'pull' or 'pull-always' or 'pull-never'")
+ }
+
+ if (c.Flag("rm").Changed || c.Flag("force-rm").Changed) && (!c.Flag("layers").Changed && !c.Flag("no-cache").Changed) {
+ return options, nil, nil, errors.Errorf("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
+ }
+
+ if c.Flag("cache-from").Changed {
+ logrus.Debugf("build --cache-from not enabled, has no effect")
+ }
+
+ if c.Flag("compress").Changed {
+ logrus.Debugf("--compress option specified but is ignored")
+ }
+
+ compression := define.Gzip
+ if iopts.DisableCompression {
+ compression = define.Uncompressed
+ }
+
+ if c.Flag("disable-content-trust").Changed {
+ logrus.Debugf("--disable-content-trust option specified but is ignored")
+ }
+
+ namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation)
+ if err != nil {
+ return options, nil, nil, errors.Wrapf(err, "error parsing ID mapping options")
+ }
+ namespaceOptions.AddOrReplace(usernsOption...)
+
+ platforms, err := parse.PlatformsFromOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ decryptConfig, err := iutil.DecryptConfig(iopts.DecryptionKeys)
+ if err != nil {
+ return options, nil, nil, errors.Wrapf(err, "unable to obtain decrypt config")
+ }
+
+ var excludes []string
+ if iopts.IgnoreFile != "" {
+ if excludes, _, err = parse.ContainerIgnoreFile(contextDir, iopts.IgnoreFile); err != nil {
+ return options, nil, nil, err
+ }
+ }
+ var timestamp *time.Time
+ if c.Flag("timestamp").Changed {
+ t := time.Unix(iopts.Timestamp, 0).UTC()
+ timestamp = &t
+ }
+ if c.Flag("output").Changed {
+ buildOption, err := parse.GetBuildOutput(iopts.BuildOutput)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ if buildOption.IsStdout {
+ iopts.Quiet = true
+ }
+ }
+ options = define.BuildOptions{
+ AddCapabilities: iopts.CapAdd,
+ AdditionalBuildContexts: additionalBuildContext,
+ AdditionalTags: tags,
+ AllPlatforms: iopts.AllPlatforms,
+ Annotations: iopts.Annotation,
+ Architecture: systemContext.ArchitectureChoice,
+ Args: args,
+ BlobDirectory: iopts.BlobCache,
+ BuildOutput: iopts.BuildOutput,
+ CNIConfigDir: iopts.CNIConfigDir,
+ CNIPluginPath: iopts.CNIPlugInPath,
+ CPPFlags: iopts.CPPFlags,
+ CommonBuildOpts: commonOpts,
+ Compression: compression,
+ ConfigureNetwork: networkPolicy,
+ ContextDirectory: contextDir,
+ Devices: iopts.Devices,
+ DropCapabilities: iopts.CapDrop,
+ Envs: iopts.Envs,
+ Err: stderr,
+ Excludes: excludes,
+ ForceRmIntermediateCtrs: iopts.ForceRm,
+ From: iopts.From,
+ IDMappingOptions: idmappingOptions,
+ IIDFile: iopts.Iidfile,
+ IgnoreFile: iopts.IgnoreFile,
+ In: stdin,
+ Isolation: isolation,
+ Jobs: &iopts.Jobs,
+ Labels: iopts.Label,
+ Layers: layers,
+ LogFile: iopts.Logfile,
+ LogRusage: iopts.LogRusage,
+ LogSplitByPlatform: iopts.LogSplitByPlatform,
+ Manifest: iopts.Manifest,
+ MaxPullPushRetries: MaxPullPushRetries,
+ NamespaceOptions: namespaceOptions,
+ NoCache: iopts.NoCache,
+ OS: systemContext.OSChoice,
+ OSFeatures: iopts.OSFeatures,
+ OSVersion: iopts.OSVersion,
+ OciDecryptConfig: decryptConfig,
+ Out: stdout,
+ Output: output,
+ OutputFormat: format,
+ Platforms: platforms,
+ PullPolicy: pullPolicy,
+ PullPushRetryDelay: PullPushRetryDelay,
+ Quiet: iopts.Quiet,
+ RemoveIntermediateCtrs: iopts.Rm,
+ ReportWriter: reporter,
+ Runtime: iopts.Runtime,
+ RuntimeArgs: runtimeFlags,
+ RusageLogFile: iopts.RusageLogFile,
+ SignBy: iopts.SignBy,
+ SignaturePolicyPath: iopts.SignaturePolicy,
+ Squash: iopts.Squash,
+ SystemContext: systemContext,
+ Target: iopts.Target,
+ Timestamp: timestamp,
+ TransientMounts: iopts.Volumes,
+ UnsetEnvs: iopts.UnsetEnvs,
+ }
+ if iopts.Quiet {
+ options.ReportWriter = ioutil.Discard
+ }
+ return options, containerfiles, removeAll, nil
+}
+
+func getContainerfiles(files []string) []string {
+ var containerfiles []string
+ for _, f := range files {
+ if f == "-" {
+ containerfiles = append(containerfiles, "/dev/stdin")
+ } else {
+ containerfiles = append(containerfiles, f)
+ }
+ }
+ return containerfiles
+}
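
Editor's note: the pull-policy handling above rejects conflicting flags by counting how many of them were explicitly changed. A minimal standalone sketch of that pattern, using pflag directly rather than the podman/buildah command wiring (flag names reused from the hunk above; this is illustrative, not the actual CLI setup):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("build", pflag.ContinueOnError)
	fs.String("pull", "true", "pull policy")
	fs.Bool("pull-always", false, "always pull the image")
	fs.Bool("pull-never", false, "never pull the image")
	_ = fs.Parse([]string{"--pull-always", "--pull-never"})

	// Count how many of the mutually exclusive flags the user actually set.
	changed := 0
	for _, name := range []string{"pull", "pull-always", "pull-never"} {
		if fs.Changed(name) {
			changed++
		}
	}
	if changed > 1 {
		fmt.Println("can only set one of 'pull' or 'pull-always' or 'pull-never'")
	}
}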
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 98fa4fbc0..695aba7fb 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -53,10 +53,12 @@ type BudResults struct {
Annotation []string
Authfile string
BuildArg []string
+ BuildContext []string
CacheFrom string
CertDir string
Compress bool
Creds string
+ CPPFlags []string
DisableCompression bool
DisableContentTrust bool
IgnoreFile string
@@ -66,10 +68,12 @@ type BudResults struct {
Iidfile string
Label []string
Logfile string
+ LogSplitByPlatform bool
Manifest string
NoHosts bool
NoCache bool
Timestamp int64
+ OmitHistory bool
Pull string
PullAlways bool
PullNever bool
@@ -191,9 +195,11 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "set metadata for an image (default [])")
fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.")
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
+ fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder")
fs.StringVar(&flags.CacheFrom, "cache-from", "", "images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image")
+ fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)")
fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default")
fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "this is a Docker specific option and is a NOOP")
@@ -206,6 +212,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
fs.StringArrayVar(&flags.Label, "label", []string{}, "set metadata for an image (default [])")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
+ fs.BoolVar(&flags.LogSplitByPlatform, "logsplit", false, "split logfile to different files for each platform")
fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden")
if err := fs.MarkHidden("loglevel"); err != nil {
panic(fmt.Sprintf("error marking the loglevel flag as hidden: %v", err))
@@ -235,7 +242,8 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
panic(fmt.Sprintf("error marking the pull-never flag as hidden: %v", err))
}
fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
- fs.BoolVar(&flags.IdentityLabel, "identity-label", true, "add default identity label (default true)")
+ fs.BoolVar(&flags.OmitHistory, "omit-history", false, "omit build history information from built image")
+ fs.BoolVar(&flags.IdentityLabel, "identity-label", true, "add default identity label")
fs.BoolVar(&flags.Rm, "rm", true, "remove intermediate containers after a successful build")
// "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go.
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
@@ -261,17 +269,19 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
// GetBudFlagsCompletions returns the FlagCompletions for the common build flags
func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
- flagCompletion["arch"] = commonComp.AutocompleteNone
flagCompletion["annotation"] = commonComp.AutocompleteNone
+ flagCompletion["arch"] = commonComp.AutocompleteNone
flagCompletion["authfile"] = commonComp.AutocompleteDefault
flagCompletion["build-arg"] = commonComp.AutocompleteNone
+ flagCompletion["build-context"] = commonComp.AutocompleteNone
flagCompletion["cache-from"] = commonComp.AutocompleteNone
flagCompletion["cert-dir"] = commonComp.AutocompleteDefault
+ flagCompletion["cpp-flag"] = commonComp.AutocompleteNone
flagCompletion["creds"] = commonComp.AutocompleteNone
flagCompletion["env"] = commonComp.AutocompleteNone
flagCompletion["file"] = commonComp.AutocompleteDefault
- flagCompletion["from"] = commonComp.AutocompleteDefault
flagCompletion["format"] = commonComp.AutocompleteNone
+ flagCompletion["from"] = commonComp.AutocompleteDefault
flagCompletion["ignorefile"] = commonComp.AutocompleteDefault
flagCompletion["iidfile"] = commonComp.AutocompleteDefault
flagCompletion["jobs"] = commonComp.AutocompleteNone
@@ -281,18 +291,18 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion["os"] = commonComp.AutocompleteNone
flagCompletion["os-feature"] = commonComp.AutocompleteNone
flagCompletion["os-version"] = commonComp.AutocompleteNone
+ flagCompletion["output"] = commonComp.AutocompleteNone
flagCompletion["pull"] = commonComp.AutocompleteDefault
flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
flagCompletion["secret"] = commonComp.AutocompleteNone
- flagCompletion["ssh"] = commonComp.AutocompleteNone
flagCompletion["sign-by"] = commonComp.AutocompleteNone
flagCompletion["signature-policy"] = commonComp.AutocompleteNone
+ flagCompletion["ssh"] = commonComp.AutocompleteNone
flagCompletion["tag"] = commonComp.AutocompleteNone
flagCompletion["target"] = commonComp.AutocompleteNone
flagCompletion["timestamp"] = commonComp.AutocompleteNone
- flagCompletion["variant"] = commonComp.AutocompleteNone
flagCompletion["unsetenv"] = commonComp.AutocompleteNone
- flagCompletion["output"] = commonComp.AutocompleteNone
+ flagCompletion["variant"] = commonComp.AutocompleteNone
return flagCompletion
}
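
Editor's note: for context on how the extended BudResults surface is consumed, here is a rough sketch (not podman's actual wiring) that attaches GetBudFlags to a cobra command and reads the new --build-context values after parsing:

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/cli"
	"github.com/spf13/cobra"
)

func main() {
	opts := cli.BudResults{}
	flags := cli.GetBudFlags(&opts)

	cmd := &cobra.Command{
		Use: "build",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Each --build-context entry arrives as a raw "name=value" string;
			// splitting and prefix handling happen later (see GetAdditionalBuildContext).
			for _, bc := range opts.BuildContext {
				fmt.Println("additional build context:", bc)
			}
			return nil
		},
	}
	cmd.Flags().AddFlagSet(&flags)
	_ = cmd.Execute()
}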
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
index c325bc5cf..acd7bccdc 100644
--- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -26,7 +26,7 @@ type Options struct {
// Note!! : Following API does not handles escaping or validates correctness of the values
// passed to UpperDirOptionFragment instead API will try to pass values as is it
// to the `mount` command. It is user's responsibility to make sure they pre-validate
- // these values. Invalid inputs may lead to undefined behviour.
+ // these values. Invalid inputs may lead to undefined behaviour.
// This is provided as-is, use it if it works for you, we can/will change/break that in the future.
// See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
// TODO: Should we address above comment and handle escaping of metacharacters like
@@ -36,7 +36,7 @@ type Options struct {
// Note!! : Following API does not handles escaping or validates correctness of the values
// passed to WorkDirOptionFragment instead API will try to pass values as is it
// to the `mount` command. It is user's responsibility to make sure they pre-validate
- // these values. Invalid inputs may lead to undefined behviour.
+ // these values. Invalid inputs may lead to undefined behaviour.
// This is provided as-is, use it if it works for you, we can/will change/break that in the future.
// See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
// TODO: Should we address above comment and handle escaping of metacharacters like
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 079863845..cdc421f97 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -137,6 +137,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
cpuShares, _ := flags.GetUint64("cpu-shares")
httpProxy, _ := flags.GetBool("http-proxy")
identityLabel, _ := flags.GetBool("identity-label")
+ omitHistory, _ := flags.GetBool("omit-history")
ulimit := []string{}
if flags.Changed("ulimit") {
@@ -162,6 +163,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
Memory: memoryLimit,
MemorySwap: memorySwap,
NoHosts: noHosts,
+ OmitHistory: omitHistory,
ShmSize: findFlagFunc("shm-size").Value.String(),
Ulimit: ulimit,
Volumes: volumes,
@@ -175,6 +177,31 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
return commonOpts, nil
}
+// GetAdditionalBuildContext consumes raw string and returns parsed AdditionalBuildContext
+func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, error) {
+ ret := define.AdditionalBuildContext{IsURL: false, IsImage: false, Value: value}
+ if strings.HasPrefix(value, "docker-image://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "docker-image://")
+ } else if strings.HasPrefix(value, "container-image://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "container-image://")
+ } else if strings.HasPrefix(value, "docker://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "docker://")
+ } else if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") {
+ ret.IsImage = false
+ ret.IsURL = true
+ } else {
+ path, err := filepath.Abs(value)
+ if err != nil {
+ return define.AdditionalBuildContext{}, errors.Wrapf(err, "unable to convert additional build-context %q path to absolute", value)
+ }
+ ret.Value = path
+ }
+ return ret, nil
+}
+
func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOptions) error {
for _, opt := range securityOpts {
if opt == "no-new-privileges" {
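
Editor's note: GetAdditionalBuildContext classifies each --build-context value purely by prefix. A small usage sketch of the function added above (the output comments describe the expected branch, based on the code shown):

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/parse"
)

func main() {
	for _, v := range []string{
		"docker-image://alpine:latest",   // image reference; prefix is stripped
		"https://example.com/src.tar.gz", // URL; value kept as-is
		"./vendor",                       // local path; converted to an absolute path
	} {
		ctx, err := parse.GetAdditionalBuildContext(v)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("%-32s image=%-5v url=%-5v value=%s\n", v, ctx.IsImage, ctx.IsURL, ctx.Value)
	}
}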
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
index 8b11df33c..dcee5ca6f 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
@@ -7,7 +7,6 @@ import (
"path/filepath"
"github.com/containers/buildah/define"
- "github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/devices"
"github.com/pkg/errors"
)
@@ -18,9 +17,6 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
if err != nil {
return nil, err
}
- if unshare.IsRootless() && src != dst {
- return nil, errors.Errorf("Renaming device %s to %s is not supported in rootless containers", src, dst)
- }
srcInfo, err := os.Stat(src)
if err != nil {
return nil, errors.Wrapf(err, "error getting info of source device %s", src)
@@ -32,7 +28,8 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
return nil, errors.Wrapf(err, "%s is not a valid device", src)
}
dev.Path = dst
- devs = append(devs, *dev)
+ device := define.BuildahDevice{Device: *dev, Source: src, Destination: dst}
+ devs = append(devs, device)
return devs, nil
}
@@ -44,7 +41,8 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
for _, d := range srcDevices {
d.Path = filepath.Join(dst, filepath.Base(d.Path))
d.Permissions = devices.Permissions(permissions)
- devs = append(devs, *d)
+ device := define.BuildahDevice{Device: *d, Source: src, Destination: dst}
+ devs = append(devs, device)
}
return devs, nil
}
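
Editor's note: the net effect for callers of DeviceFromPath is that every returned entry now records where the device comes from on the host and where it lands in the container. A usage sketch (requires a Linux host where the example device node exists; "/dev/fuse" is only an example):

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/parse"
)

func main() {
	devs, err := parse.DeviceFromPath("/dev/fuse")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, d := range devs {
		fmt.Println("source:", d.Source, "destination:", d.Destination)
	}
}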
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index e56aac8c9..d31711132 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/image/v5/types"
"github.com/opencontainers/runtime-spec/specs-go"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
@@ -178,3 +179,27 @@ type runMountArtifacts struct {
// LockedTargets to be unlocked if there are any.
LockedTargets []string
}
+
+// RunMountInfo are the available run mounts for this run
+type runMountInfo struct {
+ // ContextDir is the root directory for the source location for bind mounts.
+ ContextDir string
+ // Secrets are the available secrets to use in a RUN
+ Secrets map[string]define.Secret
+ // SSHSources is the available ssh agents to use in a RUN
+ SSHSources map[string]*sshagent.Source `json:"-"`
+ // Map of stages and container mountpoint if any from stage executor
+ StageMountPoints map[string]internal.StageMountDetails
+ // System context of current build
+ SystemContext *types.SystemContext
+}
+
+// IDMaps are the UIDs, GID, and maps for the run
+type IDMaps struct {
+ uidmap []spec.LinuxIDMapping
+ gidmap []spec.LinuxIDMapping
+ rootUID int
+ rootGID int
+ processUID int
+ processGID int
+}
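
Editor's note: IDMaps simply bundles the OCI uid/gid mappings and the resolved root/process IDs that previously travelled as six separate parameters. The translation those mappings encode can be sketched by hand; the helper below is illustrative only and is not buildah's util.GetHostIDs:

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// toHost maps a container ID to a host ID through a LinuxIDMapping slice.
func toHost(id uint32, mappings []spec.LinuxIDMapping) (uint32, bool) {
	for _, m := range mappings {
		if id >= m.ContainerID && id < m.ContainerID+m.Size {
			return m.HostID + (id - m.ContainerID), true
		}
	}
	return 0, false
}

func main() {
	// A typical rootless mapping: container 0..65535 -> host 100000..165535.
	uidmap := []spec.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 65536}}
	host, ok := toHost(1000, uidmap)
	fmt.Println(host, ok) // 101000 true
}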
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index f52754c54..3d2a83f55 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -35,6 +35,7 @@ import (
"github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/etchosts"
"github.com/containers/common/libnetwork/network"
+ "github.com/containers/common/libnetwork/resolvconf"
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/common/pkg/chown"
@@ -50,8 +51,6 @@ import (
"github.com/containers/storage/pkg/unshare"
storagetypes "github.com/containers/storage/types"
"github.com/docker/go-units"
- "github.com/docker/libnetwork/resolvconf"
- "github.com/docker/libnetwork/types"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -144,18 +143,56 @@ func (b *Builder) Run(command []string, options RunOptions) error {
g.SetProcessArgs(nil)
}
- for _, d := range b.Devices {
- sDev := spec.LinuxDevice{
- Type: string(d.Type),
- Path: d.Path,
- Major: d.Major,
- Minor: d.Minor,
- FileMode: &d.FileMode,
- UID: &d.Uid,
- GID: &d.Gid,
+ // Mount devices, if any. If the session is rootless, attempt a bind mount
+ // just like podman does.
+ if unshare.IsRootless() {
+ // We are going to create bind mounts for devices
+ // but we need to make sure that we don't override
+ // anything which is already in OCI spec.
+ mounts := make(map[string]interface{})
+ for _, m := range g.Mounts() {
+ mounts[m.Destination] = true
+ }
+ newMounts := []spec.Mount{}
+ for _, d := range b.Devices {
+ // Default permission is read-only.
+ perm := "ro"
+ // Get the permission configured for this device, but only honor the `write`
+ // permission in rootless mode since `mknod` is not supported there anyway.
+ if strings.Contains(string(d.Rule.Permissions), "w") {
+ perm = "rw"
+ }
+ devMnt := spec.Mount{
+ Destination: d.Destination,
+ Type: parse.TypeBind,
+ Source: d.Source,
+ Options: []string{"slave", "nosuid", "noexec", perm, "rbind"},
+ }
+ // Podman parity: podman skips these two devices hence we do the same.
+ if d.Path == "/dev/ptmx" || strings.HasPrefix(d.Path, "/dev/tty") {
+ continue
+ }
+ // Device is already in OCI spec do not re-mount.
+ if _, found := mounts[d.Path]; found {
+ continue
+ }
+ newMounts = append(newMounts, devMnt)
+ }
+ g.Config.Mounts = append(newMounts, g.Config.Mounts...)
+ } else {
+ for _, d := range b.Devices {
+ sDev := spec.LinuxDevice{
+ Type: string(d.Type),
+ Path: d.Path,
+ Major: d.Major,
+ Minor: d.Minor,
+ FileMode: &d.FileMode,
+ UID: &d.Uid,
+ GID: &d.Gid,
+ }
+ g.AddDevice(sDev)
+ g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, string(d.Permissions))
}
- g.AddDevice(sDev)
- g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, string(d.Permissions))
}
setupMaskedPaths(g)
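
Editor's note: in the rootless branch above, each requested device becomes an ordinary bind mount in the OCI spec instead of a mknod'ed device node. A standalone sketch of the mount entry that gets appended (the device path is a made-up example, and the literal "bind" stands in for parse.TypeBind):

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	devMnt := spec.Mount{
		Destination: "/dev/fuse",
		Type:        "bind",
		Source:      "/dev/fuse",
		// "ro" by default; "rw" only when the device rule carries the "w" permission.
		Options: []string{"slave", "nosuid", "noexec", "ro", "rbind"},
	}
	fmt.Printf("%+v\n", devMnt)
}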
@@ -212,7 +249,6 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
bindFiles := make(map[string]string)
- namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...)
volumes := b.Volumes()
// Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
@@ -243,15 +279,12 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
}
- if !(contains(volumes, "/etc/resolv.conf") || (len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none")) {
- resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, namespaceOptions)
+ if !contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") {
+ resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, spec.Linux.Namespaces)
if err != nil {
return err
}
- // Only bind /etc/resolv.conf if there's a network
- if options.ConfigureNetwork != define.NetworkDisabled {
- bindFiles["/etc/resolv.conf"] = resolvFile
- }
+ bindFiles[resolvconf.DefaultResolvConf] = resolvFile
}
// Empty file, so no need to recreate if it exists
if _, ok := bindFiles["/run/.containerenv"]; !ok {
@@ -283,7 +316,16 @@ rootless=%d
bindFiles["/run/.containerenv"] = containerenvPath
}
- runArtifacts, err := b.setupMounts(options.SystemContext, mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.Secrets, options.SSHSources, options.RunMounts, options.ContextDir, options.StageMountPoints)
+
+ runMountInfo := runMountInfo{
+ ContextDir: options.ContextDir,
+ Secrets: options.Secrets,
+ SSHSources: options.SSHSources,
+ StageMountPoints: options.StageMountPoints,
+ SystemContext: options.SystemContext,
+ }
+
+ runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
}
@@ -440,7 +482,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
return mounts, nil
}
-func (b *Builder) setupMounts(context *imagetypes.SystemContext, mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, secrets map[string]define.Secret, sshSources map[string]*sshagent.Source, runFileMounts []string, contextDir string, stageMountPoints map[string]internal.StageMountDetails) (*runMountArtifacts, error) {
+func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) {
// Start building a new list of mounts.
var mounts []specs.Mount
haveMount := func(destination string) bool {
@@ -483,9 +525,16 @@ func (b *Builder) setupMounts(context *imagetypes.SystemContext, mountPoint stri
// Get the list of subscriptions mounts.
subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false)
+ idMaps := IDMaps{
+ uidmap: spec.Linux.UIDMappings,
+ gidmap: spec.Linux.GIDMappings,
+ rootUID: int(rootUID),
+ rootGID: int(rootGID),
+ processUID: int(processUID),
+ processGID: int(processGID),
+ }
// Get the list of mounts that are just for this Run() call.
- // TODO: acui: de-spaghettify run mounts
- runMounts, mountArtifacts, err := b.runSetupRunMounts(context, runFileMounts, secrets, stageMountPoints, sshSources, cdir, contextDir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, int(rootUID), int(rootGID), int(processUID), int(processGID))
+ runMounts, mountArtifacts, err := b.runSetupRunMounts(runFileMounts, runMountInfo, idMaps)
if err != nil {
return nil, err
}
@@ -497,7 +546,7 @@ func (b *Builder) setupMounts(context *imagetypes.SystemContext, mountPoint stri
}
// Get the list of explicitly-specified volume mounts.
- volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, int(rootUID), int(rootGID), int(processUID), int(processGID))
+ volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, idMaps)
if err != nil {
return nil, err
}
@@ -541,94 +590,52 @@ func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
}
// addResolvConf copies files from host and sets them up to bind mount into container
-func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaceOptions define.NamespaceOptions) (string, error) {
- resolvConf := "/etc/resolv.conf"
-
- stat, err := os.Stat(resolvConf)
+func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaces []specs.LinuxNamespace) (string, error) {
+ defaultConfig, err := config.Default()
if err != nil {
- return "", err
- }
- contents, err := ioutil.ReadFile(resolvConf)
- // resolv.conf doesn't have to exists
- if err != nil && !os.IsNotExist(err) {
- return "", err
+ return "", errors.Wrapf(err, "failed to get config")
}
- netns := false
- ns := namespaceOptions.Find(string(spec.NetworkNamespace))
- if ns != nil && !ns.Host {
- netns = true
- }
+ nameservers := make([]string, 0, len(defaultConfig.Containers.DNSServers)+len(dnsServers))
+ nameservers = append(nameservers, defaultConfig.Containers.DNSServers...)
+ nameservers = append(nameservers, dnsServers...)
- nameservers := resolvconf.GetNameservers(contents, types.IPv4)
- // check if systemd-resolved is used, assume it is used when 127.0.0.53 is the only nameserver
- if len(nameservers) == 1 && nameservers[0] == "127.0.0.53" && netns {
- // read the actual resolv.conf file for systemd-resolved
- resolvedContents, err := ioutil.ReadFile("/run/systemd/resolve/resolv.conf")
- if err != nil {
- if !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf")
+ keepHostServers := false
+ // special check for slirp ip
+ if len(nameservers) == 0 && b.Isolation == IsolationOCIRootless {
+ for _, ns := range namespaces {
+ if ns.Type == specs.NetworkNamespace && ns.Path == "" {
+ keepHostServers = true
+ // if we are using slirp4netns, also add the built-in DNS server.
+ logrus.Debugf("adding slirp4netns 10.0.2.3 built-in DNS server")
+ nameservers = append([]string{"10.0.2.3"}, nameservers...)
}
- } else {
- contents = resolvedContents
- }
- }
-
- // Ensure that the container's /etc/resolv.conf is compatible with its
- // network configuration.
- if netns {
- // FIXME handle IPv6
- resolve, err := resolvconf.FilterResolvDNS(contents, true)
- if err != nil {
- return "", errors.Wrapf(err, "error parsing host resolv.conf")
- }
- contents = resolve.Content
- }
- search := resolvconf.GetSearchDomains(contents)
- nameservers = resolvconf.GetNameservers(contents, types.IP)
- options := resolvconf.GetOptions(contents)
-
- defaultContainerConfig, err := config.Default()
- if err != nil {
- return "", errors.Wrapf(err, "failed to get container config")
- }
- dnsSearch = append(defaultContainerConfig.Containers.DNSSearches, dnsSearch...)
- if len(dnsSearch) > 0 {
- search = dnsSearch
- }
-
- if b.Isolation == IsolationOCIRootless {
- if ns != nil && !ns.Host && ns.Path == "" {
- // if we are using slirp4netns, also add the built-in DNS server.
- logrus.Debugf("adding slirp4netns 10.0.2.3 built-in DNS server")
- nameservers = append([]string{"10.0.2.3"}, nameservers...)
}
}
- dnsServers = append(defaultContainerConfig.Containers.DNSServers, dnsServers...)
- if len(dnsServers) != 0 {
- dns, err := getDNSIP(dnsServers)
- if err != nil {
- return "", errors.Wrapf(err, "error getting dns servers")
- }
- nameservers = []string{}
- for _, server := range dns {
- nameservers = append(nameservers, server.String())
- }
- }
+ searches := make([]string, 0, len(defaultConfig.Containers.DNSSearches)+len(dnsSearch))
+ searches = append(searches, defaultConfig.Containers.DNSSearches...)
+ searches = append(searches, dnsSearch...)
- dnsOptions = append(defaultContainerConfig.Containers.DNSOptions, dnsOptions...)
- if len(dnsOptions) != 0 {
- options = dnsOptions
- }
+ options := make([]string, 0, len(defaultConfig.Containers.DNSOptions)+len(dnsOptions))
+ options = append(options, defaultConfig.Containers.DNSOptions...)
+ options = append(options, dnsOptions...)
- cfile := filepath.Join(rdir, filepath.Base(resolvConf))
- if _, err = resolvconf.Build(cfile, nameservers, search, options); err != nil {
+ cfile := filepath.Join(rdir, "resolv.conf")
+ if err := resolvconf.New(&resolvconf.Params{
+ Path: cfile,
+ Namespaces: namespaces,
+ IPv6Enabled: true, // TODO we should check if we have ipv6
+ KeepHostServers: keepHostServers,
+ Nameservers: nameservers,
+ Searches: searches,
+ Options: options,
+ }); err != nil {
return "", errors.Wrapf(err, "error building resolv.conf for container %s", b.ContainerID)
}
- uid := int(stat.Sys().(*syscall.Stat_t).Uid)
- gid := int(stat.Sys().(*syscall.Stat_t).Gid)
+ uid := 0
+ gid := 0
if chownOpts != nil {
uid = chownOpts.UID
gid = chownOpts.GID
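
Editor's note: the rewritten addResolvConf delegates file generation to containers/common. A condensed sketch of that call, with the Params fields taken from the hunk above and placeholder values filled in:

package main

import (
	"log"

	"github.com/containers/common/libnetwork/resolvconf"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Placeholder values; buildah passes the merged config + CLI lists here.
	err := resolvconf.New(&resolvconf.Params{
		Path:            "/tmp/resolv.conf",
		Namespaces:      []specs.LinuxNamespace{{Type: specs.NetworkNamespace}},
		IPv6Enabled:     true, // the TODO above notes this should really be detected
		KeepHostServers: false,
		Nameservers:     []string{"10.0.2.3"},
		Searches:        []string{"example.test"},
		Options:         []string{"ndots:1"},
	})
	if err != nil {
		log.Fatal(err)
	}
}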
@@ -1765,7 +1772,7 @@ func (b *Builder) cleanupTempVolumes() {
}
}
-func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, rootUID, rootGID, processUID, processGID int) (mounts []specs.Mount, Err error) {
+func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {
// Make sure the overlay directory is clean before running
containerDir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
@@ -1823,7 +1830,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
}
}
if foundU {
- if err := chown.ChangeHostPathOwnership(host, true, processUID, processGID); err != nil {
+ if err := chown.ChangeHostPathOwnership(host, true, idMaps.processUID, idMaps.processGID); err != nil {
return specs.Mount{}, err
}
}
@@ -1837,13 +1844,14 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
return specs.Mount{}, err
}
- contentDir, err := overlay.TempDir(containerDir, rootUID, rootGID)
+ contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID)
if err != nil {
return specs.Mount{}, errors.Wrapf(err, "failed to create TempDir in the %s directory", containerDir)
}
- overlayOpts := overlay.Options{RootUID: rootUID,
- RootGID: rootGID,
+ overlayOpts := overlay.Options{
+ RootUID: idMaps.rootUID,
+ RootGID: idMaps.rootGID,
UpperDirOptionFragment: upperDir,
WorkDirOptionFragment: workDir,
GraphOpts: b.store.GraphOptions(),
@@ -1856,7 +1864,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
// If chown true, add correct ownership to the overlay temp directories.
if foundU {
- if err := chown.ChangeHostPathOwnership(contentDir, true, processUID, processGID); err != nil {
+ if err := chown.ChangeHostPathOwnership(contentDir, true, idMaps.processUID, idMaps.processGID); err != nil {
return specs.Mount{}, err
}
}
@@ -2041,17 +2049,6 @@ func runLookupPath(g *generate.Generator, command []string) []string {
return command
}
-func getDNSIP(dnsServers []string) (dns []net.IP, err error) {
- for _, i := range dnsServers {
- result := net.ParseIP(i)
- if result == nil {
- return dns, errors.Errorf("invalid IP address %s", i)
- }
- dns = append(dns, result)
- }
- return dns, nil
-}
-
func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) (string, error) {
// Set the user UID/GID/supplemental group list/capabilities lists.
user, homeDir, err := b.userForRun(mountPoint, options.User)
@@ -2096,14 +2093,14 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions
}
}
-func addOrReplaceMount(moutns []specs.Mount, mount specs.Mount) []spec.Mount {
- for i := range moutns {
- if moutns[i].Destination == mount.Destination {
- moutns[i] = mount
- return moutns
+func addOrReplaceMount(mounts []specs.Mount, mount specs.Mount) []spec.Mount {
+ for i := range mounts {
+ if mounts[i].Destination == mount.Destination {
+ mounts[i] = mount
+ return mounts
}
}
- return append(moutns, mount)
+ return append(mounts, mount)
}
// setupSpecialMountSpecChanges creates special mounts for depending on the namespaces
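
Editor's note: addOrReplaceMount keys mounts by destination, so a later mount at the same path replaces the earlier one rather than being appended. The same logic in a self-contained form:

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func addOrReplace(mounts []specs.Mount, mount specs.Mount) []specs.Mount {
	for i := range mounts {
		if mounts[i].Destination == mount.Destination {
			mounts[i] = mount // same destination: replace in place
			return mounts
		}
	}
	return append(mounts, mount)
}

func main() {
	mounts := []specs.Mount{{Destination: "/run", Type: "tmpfs", Source: "tmpfs"}}
	mounts = addOrReplace(mounts, specs.Mount{Destination: "/run", Type: "bind", Source: "/tmp/run"})
	fmt.Println(len(mounts), mounts[0].Type) // 1 bind
}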
@@ -2474,7 +2471,7 @@ func init() {
}
// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs
-func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []string, secrets map[string]define.Secret, stageMountPoints map[string]internal.StageMountDetails, sshSources map[string]*sshagent.Source, containerWorkingDir string, contextDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, rootUID int, rootGID int, processUID int, processGID int) ([]spec.Mount, *runMountArtifacts, error) {
+func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
mountTargets := make([]string, 0, 10)
tmpFiles := make([]string, 0, len(mounts))
mountImages := make([]string, 0, 10)
@@ -2494,10 +2491,10 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []
if len(arr) == 2 {
tokens = strings.Split(arr[1], ",")
}
- // For now, we only support type secret.
+
switch kv[1] {
case "secret":
- mount, envFile, err := getSecretMount(tokens, secrets, b.MountLabel, containerWorkingDir, uidmap, gidmap)
+ mount, envFile, err := b.getSecretMount(tokens, sources.Secrets, idMaps)
if err != nil {
return nil, nil, err
}
@@ -2509,7 +2506,7 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []
}
}
case "ssh":
- mount, agent, err := b.getSSHMount(tokens, sshCount, sshSources, b.MountLabel, uidmap, gidmap, b.ProcessLabel)
+ mount, agent, err := b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps)
if err != nil {
return nil, nil, err
}
@@ -2524,7 +2521,7 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []
sshCount++
}
case "bind":
- mount, image, err := b.getBindMount(context, tokens, contextDir, rootUID, rootGID, processUID, processGID, stageMountPoints)
+ mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps)
if err != nil {
return nil, nil, err
}
@@ -2535,14 +2532,14 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []
mountImages = append(mountImages, image)
}
case "tmpfs":
- mount, err := b.getTmpfsMount(tokens, rootUID, rootGID, processUID, processGID)
+ mount, err := b.getTmpfsMount(tokens, idMaps)
if err != nil {
return nil, nil, err
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
case "cache":
- mount, lockedPaths, err := b.getCacheMount(tokens, rootUID, rootGID, processUID, processGID, stageMountPoints)
+ mount, lockedPaths, err := b.getCacheMount(tokens, sources.StageMountPoints, idMaps)
if err != nil {
return nil, nil, err
}
@@ -2564,7 +2561,7 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []
return finalMounts, artifacts, nil
}
-func (b *Builder) getBindMount(context *imagetypes.SystemContext, tokens []string, contextDir string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, string, error) {
+func (b *Builder) getBindMount(tokens []string, context *imagetypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, string, error) {
if contextDir == "" {
return nil, "", errors.New("Context Directory for current run invocation is not configured")
}
@@ -2574,42 +2571,42 @@ func (b *Builder) getBindMount(context *imagetypes.SystemContext, tokens []strin
return nil, image, err
}
optionMounts = append(optionMounts, mount)
- volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID)
+ volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
if err != nil {
return nil, image, err
}
return &volumes[0], image, nil
}
-func (b *Builder) getTmpfsMount(tokens []string, rootUID, rootGID, processUID, processGID int) (*spec.Mount, error) {
+func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*spec.Mount, error) {
var optionMounts []specs.Mount
mount, err := internalParse.GetTmpfsMount(tokens)
if err != nil {
return nil, err
}
optionMounts = append(optionMounts, mount)
- volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID)
+ volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
if err != nil {
return nil, err
}
return &volumes[0], nil
}
-func (b *Builder) getCacheMount(tokens []string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, []string, error) {
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, []string, error) {
var optionMounts []specs.Mount
mount, lockedTargets, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints)
if err != nil {
return nil, lockedTargets, err
}
optionMounts = append(optionMounts, mount)
- volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID)
+ volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
if err != nil {
return nil, lockedTargets, err
}
return &volumes[0], lockedTargets, nil
}
-func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, string, error) {
+func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secret, idMaps IDMaps) (*spec.Mount, string, error) {
errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint")
if len(tokens) == 0 {
return nil, "", errInvalidSyntax
@@ -2683,15 +2680,15 @@ func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabe
envFile = tmpFile.Name()
ctrFileOnHost = tmpFile.Name()
case "file":
- data, err = ioutil.ReadFile(secr.Source)
+ containerWorkingDir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return nil, "", err
}
- ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id)
- _, err = os.Stat(ctrFileOnHost)
- if !os.IsNotExist(err) {
+ data, err = ioutil.ReadFile(secr.Source)
+ if err != nil {
return nil, "", err
}
+ ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id)
default:
return nil, "", errors.New("invalid source secret type")
}
@@ -2705,10 +2702,10 @@ func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabe
return nil, "", err
}
- if err := label.Relabel(ctrFileOnHost, mountlabel, false); err != nil {
+ if err := label.Relabel(ctrFileOnHost, b.MountLabel, false); err != nil {
return nil, "", err
}
- hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid)
+ hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
if err != nil {
return nil, "", err
}
@@ -2728,7 +2725,7 @@ func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabe
}
// getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container
-func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) {
+func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, idMaps IDMaps) (*spec.Mount, *sshagent.AgentServer, error) {
errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint")
var err error
@@ -2795,25 +2792,24 @@ func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]
return nil, nil, err
}
// Start ssh server, and get the host sock we're mounting in the container
- hostSock, err := fwdAgent.Serve(processLabel)
+ hostSock, err := fwdAgent.Serve(b.ProcessLabel)
if err != nil {
return nil, nil, err
}
- if err := label.Relabel(filepath.Dir(hostSock), mountlabel, false); err != nil {
+ if err := label.Relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil {
if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
}
return nil, nil, err
}
- if err := label.Relabel(hostSock, mountlabel, false); err != nil {
+ if err := label.Relabel(hostSock, b.MountLabel, false); err != nil {
if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
}
return nil, nil, err
}
-
- hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid)
+ hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
if err != nil {
if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
diff --git a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go
index 9e62691e8..280176dba 100644
--- a/vendor/github.com/containers/buildah/run_unix.go
+++ b/vendor/github.com/containers/buildah/run_unix.go
@@ -5,6 +5,7 @@ package buildah
import (
"github.com/containers/buildah/define"
nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/containers/storage"
"github.com/pkg/errors"
)
@@ -22,10 +23,19 @@ func (b *Builder) Run(command []string, options RunOptions) error {
return errors.New("function not supported on non-linux systems")
}
func DefaultNamespaceOptions() (NamespaceOptions, error) {
- return NamespaceOptions{}, errors.New("function not supported on non-linux systems")
+ options := NamespaceOptions{
+ {Name: string(specs.CgroupNamespace), Host: false},
+ {Name: string(specs.IPCNamespace), Host: false},
+ {Name: string(specs.MountNamespace), Host: false},
+ {Name: string(specs.NetworkNamespace), Host: false},
+ {Name: string(specs.PIDNamespace), Host: false},
+ {Name: string(specs.UserNamespace), Host: false},
+ {Name: string(specs.UTSNamespace), Host: false},
+ }
+ return options, nil
}
// getNetworkInterface creates the network interface
func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) {
- return nil, errors.New("function not supported on non-linux systems")
+ return nil, nil
}
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 33a8c5657..986e1d9f7 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -14,6 +14,7 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/util"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/shortnames"
"github.com/containers/image/v5/signature"
@@ -44,6 +45,11 @@ var (
}
)
+// StringInSlice is deprecated, use github.com/containers/common/pkg/util.StringInSlice
+func StringInSlice(s string, slice []string) bool {
+ return util.StringInSlice(s, slice)
+}
+
// resolveName checks if name is a valid image name, and if that name doesn't
// include a domain portion, returns a list of the names which it might
// correspond to in the set of configured registries, and the transport used to
@@ -244,17 +250,6 @@ func Runtime() string {
return conf.Engine.OCIRuntime
}
-// StringInSlice returns a boolean indicating if the exact value s is present
-// in the slice slice.
-func StringInSlice(s string, slice []string) bool {
- for _, v := range slice {
- if v == s {
- return true
- }
- }
- return false
-}
-
// GetContainerIDs uses ID mappings to compute the container-level IDs that will
// correspond to a UID/GID pair on the host.
func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
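
Editor's note: with the wrapper in place, existing buildah callers keep working while new code can import the containers/common helper directly. A trivial usage sketch of the shared implementation:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/util"
)

func main() {
	runtimes := []string{"crun", "runc", "kata"}
	fmt.Println(util.StringInSlice("runc", runtimes))  // true
	fmt.Println(util.StringInSlice("youki", runtimes)) // false
}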
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
index 01cedc7ed..1cba29143 100644
--- a/vendor/github.com/containers/common/libimage/copier.go
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -139,7 +139,7 @@ type CopyOptions struct {
// copier is an internal helper to conveniently copy images.
type copier struct {
imageCopyOptions copy.Options
- retryOptions retry.RetryOptions
+ retryOptions retry.Options
systemContext *types.SystemContext
policyContext *signature.PolicyContext
@@ -370,7 +370,7 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
}
return err
}
- return returnManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions)
+ return returnManifest, retry.IfNecessary(ctx, f, &c.retryOptions)
}
// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
diff --git a/vendor/github.com/containers/common/libimage/define/search.go b/vendor/github.com/containers/common/libimage/define/search.go
new file mode 100644
index 000000000..0abd2ca1c
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/define/search.go
@@ -0,0 +1,13 @@
+package define
+
+const (
+ // SearchFilterAutomated is the key for filtering images by their automated attribute.
+ SearchFilterAutomated = "is-automated"
+ // SearchFilterOfficial is the key for filtering images by their official attribute.
+ SearchFilterOfficial = "is-official"
+ // SearchFilterStars is the key for filtering images by stars.
+ SearchFilterStars = "stars"
+)
+
+// SearchFilters includes all supported search filters.
+var SearchFilters = []string{SearchFilterAutomated, SearchFilterOfficial, SearchFilterStars}
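
Editor's note: the new constants let callers build filter strings without hard-coding the key names. A usage sketch that feeds them to libimage.ParseSearchFilter (signature as shown in the search.go hunk further below):

package main

import (
	"fmt"

	"github.com/containers/common/libimage"
	"github.com/containers/common/libimage/define"
)

func main() {
	filters := []string{
		define.SearchFilterStars + "=100",
		define.SearchFilterOfficial + "=true",
	}
	parsed, err := libimage.ParseSearchFilter(filters)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("%+v\n", parsed)
}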
diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go
index 05d60edfc..5da8df1bf 100644
--- a/vendor/github.com/containers/common/libimage/inspect.go
+++ b/vendor/github.com/containers/common/libimage/inspect.go
@@ -128,7 +128,7 @@ func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageDat
Config: &ociImage.Config,
Version: info.DockerVersion,
Size: size,
- VirtualSize: size, // TODO: they should be different (inherited from Podman)
+ VirtualSize: size, // NOTE: same as size. Inherited from Docker where it's scheduled for deprecation.
Digest: i.Digest(),
Labels: info.Labels,
RootFS: &RootFS{
@@ -216,7 +216,7 @@ func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error
return nil, err
}
- img, err := ref.NewImage(ctx, i.runtime.systemContextCopy())
+ img, err := ref.NewImage(ctx, &i.runtime.systemContext)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go
index c2d066645..89faa4635 100644
--- a/vendor/github.com/containers/common/libimage/load.go
+++ b/vendor/github.com/containers/common/libimage/load.go
@@ -114,6 +114,11 @@ func (r *Runtime) loadMultiImageDockerArchive(ctx context.Context, ref types.Ima
if err != nil {
return nil, err
}
+ defer func() {
+ if err := reader.Close(); err != nil {
+ logrus.Errorf("Closing reader of docker archive: %v", err)
+ }
+ }()
refLists, err := reader.List()
if err != nil {
diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go
index b36bbf396..7af125283 100644
--- a/vendor/github.com/containers/common/libimage/normalize.go
+++ b/vendor/github.com/containers/common/libimage/normalize.go
@@ -1,51 +1,13 @@
package libimage
import (
- "runtime"
"strings"
- "github.com/containerd/containerd/platforms"
"github.com/containers/image/v5/docker/reference"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-// NormalizePlatform normalizes (according to the OCI spec) the specified os,
-// arch and variant. If left empty, the individual item will not be normalized.
-func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) {
- os, arch, variant = rawOS, rawArch, rawVariant
- if os == "" {
- os = runtime.GOOS
- }
- if arch == "" {
- arch = runtime.GOARCH
- }
- rawPlatform := os + "/" + arch
- if variant != "" {
- rawPlatform += "/" + variant
- }
-
- normalizedPlatform, err := platforms.Parse(rawPlatform)
- if err != nil {
- logrus.Debugf("Error normalizing platform: %v", err)
- return rawOS, rawArch, rawVariant
- }
- logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform)
- os = rawOS
- if rawOS != "" {
- os = normalizedPlatform.OS
- }
- arch = rawArch
- if rawArch != "" {
- arch = normalizedPlatform.Architecture
- }
- variant = rawVariant
- if rawVariant != "" {
- variant = normalizedPlatform.Variant
- }
- return os, arch, variant
-}
-
// NormalizeName normalizes the provided name according to the conventions by
// Podman and Buildah. If tag and digest are missing, the "latest" tag will be
// used. If it's a short name, it will be prefixed with "localhost/".
diff --git a/vendor/github.com/containers/common/libimage/platform.go b/vendor/github.com/containers/common/libimage/platform.go
new file mode 100644
index 000000000..736a193f6
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/platform.go
@@ -0,0 +1,87 @@
+package libimage
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/sirupsen/logrus"
+)
+
+// PlatformPolicy controls the behavior of image-platform matching.
+type PlatformPolicy int
+
+const (
+ // Only debug log if an image does not match the expected platform.
+ PlatformPolicyDefault PlatformPolicy = iota
+ // Warn if an image does not match the expected platform.
+ PlatformPolicyWarn
+)
+
+// NormalizePlatform normalizes (according to the OCI spec) the specified os,
+// arch and variant. If left empty, the individual item will not be normalized.
+func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) {
+ rawPlatform := toPlatformString(rawOS, rawArch, rawVariant)
+ normalizedPlatform, err := platforms.Parse(rawPlatform)
+ if err != nil {
+ logrus.Debugf("Error normalizing platform: %v", err)
+ return rawOS, rawArch, rawVariant
+ }
+ logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform)
+ os = rawOS
+ if rawOS != "" {
+ os = normalizedPlatform.OS
+ }
+ arch = rawArch
+ if rawArch != "" {
+ arch = normalizedPlatform.Architecture
+ }
+ variant = rawVariant
+ if rawVariant != "" {
+ variant = normalizedPlatform.Variant
+ }
+ return os, arch, variant
+}
+
+func toPlatformString(os, arch, variant string) string {
+ if os == "" {
+ os = runtime.GOOS
+ }
+ if arch == "" {
+ arch = runtime.GOARCH
+ }
+ if variant == "" {
+ return fmt.Sprintf("%s/%s", os, arch)
+ }
+ return fmt.Sprintf("%s/%s/%s", os, arch, variant)
+}
+
+// Checks whether the image matches the specified platform.
+// Returns
+// * 1) a matching error that can be used for logging (or returning) what does not match
+// * 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error)
+// * 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.)
+func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) {
+ inspectInfo, err := i.inspectInfo(ctx)
+ if err != nil {
+ return nil, false, fmt.Errorf("inspecting image: %w", err)
+ }
+
+ customPlatform := len(os)+len(arch)+len(variant) != 0
+
+ expected, err := platforms.Parse(toPlatformString(os, arch, variant))
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing host platform: %v", err)
+ }
+ fromImage, err := platforms.Parse(toPlatformString(inspectInfo.Os, inspectInfo.Architecture, inspectInfo.Variant))
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing image platform: %v", err)
+ }
+
+ if platforms.NewMatcher(expected).Match(fromImage) {
+ return nil, customPlatform, nil
+ }
+
+ return fmt.Errorf("image platform (%s) does not match the expected platform (%s)", fromImage, expected), customPlatform, nil
+}
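
Editor's note: NormalizePlatform only rewrites the fields that were actually supplied; empty fields stay empty, so callers can distinguish "unspecified" from "host default". A quick illustration, with the expected output noted in comments:

package main

import (
	"fmt"

	"github.com/containers/common/libimage"
)

func main() {
	os, arch, variant := libimage.NormalizePlatform("linux", "x86_64", "")
	fmt.Println(os, arch, variant) // "linux amd64" with an empty variant

	os, arch, variant = libimage.NormalizePlatform("", "aarch64", "")
	fmt.Println(os, arch, variant) // OS stays empty; arch is normalized to arm64
}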
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
index 4ce8add2f..2071cceca 100644
--- a/vendor/github.com/containers/common/libimage/pull.go
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -161,11 +161,30 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
localImages := []*Image{}
for _, name := range pulledImages {
- local, _, err := r.LookupImage(name, nil)
+ image, _, err := r.LookupImage(name, nil)
if err != nil {
return nil, errors.Wrapf(err, "error locating pulled image %q name in containers storage", name)
}
- localImages = append(localImages, local)
+
+ // Note that we can ignore the 2nd return value here. Some
+ // images may ship with "wrong" platform, but we already warn
+ // about it. Throwing an error is not (yet) the plan.
+ matchError, _, err := image.matchesPlatform(ctx, options.OS, options.Architecture, options.Variant)
+ if err != nil {
+ return nil, fmt.Errorf("checking platform of image %s: %w", name, err)
+ }
+
+ // If the image does not match the expected/requested platform,
+ // make sure to leave some breadcrumbs for the user.
+ if matchError != nil {
+ if options.Writer == nil {
+ logrus.Warnf("%v", matchError)
+ } else {
+ fmt.Fprintf(options.Writer, "WARNING: %v\n", matchError)
+ }
+ }
+
+ localImages = append(localImages, image)
}
return localImages, pullError
@@ -315,6 +334,11 @@ func (r *Runtime) copyFromDockerArchive(ctx context.Context, ref types.ImageRefe
if err != nil {
return nil, err
}
+ defer func() {
+ if err := reader.Close(); err != nil {
+ logrus.Errorf("Closing reader of docker archive: %v", err)
+ }
+ }()
return r.copyFromDockerArchiveReaderReference(ctx, reader, readerRef, options)
}
@@ -528,9 +552,6 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
sys := r.systemContextCopy()
resolved, err := shortnames.Resolve(sys, imageName)
if err != nil {
- // TODO: that is a too big of a hammer since we should only
- // ignore errors that indicate that there's no alias and no
- // USRs. Must be addressed in c/image first.
if localImage != nil && pullPolicy == config.PullPolicyNewer {
return []string{resolvedImageName}, nil
}
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
index 472482410..7e975b81d 100644
--- a/vendor/github.com/containers/common/libimage/runtime.go
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -182,6 +182,9 @@ type LookupImageOptions struct {
// Lookup an image matching the specified variant.
Variant string
+ // Controls the behavior when checking the platform of an image.
+ PlatformPolicy PlatformPolicy
+
// If set, do not look for items/instances in the manifest list that
// match the current platform but return the manifest list as is.
// only check for manifest list, return ErrNotAManifestList if not found.
@@ -378,21 +381,36 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
image = instance
}
- matches, err := r.imageReferenceMatchesContext(ref, options)
- if err != nil {
- return nil, err
- }
-
- // NOTE: if the user referenced by ID we must optimistically assume
- // that they know what they're doing. Given, we already did the
- // manifest limbo above, we may already have resolved it.
- if !matches && !strings.HasPrefix(image.ID(), candidate) {
- return nil, nil
- }
// Also print the string within the storage transport. That may aid in
// debugging when using additional stores since we see explicitly where
// the store is and which driver (options) are used.
logrus.Debugf("Found image %q as %q in local containers storage (%s)", name, candidate, ref.StringWithinTransport())
+
+ // Do not perform any further platform checks if the image was
+ // requested by ID. In that case, we must assume that the user/tool
+ // know what they're doing.
+ if strings.HasPrefix(image.ID(), candidate) {
+ return image, nil
+ }
+
+ // Ignore the (fatal) error since the image may be corrupted, which
+ // will bubble up at other places. During lookup, we just return it as
+ // is.
+ if matchError, customPlatform, _ := image.matchesPlatform(context.Background(), options.OS, options.Architecture, options.Variant); matchError != nil {
+ if customPlatform {
+ logrus.Debugf("%v", matchError)
+ // Return nil if the user clearly requested a custom
+ // platform and the located image does not match.
+ return nil, nil
+ }
+ switch options.PlatformPolicy {
+ case PlatformPolicyDefault:
+ logrus.Debugf("%v", matchError)
+ case PlatformPolicyWarn:
+ logrus.Warnf("%v", matchError)
+ }
+ }
+
return image, nil
}
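
Editor's note: PlatformPolicy only changes how loudly a lookup complains when a local image's platform differs from the requested one. Constructing a full libimage.Runtime needs a containers/storage store, so this sketch just shows the options a caller would pass to LookupImage:

package main

import (
	"fmt"

	"github.com/containers/common/libimage"
)

func main() {
	opts := &libimage.LookupImageOptions{
		OS:             "linux",
		Architecture:   "arm64",
		PlatformPolicy: libimage.PlatformPolicyWarn, // warn instead of debug-logging a mismatch
	}
	fmt.Printf("%+v\n", opts)
}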
@@ -497,40 +515,6 @@ func (r *Runtime) ResolveName(name string) (string, error) {
return normalized.String(), nil
}
-// imageReferenceMatchesContext return true if the specified reference matches
-// the platform (os, arch, variant) as specified by the lookup options.
-func (r *Runtime) imageReferenceMatchesContext(ref types.ImageReference, options *LookupImageOptions) (bool, error) {
- if options.Architecture+options.OS+options.Variant == "" {
- return true, nil
- }
-
- ctx := context.Background()
- img, err := ref.NewImage(ctx, &r.systemContext)
- if err != nil {
- return false, err
- }
- defer img.Close()
- data, err := img.Inspect(ctx)
- if err != nil {
- return false, err
- }
-
- if options.Architecture != "" && options.Architecture != data.Architecture {
- logrus.Debugf("architecture %q does not match architecture %q of image %s", options.Architecture, data.Architecture, ref)
- return false, nil
- }
- if options.OS != "" && options.OS != data.Os {
- logrus.Debugf("OS %q does not match OS %q of image %s", options.OS, data.Os, ref)
- return false, nil
- }
- if options.Variant != "" && options.Variant != data.Variant {
- logrus.Debugf("variant %q does not match variant %q of image %s", options.Variant, data.Variant, ref)
- return false, nil
- }
-
- return true, nil
-}
-
// IsExternalContainerFunc allows for checking whether the specified container
// is an external one. The definition of an external container can be set by
// callers.
diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go
index 33a4776ce..204bcc8c7 100644
--- a/vendor/github.com/containers/common/libimage/search.go
+++ b/vendor/github.com/containers/common/libimage/search.go
@@ -7,6 +7,7 @@ import (
"strings"
"sync"
+ "github.com/containers/common/libimage/define"
registryTransport "github.com/containers/image/v5/docker"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/transports/alltransports"
@@ -81,22 +82,22 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) {
for _, f := range filter {
arr := strings.SplitN(f, "=", 2)
switch arr[0] {
- case "stars":
+ case define.SearchFilterStars:
if len(arr) < 2 {
- return nil, errors.Errorf("invalid `stars` filter %q, should be stars=<value>", filter)
+ return nil, errors.Errorf("invalid filter %q, should be stars=<value>", filter)
}
stars, err := strconv.Atoi(arr[1])
if err != nil {
return nil, errors.Wrapf(err, "incorrect value type for stars filter")
}
sFilter.Stars = stars
- case "is-automated":
+ case define.SearchFilterAutomated:
if len(arr) == 2 && arr[1] == "false" {
sFilter.IsAutomated = types.OptionalBoolFalse
} else {
sFilter.IsAutomated = types.OptionalBoolTrue
}
- case "is-official":
+ case define.SearchFilterOfficial:
if len(arr) == 2 && arr[1] == "false" {
sFilter.IsOfficial = types.OptionalBoolFalse
} else {
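
The filter keys now come from shared constants in libimage/define instead of bare string literals, which keeps front ends and the parser in sync. A short sketch building filters from those constants and feeding them to ParseSearchFilter, assuming its exported signature stays ParseSearchFilter([]string) (*SearchFilter, error):

package main

import (
	"fmt"
	"log"

	"github.com/containers/common/libimage"
	"github.com/containers/common/libimage/define"
)

func main() {
	// Build the raw filter strings the way a CLI front end might,
	// using the shared constants instead of "stars"/"is-official" literals.
	filters := []string{
		define.SearchFilterStars + "=10",
		define.SearchFilterOfficial + "=true",
	}
	parsed, err := libimage.ParseSearchFilter(filters)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("min stars: %d\n", parsed.Stars)
}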
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
index bda7ed7d0..96a2a9a4a 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package cni
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
index 6bfa8d63b..79d7ef120 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
@@ -16,8 +16,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package cni
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
index 25cc173a6..a407a8dea 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package cni
diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go
index f6954db05..c86196c17 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/config.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/config.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package cni
@@ -12,7 +12,6 @@ import (
pkgutil "github.com/containers/common/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/vishvananda/netlink"
)
// NetworkCreate will take a partial filled Network and fill the
@@ -133,14 +132,7 @@ func (n *cniNetwork) NetworkRemove(nameOrID string) error {
// Remove the bridge network interface on the host.
if network.libpodNet.Driver == types.BridgeNetworkDriver {
- link, err := netlink.LinkByName(network.libpodNet.NetworkInterface)
- if err == nil {
- err = netlink.LinkDel(link)
- // only log the error, it is not fatal
- if err != nil {
- logrus.Infof("Failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err)
- }
- }
+ deleteLink(network.libpodNet.NetworkInterface)
}
file := network.filename
diff --git a/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go b/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go
new file mode 100644
index 000000000..ff95c0e17
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go
@@ -0,0 +1,17 @@
+//go:build freebsd
+// +build freebsd
+
+package cni
+
+import (
+ "os/exec"
+
+ "github.com/sirupsen/logrus"
+)
+
+func deleteLink(name string) {
+ if output, err := exec.Command("ifconfig", name, "destroy").CombinedOutput(); err != nil {
+ // only log the error, it is not fatal
+ logrus.Infof("Failed to remove network interface %s: %v: %s", name, err, output)
+ }
+}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/config_linux.go b/vendor/github.com/containers/common/libnetwork/cni/config_linux.go
new file mode 100644
index 000000000..836fd73bf
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/cni/config_linux.go
@@ -0,0 +1,20 @@
+//go:build linux
+// +build linux
+
+package cni
+
+import (
+ "github.com/sirupsen/logrus"
+ "github.com/vishvananda/netlink"
+)
+
+func deleteLink(name string) {
+ link, err := netlink.LinkByName(name)
+ if err == nil {
+ err = netlink.LinkDel(link)
+ // only log the error, it is not fatal
+ if err != nil {
+ logrus.Infof("Failed to remove network interface %s: %v", name, err)
+ }
+ }
+}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go
index 82b9cbd2e..561f309d0 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/network.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/network.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package cni
diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go
index c5461d74c..35236cf25 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/run.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/run.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package cni
@@ -12,13 +12,11 @@ import (
"github.com/containernetworking/cni/libcni"
cnitypes "github.com/containernetworking/cni/pkg/types"
types040 "github.com/containernetworking/cni/pkg/types/040"
- "github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/vishvananda/netlink"
)
// Setup will setup the container network namespace. It returns
@@ -36,14 +34,7 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma
return nil, err
}
- // set the loopback adapter up in the container netns
- err = ns.WithNetNSPath(namespacePath, func(_ ns.NetNS) error {
- link, err := netlink.LinkByName("lo")
- if err == nil {
- err = netlink.LinkSetUp(link)
- }
- return err
- })
+ err = setupLoopback(namespacePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to set the loopback adapter up")
}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go b/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go
new file mode 100644
index 000000000..c356a864a
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go
@@ -0,0 +1,13 @@
+package cni
+
+import (
+ "os/exec"
+)
+
+// FreeBSD vnet adds the lo0 interface automatically - we just need to
+// add the default address. Note: this will also add ::1 as a side
+// effect.
+func setupLoopback(namespacePath string) error {
+ // The jexec wrapper runs the ifconfig command inside the jail.
+ return exec.Command("jexec", namespacePath, "ifconfig", "lo0", "inet", "127.0.0.1").Run()
+}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/run_linux.go b/vendor/github.com/containers/common/libnetwork/cni/run_linux.go
new file mode 100644
index 000000000..735e4960e
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/cni/run_linux.go
@@ -0,0 +1,17 @@
+package cni
+
+import (
+ "github.com/containernetworking/plugins/pkg/ns"
+ "github.com/vishvananda/netlink"
+)
+
+func setupLoopback(namespacePath string) error {
+ // set the loopback adapter up in the container netns
+ return ns.WithNetNSPath(namespacePath, func(_ ns.NetNS) error {
+ link, err := netlink.LinkByName("lo")
+ if err == nil {
+ err = netlink.LinkSetUp(link)
+ }
+ return err
+ })
+}
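
The deleteLink and setupLoopback helpers are split into _linux.go and _freebsd.go files selected by build tags, so the shared CNI code stays platform-neutral. A minimal, hypothetical sketch of the same pattern (file names, package, and function are illustrative only):

// file: loopback_linux.go (hypothetical)
//go:build linux
// +build linux

package netutil

// platformLoopback names the loopback interface on Linux.
func platformLoopback() string { return "lo" }

// file: loopback_freebsd.go (hypothetical)
//go:build freebsd
// +build freebsd

package netutil

// platformLoopback names the loopback interface on FreeBSD.
func platformLoopback() string { return "lo0" }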
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go
index f2c72ab9e..d8843eb2c 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/config.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go
index 29a7b4f2a..b375acd1b 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/const.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
index ac87c5438..65dcd5497 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/exec.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
index 861854351..89820f1d6 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go
index 8e7576a56..0d03cd5e6 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/network.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go
index c5aa181fd..7f0a84140 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/run.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go
index e70f096a4..f41598f77 100644
--- a/vendor/github.com/containers/common/libnetwork/network/interface.go
+++ b/vendor/github.com/containers/common/libnetwork/network/interface.go
@@ -1,5 +1,5 @@
-//go:build linux
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
package network
@@ -25,14 +25,8 @@ import (
const (
// defaultNetworkBackendFileName is the file name for sentinel file to store the backend
defaultNetworkBackendFileName = "defaultNetworkBackend"
- // cniConfigDir is the directory where cni configuration is found
- cniConfigDir = "/etc/cni/net.d/"
// cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins
cniConfigDirRootless = "cni/net.d/"
- // netavarkConfigDir is the config directory for the rootful network files
- netavarkConfigDir = "/etc/containers/networks"
- // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db
- netavarkRunDir = "/run/containers/networks"
// netavarkBinary is the name of the netavark binary
netavarkBinary = "netavark"
diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_freebsd.go b/vendor/github.com/containers/common/libnetwork/network/interface_freebsd.go
new file mode 100644
index 000000000..4d60b25c7
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/network/interface_freebsd.go
@@ -0,0 +1,10 @@
+package network
+
+const (
+ // cniConfigDir is the directory where cni configuration is found
+ cniConfigDir = "/usr/local/etc/cni/net.d/"
+ // netavarkConfigDir is the config directory for the rootful network files
+ netavarkConfigDir = "/usr/local/etc/containers/networks"
+ // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db
+ netavarkRunDir = "/var/run/containers/networks"
+)
diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_linux.go b/vendor/github.com/containers/common/libnetwork/network/interface_linux.go
new file mode 100644
index 000000000..a16194400
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/network/interface_linux.go
@@ -0,0 +1,10 @@
+package network
+
+const (
+ // cniConfigDir is the directory where cni configuration is found
+ cniConfigDir = "/etc/cni/net.d/"
+ // netavarkConfigDir is the config directory for the rootful network files
+ netavarkConfigDir = "/etc/containers/networks"
+ // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db
+ netavarkRunDir = "/run/containers/networks"
+)
diff --git a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go
new file mode 100644
index 000000000..c451d3b49
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go
@@ -0,0 +1,182 @@
+package resolvconf
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/common/pkg/util"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ localhost = "127.0.0.1"
+ systemdResolvedIP = "127.0.0.53"
+)
+
+// Params for the New() function.
+type Params struct {
+ // Path is the path to the new resolv.conf file which should be created.
+ Path string
+ // Namespaces is the list of container namespaces.
+ // This is required to first check for a resolv.conf under /etc/netns,
+ // created by "ip netns". Also used to check if the container has a
+ // netns, in which case the localhost nameserver must be filtered.
+ Namespaces []specs.LinuxNamespace
+ // IPv6Enabled will filter ipv6 nameservers when not set to true.
+ IPv6Enabled bool
+ // KeepHostServers can be set when it is required to still keep the
+ // original resolv.conf content even when custom Nameserver/Searches/Options
+ // are set. In this case they will be appended to the given values.
+ KeepHostServers bool
+ // Nameservers is a list of nameservers the container should use,
+ // instead of the default ones from the host.
+ Nameservers []string
+ // Searches is a list of dns search domains the container should use,
+ // instead of the default ones from the host.
+ Searches []string
+ // Options is a list of dns options the container should use,
+ // instead of the default ones from the host.
+ Options []string
+
+ // resolvConfPath is the path which should be used as base to get the dns
+ // options. This should only be used for testing purposes. For all other
+ // callers this defaults to /etc/resolv.conf.
+ resolvConfPath string
+}
+
+func getDefaultResolvConf(params *Params) ([]byte, bool, error) {
+ resolveConf := DefaultResolvConf
+ // this is only used for testing
+ if params.resolvConfPath != "" {
+ resolveConf = params.resolvConfPath
+ }
+ hostNS := true
+ for _, ns := range params.Namespaces {
+ if ns.Type == specs.NetworkNamespace {
+ hostNS = false
+ if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
+ // check for netns created by "ip netns"
+ path := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
+ _, err := os.Stat(path)
+ if err == nil {
+ resolveConf = path
+ }
+ }
+ break
+ }
+ }
+
+ contents, err := os.ReadFile(resolveConf)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, false, err
+ }
+ if hostNS {
+ return contents, hostNS, nil
+ }
+
+ ns := getNameservers(contents)
+ // Check for a local-only resolver; in this case we want to get the real nameservers
+ // since localhost is not reachable from the netns.
+ if len(ns) == 1 {
+ var path string
+ switch ns[0] {
+ case systemdResolvedIP:
+ // used by systemd-resolved
+ path = "/run/systemd/resolve/resolv.conf"
+ case localhost:
+ // used by NetworkManager https://github.com/containers/podman/issues/13599
+ path = "/run/NetworkManager/no-stub-resolv.conf"
+ }
+ if path != "" {
+ // read the actual resolv.conf file for the detected local resolver
+ resolvedContents, err := os.ReadFile(path)
+ if err != nil {
+ // do not error when the file does not exist; the detection logic is not perfect
+ if !errors.Is(err, os.ErrNotExist) {
+ return nil, false, fmt.Errorf("local resolver detected, but could not read real resolv.conf at %q: %w", path, err)
+ }
+ } else {
+ logrus.Debugf("found local resolver, using %q to get the nameservers", path)
+ contents = resolvedContents
+ }
+ }
+ }
+
+ return contents, hostNS, nil
+}
+
+// unsetSearchDomainsIfNeeded removes the search domains when they contain a single dot as an element.
+func unsetSearchDomainsIfNeeded(searches []string) []string {
+ if util.StringInSlice(".", searches) {
+ return nil
+ }
+ return searches
+}
+
+// New creates a new resolv.conf file with the given params.
+func New(params *Params) error {
+ // short path: if everything is given there is no need to actually read the host's /etc/resolv.conf
+ if len(params.Nameservers) > 0 && len(params.Options) > 0 && len(params.Searches) > 0 && !params.KeepHostServers {
+ return build(params.Path, params.Nameservers, unsetSearchDomainsIfNeeded(params.Searches), params.Options)
+ }
+
+ content, hostNS, err := getDefaultResolvConf(params)
+ if err != nil {
+ return fmt.Errorf("failed to get the default /etc/resolv.conf content: %w", err)
+ }
+
+ content = filterResolvDNS(content, params.IPv6Enabled, !hostNS)
+
+ nameservers := params.Nameservers
+ if len(nameservers) == 0 || params.KeepHostServers {
+ nameservers = append(nameservers, getNameservers(content)...)
+ }
+
+ searches := unsetSearchDomainsIfNeeded(params.Searches)
+ // if no params.Searches are given, use the host ones;
+ // otherwise make sure that they were not explicitly unset before adding host ones
+ if len(params.Searches) == 0 || (params.KeepHostServers && len(searches) > 0) {
+ searches = append(searches, getSearchDomains(content)...)
+ }
+
+ options := params.Options
+ if len(options) == 0 || params.KeepHostServers {
+ options = append(options, getOptions(content)...)
+ }
+
+ return build(params.Path, nameservers, searches, options)
+}
+
+// Add will add the given nameservers to the given resolv.conf file.
+// It will add the nameservers in front of the existing ones.
+func Add(path string, nameservers []string) error {
+ contents, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ nameservers = append(nameservers, getNameservers(contents)...)
+ return build(path, nameservers, getSearchDomains(contents), getOptions(contents))
+}
+
+// Remove the given nameservers from the given resolv.conf file.
+func Remove(path string, nameservers []string) error {
+ contents, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ oldNameservers := getNameservers(contents)
+ newNameserver := make([]string, 0, len(oldNameservers))
+ for _, ns := range oldNameservers {
+ if !util.StringInSlice(ns, nameservers) {
+ newNameserver = append(newNameserver, ns)
+ }
+ }
+
+ return build(path, newNameserver, getSearchDomains(contents), getOptions(contents))
+}
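
The new resolvconf package exposes New, Add, and Remove. A hedged usage sketch of New for a container with its own network namespace; the paths and nameserver are made up:

package main

import (
	"log"

	"github.com/containers/common/libnetwork/resolvconf"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Write a resolv.conf for a container with its own netns. The netns
	// entry tells the package to filter out localhost resolvers that would
	// be unreachable from inside the namespace; KeepHostServers appends the
	// host entries after the custom nameserver.
	err := resolvconf.New(&resolvconf.Params{
		Path: "/tmp/example-resolv.conf",
		Namespaces: []specs.LinuxNamespace{
			{Type: specs.NetworkNamespace, Path: "/run/netns/example"},
		},
		IPv6Enabled:     false,
		KeepHostServers: true,
		Nameservers:     []string{"10.0.2.3"},
	})
	if err != nil {
		log.Fatal(err)
	}
}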
diff --git a/pkg/resolvconf/resolvconf.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go
index f23cd61b0..54b8c3227 100644
--- a/pkg/resolvconf/resolvconf.go
+++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go
@@ -1,26 +1,23 @@
// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf.
-// Originally from github.com/docker/libnetwork/resolvconf.
+// Originally from github.com/docker/libnetwork/resolvconf but heavily modified to better work with podman.
package resolvconf
import (
"bytes"
- "io/ioutil"
+ "os"
"regexp"
"strings"
- "sync"
- "github.com/containers/podman/v4/pkg/resolvconf/dns"
- "github.com/containers/storage/pkg/ioutils"
"github.com/sirupsen/logrus"
)
const (
- // DefaultResolvConf points to the default file used for dns configuration on a linux machine
+ // DefaultResolvConf points to the default file used for dns configuration on a linux machine.
DefaultResolvConf = "/etc/resolv.conf"
)
var (
- // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS
+ // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS.
defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"}
defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"}
ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
@@ -29,94 +26,30 @@ var (
// will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants
// -- e.g. other link-local types -- either won't work in containers or are unnecessary.
// For readability and sufficiency for Docker purposes this seemed more reasonable than a
- // 1000+ character regexp with exact and complete IPv6 validation
+ // 1000+ character regexp with exact and complete IPv6 validation.
ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?`
- localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`)
+ // ipLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
+ ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`
+
+ localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`)
nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`)
nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`)
optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`)
)
-var lastModified struct {
- sync.Mutex
- sha256 string
- contents []byte
-}
-
-// File contains the resolv.conf content and its hash
-type File struct {
- Content []byte
- Hash string
-}
-
-// Get returns the contents of /etc/resolv.conf and its hash
-func Get() (*File, error) {
- return GetSpecific(DefaultResolvConf)
-}
-
-// GetSpecific returns the contents of the user specified resolv.conf file and its hash
-func GetSpecific(path string) (*File, error) {
- resolv, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- hash, err := ioutils.HashData(bytes.NewReader(resolv))
- if err != nil {
- return nil, err
- }
- return &File{Content: resolv, Hash: hash}, nil
-}
-
-// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash
-// and, if modified since last check, returns the bytes and new hash.
-// This feature is used by the resolv.conf updater for containers
-func GetIfChanged() (*File, error) {
- lastModified.Lock()
- defer lastModified.Unlock()
-
- resolv, err := ioutil.ReadFile("/etc/resolv.conf")
- if err != nil {
- return nil, err
- }
- newHash, err := ioutils.HashData(bytes.NewReader(resolv))
- if err != nil {
- return nil, err
- }
- if lastModified.sha256 != newHash {
- lastModified.sha256 = newHash
- lastModified.contents = resolv
- return &File{Content: resolv, Hash: newHash}, nil
- }
- // nothing changed, so return no data
- return nil, nil
-}
-
-// GetLastModified retrieves the last used contents and hash of the host resolv.conf.
-// Used by containers updating on restart
-func GetLastModified() *File {
- lastModified.Lock()
- defer lastModified.Unlock()
-
- return &File{Content: lastModified.contents, Hash: lastModified.sha256}
-}
-
-// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs:
+// filterResolvDNS cleans up the config in resolvConf. It has two main jobs:
// 1. If a netns is enabled, it looks for localhost (127.*|::1) entries in the provided
// resolv.conf, removing local nameserver entries, and, if the resulting
// cleaned config has no defined nameservers left, adds default DNS entries
// 2. Given the caller provides the enable/disable state of IPv6, the filter
// code will remove all IPv6 nameservers if it is not enabled for containers
//
-func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) (*File, error) {
+func filterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) []byte {
// If we're using the host netns, we have nothing to do besides hash the file.
if !netnsEnabled {
- hash, err := ioutils.HashData(bytes.NewReader(resolvConf))
- if err != nil {
- return nil, err
- }
- return &File{Content: resolvConf, Hash: hash}, nil
+ return resolvConf
}
cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})
// if IPv6 is not enabled, also clean out any IPv6 address nameserver
@@ -125,7 +58,7 @@ func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) (*F
}
// if the resulting resolvConf has no more nameservers defined, add appropriate
// default DNS servers for IPv4 and (optionally) IPv6
- if len(GetNameservers(cleanedResolvConf)) == 0 {
+ if len(getNameservers(cleanedResolvConf)) == 0 {
logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns)
dns := defaultIPv4Dns
if ipv6Enabled {
@@ -134,19 +67,15 @@ func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) (*F
}
cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...)
}
- hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf))
- if err != nil {
- return nil, err
- }
- return &File{Content: cleanedResolvConf, Hash: hash}, nil
+ return cleanedResolvConf
}
// getLines parses input into lines and strips away comments.
-func getLines(input []byte, commentMarker []byte) [][]byte {
+func getLines(input []byte) [][]byte {
lines := bytes.Split(input, []byte("\n"))
var output [][]byte
for _, currentLine := range lines {
- var commentIndex = bytes.Index(currentLine, commentMarker)
+ commentIndex := bytes.Index(currentLine, []byte("#"))
if commentIndex == -1 {
output = append(output, currentLine)
} else {
@@ -156,10 +85,10 @@ func getLines(input []byte, commentMarker []byte) [][]byte {
return output
}
-// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
-func GetNameservers(resolvConf []byte) []string {
+// getNameservers returns nameservers (if any) listed in /etc/resolv.conf.
+func getNameservers(resolvConf []byte) []string {
nameservers := []string{}
- for _, line := range getLines(resolvConf, []byte("#")) {
+ for _, line := range getLines(resolvConf) {
ns := nsRegexp.FindSubmatch(line)
if len(ns) > 0 {
nameservers = append(nameservers, string(ns[1]))
@@ -168,30 +97,12 @@ func GetNameservers(resolvConf []byte) []string {
return nameservers
}
-// GetNameserversAsCIDR returns nameservers (if any) listed in
-// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
-// This function's output is intended for net.ParseCIDR
-func GetNameserversAsCIDR(resolvConf []byte) []string {
- nameservers := []string{}
- for _, nameserver := range GetNameservers(resolvConf) {
- var address string
- // If IPv6, strip zone if present
- if strings.Contains(nameserver, ":") {
- address = strings.Split(nameserver, "%")[0] + "/128"
- } else {
- address = nameserver + "/32"
- }
- nameservers = append(nameservers, address)
- }
- return nameservers
-}
-
-// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf
+// getSearchDomains returns search domains (if any) listed in /etc/resolv.conf
// If more than one search line is encountered, only the contents of the last
// one is returned.
-func GetSearchDomains(resolvConf []byte) []string {
+func getSearchDomains(resolvConf []byte) []string {
domains := []string{}
- for _, line := range getLines(resolvConf, []byte("#")) {
+ for _, line := range getLines(resolvConf) {
match := searchRegexp.FindSubmatch(line)
if match == nil {
continue
@@ -201,12 +112,12 @@ func GetSearchDomains(resolvConf []byte) []string {
return domains
}
-// GetOptions returns options (if any) listed in /etc/resolv.conf
+// getOptions returns options (if any) listed in /etc/resolv.conf
// If more than one options line is encountered, only the contents of the last
// one is returned.
-func GetOptions(resolvConf []byte) []string {
+func getOptions(resolvConf []byte) []string {
options := []string{}
- for _, line := range getLines(resolvConf, []byte("#")) {
+ for _, line := range getLines(resolvConf) {
match := optionsRegexp.FindSubmatch(line)
if match == nil {
continue
@@ -216,35 +127,30 @@ func GetOptions(resolvConf []byte) []string {
return options
}
-// Build writes a configuration file to path containing a "nameserver" entry
+// build writes a configuration file to path containing a "nameserver" entry
// for every element in dns, a "search" entry for every element in
// dnsSearch, and an "options" entry for every element in dnsOptions.
-func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) {
- content := bytes.NewBuffer(nil)
+func build(path string, dns, dnsSearch, dnsOptions []string) error {
+ content := new(bytes.Buffer)
if len(dnsSearch) > 0 {
if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." {
if _, err := content.WriteString("search " + searchString + "\n"); err != nil {
- return nil, err
+ return err
}
}
}
for _, dns := range dns {
if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil {
- return nil, err
+ return err
}
}
if len(dnsOptions) > 0 {
if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" {
if _, err := content.WriteString("options " + optsString + "\n"); err != nil {
- return nil, err
+ return err
}
}
}
- hash, err := ioutils.HashData(bytes.NewReader(content.Bytes()))
- if err != nil {
- return nil, err
- }
-
- return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644)
+ return os.WriteFile(path, content.Bytes(), 0o644)
}
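
filterResolvDNS strips loopback nameservers once the container gets its own netns and falls back to the default external servers when nothing usable remains. A standalone illustration of the loopback filtering step; the regex is copied from the diff, the sample input is made up:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same loopback pattern as above: IPv4 127.x.x.x or IPv6 ::1 nameserver lines.
	ipLocalhost := `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`
	localhostNS := regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`)

	input := []byte("nameserver 127.0.0.53\nsearch example.test\n")
	cleaned := localhostNS.ReplaceAll(input, []byte{})
	// Prints "search example.test\n"; with no nameservers left, the real
	// code would then append the default external DNS entries.
	fmt.Printf("%q\n", cleaned)
}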
diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio.go b/vendor/github.com/containers/common/pkg/cgroups/blkio.go
index 0fb61c757..a72a641c8 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/blkio.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/blkio.go
@@ -1,3 +1,6 @@
+//go:build !linux
+// +build !linux
+
package cgroups
import (
diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go
new file mode 100644
index 000000000..98b8ae541
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go
@@ -0,0 +1,161 @@
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/pkg/errors"
+)
+
+type linuxBlkioHandler struct {
+ Blkio fs.BlkioGroup
+}
+
+func getBlkioHandler() *linuxBlkioHandler {
+ return &linuxBlkioHandler{}
+}
+
+// Apply set the specified constraints
+func (c *linuxBlkioHandler) Apply(ctr *CgroupControl, res *configs.Resources) error {
+ if ctr.cgroup2 {
+ man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path))
+ if err != nil {
+ return err
+ }
+ return man.Set(res)
+
+ }
+ path := filepath.Join(cgroupRoot, Blkio, ctr.config.Path)
+ return c.Blkio.Set(path, res)
+}
+
+// Create the cgroup
+func (c *linuxBlkioHandler) Create(ctr *CgroupControl) (bool, error) {
+ if ctr.cgroup2 {
+ return false, nil
+ }
+ return ctr.createCgroupDirectory(Blkio)
+}
+
+// Destroy the cgroup
+func (c *linuxBlkioHandler) Destroy(ctr *CgroupControl) error {
+ return rmDirRecursively(ctr.getCgroupv1Path(Blkio))
+}
+
+// Stat fills a metrics structure with usage stats for the controller
+func (c *linuxBlkioHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
+ var ioServiceBytesRecursive []cgroups.BlkioStatEntry
+
+ if ctr.cgroup2 {
+ // more details on the io.stat file format: https://facebookmicrosites.github.io/cgroup2/docs/io-controller.html
+ values, err := readCgroup2MapFile(ctr, "io.stat")
+ if err != nil {
+ return err
+ }
+ for k, v := range values {
+ d := strings.Split(k, ":")
+ if len(d) != 2 {
+ continue
+ }
+ minor, err := strconv.ParseUint(d[0], 10, 0)
+ if err != nil {
+ return err
+ }
+ major, err := strconv.ParseUint(d[1], 10, 0)
+ if err != nil {
+ return err
+ }
+
+ for _, item := range v {
+ d := strings.Split(item, "=")
+ if len(d) != 2 {
+ continue
+ }
+ op := d[0]
+
+ // Accommodate the cgroup v1 naming
+ switch op {
+ case "rbytes":
+ op = "read"
+ case "wbytes":
+ op = "write"
+ }
+
+ value, err := strconv.ParseUint(d[1], 10, 0)
+ if err != nil {
+ return err
+ }
+
+ entry := cgroups.BlkioStatEntry{
+ Op: op,
+ Major: major,
+ Minor: minor,
+ Value: value,
+ }
+ ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry)
+ }
+ }
+ } else {
+ BlkioRoot := ctr.getCgroupv1Path(Blkio)
+
+ p := filepath.Join(BlkioRoot, "blkio.throttle.io_service_bytes_recursive")
+ f, err := os.Open(p)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return errors.Wrapf(err, "open %s", p)
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(line)
+ if len(parts) < 3 {
+ continue
+ }
+ d := strings.Split(parts[0], ":")
+ if len(d) != 2 {
+ continue
+ }
+ minor, err := strconv.ParseUint(d[0], 10, 0)
+ if err != nil {
+ return err
+ }
+ major, err := strconv.ParseUint(d[1], 10, 0)
+ if err != nil {
+ return err
+ }
+
+ op := parts[1]
+
+ value, err := strconv.ParseUint(parts[2], 10, 0)
+ if err != nil {
+ return err
+ }
+ entry := cgroups.BlkioStatEntry{
+ Op: op,
+ Major: major,
+ Minor: minor,
+ Value: value,
+ }
+ ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry)
+ }
+ if err := scanner.Err(); err != nil {
+ return errors.Wrapf(err, "parse %s", p)
+ }
+ }
+ m.BlkioStats.IoServiceBytesRecursive = ioServiceBytesRecursive
+ return nil
+}
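
For reference, cgroup v2 reports per-device I/O in io.stat as one line per device: a "major:minor" key followed by key=value pairs such as rbytes and wbytes. A small standalone sketch of how such a line breaks apart; the sample values are made up:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A typical io.stat line: "<major>:<minor> rbytes=N wbytes=N rios=N wios=N ...".
	line := "259:0 rbytes=4096 wbytes=8192 rios=1 wios=2"
	parts := strings.Fields(line)
	dev := strings.Split(parts[0], ":")
	major, _ := strconv.ParseUint(dev[0], 10, 64)
	minor, _ := strconv.ParseUint(dev[1], 10, 64)
	for _, kv := range parts[1:] {
		d := strings.SplitN(kv, "=", 2)
		v, _ := strconv.ParseUint(d[1], 10, 64)
		fmt.Printf("dev %d:%d op=%s value=%d\n", major, minor, d[0], v)
	}
}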
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go
index 57997d652..eb903d3c3 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go
@@ -1,8 +1,10 @@
+//go:build !linux
+// +build !linux
+
package cgroups
import (
"bufio"
- "bytes"
"context"
"fmt"
"io/ioutil"
@@ -248,47 +250,6 @@ func (c *CgroupControl) getCgroupv1Path(name string) string {
return filepath.Join(cgroupRoot, name, c.path)
}
-// createCgroupv2Path creates the cgroupv2 path and enables all the available controllers
-func createCgroupv2Path(path string) (deferredError error) {
- if !strings.HasPrefix(path, cgroupRoot+"/") {
- return fmt.Errorf("invalid cgroup path %s", path)
- }
- content, err := ioutil.ReadFile(cgroupRoot + "/cgroup.controllers")
- if err != nil {
- return err
- }
- ctrs := bytes.Fields(content)
- res := append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...)
-
- current := "/sys/fs"
- elements := strings.Split(path, "/")
- for i, e := range elements[3:] {
- current = filepath.Join(current, e)
- if i > 0 {
- if err := os.Mkdir(current, 0o755); err != nil {
- if !os.IsExist(err) {
- return err
- }
- } else {
- // If the directory was created, be sure it is not left around on errors.
- defer func() {
- if deferredError != nil {
- os.Remove(current)
- }
- }()
- }
- }
- // We enable the controllers for all the path components except the last one. It is not allowed to add
- // PIDs if there are already enabled controllers.
- if i < len(elements[3:])-1 {
- if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
// initialize initializes the specified hierarchy
func (c *CgroupControl) initialize() (err error) {
createdSoFar := map[string]controllerHandler{}
@@ -332,23 +293,6 @@ func (c *CgroupControl) initialize() (err error) {
return nil
}
-func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) {
- cPath := c.getCgroupv1Path(controller)
- _, err := os.Stat(cPath)
- if err == nil {
- return false, nil
- }
-
- if !os.IsNotExist(err) {
- return false, err
- }
-
- if err := os.MkdirAll(cPath, 0o755); err != nil {
- return false, errors.Wrapf(err, "error creating cgroup for %s", controller)
- }
- return true, nil
-}
-
func readFileAsUint64(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go
new file mode 100644
index 000000000..4b72014bf
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go
@@ -0,0 +1,575 @@
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/containers/storage/pkg/unshare"
+ systemdDbus "github.com/coreos/go-systemd/v22/dbus"
+ "github.com/godbus/dbus/v5"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // ErrCgroupDeleted means the cgroup was deleted
+ ErrCgroupDeleted = errors.New("cgroup deleted")
+ // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment
+ ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments")
+ ErrStatCgroup = errors.New("no cgroup available for gathering user statistics")
+)
+
+// CgroupControl controls a cgroup hierarchy
+type CgroupControl struct {
+ cgroup2 bool
+ config *configs.Cgroup
+ systemd bool
+ // List of additional cgroup subsystems joined that
+ // do not have a custom handler.
+ additionalControllers []controller
+}
+
+type controller struct {
+ name string
+ symlink bool
+}
+
+type controllerHandler interface {
+ Create(*CgroupControl) (bool, error)
+ Apply(*CgroupControl, *configs.Resources) error
+ Destroy(*CgroupControl) error
+ Stat(*CgroupControl, *cgroups.Stats) error
+}
+
+const (
+ cgroupRoot = "/sys/fs/cgroup"
+ // CPU is the cpu controller
+ CPU = "cpu"
+ // CPUAcct is the cpuacct controller
+ CPUAcct = "cpuacct"
+ // CPUset is the cpuset controller
+ CPUset = "cpuset"
+ // Memory is the memory controller
+ Memory = "memory"
+ // Pids is the pids controller
+ Pids = "pids"
+ // Blkio is the blkio controller
+ Blkio = "blkio"
+)
+
+var handlers map[string]controllerHandler
+
+func init() {
+ handlers = make(map[string]controllerHandler)
+ handlers[CPU] = getCPUHandler()
+ handlers[CPUset] = getCpusetHandler()
+ handlers[Memory] = getMemoryHandler()
+ handlers[Pids] = getPidsHandler()
+ handlers[Blkio] = getBlkioHandler()
+}
+
+// getAvailableControllers get the available controllers
+func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]controller, error) {
+ if cgroup2 {
+ controllers := []controller{}
+ controllersFile := cgroupRoot + "/cgroup.controllers"
+ // rootless cgroupv2: check available controllers for current user, systemd or servicescope will inherit
+ if unshare.IsRootless() {
+ userSlice, err := getCgroupPathForCurrentProcess()
+ if err != nil {
+ return controllers, err
+ }
+ // userSlice already contains '/' so not adding here
+ basePath := cgroupRoot + userSlice
+ controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath)
+ }
+ controllersFileBytes, err := ioutil.ReadFile(controllersFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", controllersFile)
+ }
+ for _, controllerName := range strings.Fields(string(controllersFileBytes)) {
+ c := controller{
+ name: controllerName,
+ symlink: false,
+ }
+ controllers = append(controllers, c)
+ }
+ return controllers, nil
+ }
+
+ subsystems, _ := cgroupV1GetAllSubsystems()
+ controllers := []controller{}
+ // cgroupv1 and rootless: No subsystem is available: delegation is unsafe.
+ if unshare.IsRootless() {
+ return controllers, nil
+ }
+
+ for _, name := range subsystems {
+ if _, found := exclude[name]; found {
+ continue
+ }
+ fileInfo, err := os.Stat(cgroupRoot + "/" + name)
+ if err != nil {
+ continue
+ }
+ c := controller{
+ name: name,
+ symlink: !fileInfo.IsDir(),
+ }
+ controllers = append(controllers, c)
+ }
+
+ return controllers, nil
+}
+
+// GetAvailableControllers get string:bool map of all the available controllers
+func GetAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]string, error) {
+ availableControllers, err := getAvailableControllers(exclude, cgroup2)
+ if err != nil {
+ return nil, err
+ }
+ controllerList := []string{}
+ for _, controller := range availableControllers {
+ controllerList = append(controllerList, controller.name)
+ }
+
+ return controllerList, nil
+}
+
+func cgroupV1GetAllSubsystems() ([]string, error) {
+ f, err := os.Open("/proc/cgroups")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ subsystems := []string{}
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ text := s.Text()
+ if text[0] != '#' {
+ parts := strings.Fields(text)
+ if len(parts) >= 4 && parts[3] != "0" {
+ subsystems = append(subsystems, parts[0])
+ }
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return subsystems, nil
+}
+
+func getCgroupPathForCurrentProcess() (string, error) {
+ path := fmt.Sprintf("/proc/%d/cgroup", os.Getpid())
+ f, err := os.Open(path)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ cgroupPath := ""
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ text := s.Text()
+ procEntries := strings.SplitN(text, "::", 2)
+ // set process cgroupPath only if entry is valid
+ if len(procEntries) > 1 {
+ cgroupPath = procEntries[1]
+ }
+ }
+ if err := s.Err(); err != nil {
+ return cgroupPath, err
+ }
+ return cgroupPath, nil
+}
+
+// getCgroupv1Path is a helper function to get the cgroup v1 path
+func (c *CgroupControl) getCgroupv1Path(name string) string {
+ return filepath.Join(cgroupRoot, name, c.config.Path)
+}
+
+// initialize initializes the specified hierarchy
+func (c *CgroupControl) initialize() (err error) {
+ createdSoFar := map[string]controllerHandler{}
+ defer func() {
+ if err != nil {
+ for name, ctr := range createdSoFar {
+ if err := ctr.Destroy(c); err != nil {
+ logrus.Warningf("error cleaning up controller %s for %s", name, c.config.Path)
+ }
+ }
+ }
+ }()
+ if c.cgroup2 {
+ if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.config.Path)); err != nil {
+ return errors.Wrapf(err, "error creating cgroup path %s", c.config.Path)
+ }
+ }
+ for name, handler := range handlers {
+ created, err := handler.Create(c)
+ if err != nil {
+ return err
+ }
+ if created {
+ createdSoFar[name] = handler
+ }
+ }
+
+ if !c.cgroup2 {
+ // We won't need to do this for cgroup v2
+ for _, ctr := range c.additionalControllers {
+ if ctr.symlink {
+ continue
+ }
+ path := c.getCgroupv1Path(ctr.name)
+ if err := os.MkdirAll(path, 0o755); err != nil {
+ return errors.Wrapf(err, "error creating cgroup path for %s", ctr.name)
+ }
+ }
+ }
+
+ return nil
+}
+
+func readFileAsUint64(path string) (uint64, error) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ v := cleanString(string(data))
+ if v == "max" {
+ return math.MaxUint64, nil
+ }
+ ret, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return ret, errors.Wrapf(err, "parse %s from %s", v, path)
+ }
+ return ret, nil
+}
+
+func readFileByKeyAsUint64(path, key string) (uint64, error) {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ for _, line := range strings.Split(string(content), "\n") {
+ fields := strings.SplitN(line, " ", 2)
+ if fields[0] == key {
+ v := cleanString(string(fields[1]))
+ if v == "max" {
+ return math.MaxUint64, nil
+ }
+ ret, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return ret, errors.Wrapf(err, "parse %s from %s", v, path)
+ }
+ return ret, nil
+ }
+ }
+
+ return 0, fmt.Errorf("no key named %s from %s", key, path)
+}
+
+// New creates a new cgroup control
+func New(path string, resources *configs.Resources) (*CgroupControl, error) {
+ cgroup2, err := IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, err
+ }
+ control := &CgroupControl{
+ cgroup2: cgroup2,
+ config: &configs.Cgroup{
+ Path: path,
+ Resources: resources,
+ },
+ }
+
+ if !cgroup2 {
+ controllers, err := getAvailableControllers(handlers, false)
+ if err != nil {
+ return nil, err
+ }
+ control.additionalControllers = controllers
+ }
+
+ if err := control.initialize(); err != nil {
+ return nil, err
+ }
+
+ return control, nil
+}
+
+// NewSystemd creates a new cgroup control
+func NewSystemd(path string, resources *configs.Resources) (*CgroupControl, error) {
+ cgroup2, err := IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, err
+ }
+ control := &CgroupControl{
+ cgroup2: cgroup2,
+ systemd: true,
+ config: &configs.Cgroup{
+ Path: path,
+ Resources: resources,
+ Rootless: unshare.IsRootless(),
+ },
+ }
+
+ return control, nil
+}
+
+// Load loads an existing cgroup control
+func Load(path string) (*CgroupControl, error) {
+ cgroup2, err := IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, err
+ }
+ control := &CgroupControl{
+ cgroup2: cgroup2,
+ systemd: false,
+ config: &configs.Cgroup{
+ Path: path,
+ },
+ }
+ if !cgroup2 {
+ controllers, err := getAvailableControllers(handlers, false)
+ if err != nil {
+ return nil, err
+ }
+ control.additionalControllers = controllers
+ }
+ if !cgroup2 {
+ oneExists := false
+ // check that the cgroup exists at least under one controller
+ for name := range handlers {
+ p := control.getCgroupv1Path(name)
+ if _, err := os.Stat(p); err == nil {
+ oneExists = true
+ break
+ }
+ }
+
+ // if there is no controller at all, raise an error
+ if !oneExists {
+ if unshare.IsRootless() {
+ return nil, ErrCgroupV1Rootless
+ }
+ // compatible with the error code
+ // used by containerd/cgroups
+ return nil, ErrCgroupDeleted
+ }
+ }
+ return control, nil
+}
+
+// CreateSystemdUnit creates the systemd cgroup
+func (c *CgroupControl) CreateSystemdUnit(path string) error {
+ if !c.systemd {
+ return fmt.Errorf("the cgroup controller is not using systemd")
+ }
+
+ conn, err := systemdDbus.NewWithContext(context.TODO())
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ return systemdCreate(c.config.Resources, path, conn)
+}
+
+// GetUserConnection returns a user connection to D-BUS
+func GetUserConnection(uid int) (*systemdDbus.Conn, error) {
+ return systemdDbus.NewConnection(func() (*dbus.Conn, error) {
+ return dbusAuthConnection(uid, dbus.SessionBusPrivate)
+ })
+}
+
+// CreateSystemdUserUnit creates the systemd cgroup for the specified user
+func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error {
+ if !c.systemd {
+ return fmt.Errorf("the cgroup controller is not using systemd")
+ }
+
+ conn, err := GetUserConnection(uid)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ return systemdCreate(c.config.Resources, path, conn)
+}
+
+func dbusAuthConnection(uid int, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := createBus()
+ if err != nil {
+ return nil, err
+ }
+
+ methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))}
+
+ err = conn.Auth(methods)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ if err := conn.Hello(); err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+// Delete cleans a cgroup
+func (c *CgroupControl) Delete() error {
+ return c.DeleteByPath(c.config.Path)
+}
+
+// DeleteByPathConn deletes the specified cgroup path using the specified
+// dbus connection if needed.
+func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) error {
+ if c.systemd {
+ return systemdDestroyConn(path, conn)
+ }
+ if c.cgroup2 {
+ return rmDirRecursively(filepath.Join(cgroupRoot, c.config.Path))
+ }
+ var lastError error
+ for _, h := range handlers {
+ if err := h.Destroy(c); err != nil {
+ lastError = err
+ }
+ }
+
+ for _, ctr := range c.additionalControllers {
+ if ctr.symlink {
+ continue
+ }
+ p := c.getCgroupv1Path(ctr.name)
+ if err := rmDirRecursively(p); err != nil {
+ lastError = errors.Wrapf(err, "remove %s", p)
+ }
+ }
+ return lastError
+}
+
+// DeleteByPath deletes the specified cgroup path
+func (c *CgroupControl) DeleteByPath(path string) error {
+ if c.systemd {
+ conn, err := systemdDbus.NewWithContext(context.TODO())
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+ return c.DeleteByPathConn(path, conn)
+ }
+ return c.DeleteByPathConn(path, nil)
+}
+
+// Update updates the cgroups
+func (c *CgroupControl) Update(resources *configs.Resources) error {
+ for _, h := range handlers {
+ if err := h.Apply(c, resources); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddPid moves the specified pid to the cgroup
+func (c *CgroupControl) AddPid(pid int) error {
+ pidString := []byte(fmt.Sprintf("%d\n", pid))
+
+ if c.cgroup2 {
+ path := filepath.Join(cgroupRoot, c.config.Path)
+ return fs2.CreateCgroupPath(path, c.config)
+ }
+
+ names := make([]string, 0, len(handlers))
+ for n := range handlers {
+ names = append(names, n)
+ }
+
+ for _, c := range c.additionalControllers {
+ if !c.symlink {
+ names = append(names, c.name)
+ }
+ }
+
+ for _, n := range names {
+ // If we aren't using cgroup2, we won't write correctly to unified hierarchy
+ if !c.cgroup2 && n == "unified" {
+ continue
+ }
+ p := filepath.Join(c.getCgroupv1Path(n), "tasks")
+ if err := ioutil.WriteFile(p, pidString, 0o644); err != nil {
+ return errors.Wrapf(err, "write %s", p)
+ }
+ }
+ return nil
+}
+
+// Stat returns usage statistics for the cgroup
+func (c *CgroupControl) Stat() (*cgroups.Stats, error) {
+ m := cgroups.Stats{}
+ found := false
+ for _, h := range handlers {
+ if err := h.Stat(c, &m); err != nil {
+ if !os.IsNotExist(errors.Cause(err)) {
+ return nil, err
+ }
+ logrus.Warningf("Failed to retrieve cgroup stats: %v", err)
+ continue
+ }
+ found = true
+ }
+ if !found {
+ return nil, ErrStatCgroup
+ }
+ return &m, nil
+}
+
+func readCgroup2MapPath(path string) (map[string][]string, error) {
+ ret := map[string][]string{}
+ f, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return ret, nil
+ }
+ return nil, errors.Wrapf(err, "open file %s", path)
+ }
+ defer f.Close()
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(line)
+ if len(parts) < 2 {
+ continue
+ }
+ ret[parts[0]] = parts[1:]
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, errors.Wrapf(err, "parsing file %s", path)
+ }
+ return ret, nil
+}
+
+func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, error) {
+ p := filepath.Join(cgroupRoot, ctr.config.Path, name)
+
+ return readCgroup2MapPath(p)
+}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
index 5c6c199e0..3e7653672 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
@@ -99,7 +99,7 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) {
func rmDirRecursively(path string) error {
killProcesses := func(signal syscall.Signal) {
if signal == unix.SIGKILL {
- if err := ioutil.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0600); err == nil {
+ if err := ioutil.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0o600); err == nil {
return
}
}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu.go b/vendor/github.com/containers/common/pkg/cgroups/cpu.go
index c9e94f269..fff76b9e2 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cpu.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cpu.go
@@ -1,12 +1,12 @@
+//go:build !linux
+// +build !linux
+
package cgroups
import (
"fmt"
- "io/ioutil"
"os"
- "path/filepath"
"strconv"
- "strings"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -18,36 +18,6 @@ func getCPUHandler() *cpuHandler {
return &cpuHandler{}
}
-func cleanString(s string) string {
- return strings.Trim(s, "\n")
-}
-
-func readAcct(ctr *CgroupControl, name string) (uint64, error) {
- p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name)
- return readFileAsUint64(p)
-}
-
-func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) {
- p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name)
- data, err := ioutil.ReadFile(p)
- if err != nil {
- return nil, errors.Wrapf(err, "reading %s", p)
- }
- r := []uint64{}
- for _, s := range strings.Split(string(data), " ") {
- s = cleanString(s)
- if s == "" {
- break
- }
- v, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, errors.Wrapf(err, "parsing %s", s)
- }
- r = append(r, v)
- }
- return r, nil
-}
-
// Apply set the specified constraints
func (c *cpuHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error {
if res.CPU == nil {
@@ -119,41 +89,3 @@ func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error {
m.CPU = CPUMetrics{Usage: usage}
return nil
}
-
-// GetSystemCPUUsage returns the system usage for all the cgroups
-func GetSystemCPUUsage() (uint64, error) {
- cgroupv2, err := IsCgroup2UnifiedMode()
- if err != nil {
- return 0, err
- }
- if !cgroupv2 {
- p := filepath.Join(cgroupRoot, CPUAcct, "cpuacct.usage")
- return readFileAsUint64(p)
- }
-
- files, err := ioutil.ReadDir(cgroupRoot)
- if err != nil {
- return 0, err
- }
- var total uint64
- for _, file := range files {
- if !file.IsDir() {
- continue
- }
- p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat")
-
- values, err := readCgroup2MapPath(p)
- if err != nil {
- return 0, err
- }
-
- if val, found := values["usage_usec"]; found {
- v, err := strconv.ParseUint(cleanString(val[0]), 10, 64)
- if err != nil {
- return 0, err
- }
- total += v * 1000
- }
- }
- return total, nil
-}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go
new file mode 100644
index 000000000..bca55575c
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go
@@ -0,0 +1,100 @@
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/pkg/errors"
+)
+
+type linuxCPUHandler struct {
+ CPU fs.CpuGroup
+}
+
+func getCPUHandler() *linuxCPUHandler {
+ return &linuxCPUHandler{}
+}
+
+// Apply set the specified constraints
+func (c *linuxCPUHandler) Apply(ctr *CgroupControl, res *configs.Resources) error {
+ if ctr.cgroup2 {
+ man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path))
+ if err != nil {
+ return err
+ }
+ return man.Set(res)
+ }
+ path := filepath.Join(cgroupRoot, CPU, ctr.config.Path)
+ return c.CPU.Set(path, res)
+}
+
+// Create the cgroup
+func (c *linuxCPUHandler) Create(ctr *CgroupControl) (bool, error) {
+ if ctr.cgroup2 {
+ return false, nil
+ }
+ return ctr.createCgroupDirectory(CPU)
+}
+
+// Destroy the cgroup
+func (c *linuxCPUHandler) Destroy(ctr *CgroupControl) error {
+ return rmDirRecursively(ctr.getCgroupv1Path(CPU))
+}
+
+// Stat fills a metrics structure with usage stats for the controller
+func (c *linuxCPUHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
+ var err error
+ cpu := cgroups.CpuStats{}
+ if ctr.cgroup2 {
+ values, err := readCgroup2MapFile(ctr, "cpu.stat")
+ if err != nil {
+ return err
+ }
+ if val, found := values["usage_usec"]; found {
+ cpu.CpuUsage.TotalUsage, err = strconv.ParseUint(cleanString(val[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ cpu.CpuUsage.TotalUsage *= 1000
+ }
+ if val, found := values["system_usec"]; found {
+ cpu.CpuUsage.UsageInKernelmode, err = strconv.ParseUint(cleanString(val[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ cpu.CpuUsage.UsageInKernelmode *= 1000
+ }
+ } else {
+ cpu.CpuUsage.TotalUsage, err = readAcct(ctr, "cpuacct.usage")
+ if err != nil {
+ if !os.IsNotExist(errors.Cause(err)) {
+ return err
+ }
+ cpu.CpuUsage.TotalUsage = 0
+ }
+ cpu.CpuUsage.UsageInKernelmode, err = readAcct(ctr, "cpuacct.usage_sys")
+ if err != nil {
+ if !os.IsNotExist(errors.Cause(err)) {
+ return err
+ }
+ cpu.CpuUsage.UsageInKernelmode = 0
+ }
+ cpu.CpuUsage.PercpuUsage, err = readAcctList(ctr, "cpuacct.usage_percpu")
+ if err != nil {
+ if !os.IsNotExist(errors.Cause(err)) {
+ return err
+ }
+ cpu.CpuUsage.PercpuUsage = nil
+ }
+ }
+ m.CpuStats = cpu
+ return nil
+}
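The cgroup v2 branch of the Stat method above reads cpu.stat and converts its microsecond counters into nanoseconds before storing them in the runc stats structure. A minimal, self-contained sketch of that parsing step (the helper name parseCPUStat is hypothetical; only the standard library is used):

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// parseCPUStat extracts the usage_usec and system_usec counters from the
// contents of a cgroup v2 cpu.stat file and converts both to nanoseconds,
// the unit the runc stats structures expect.
func parseCPUStat(contents string) (totalNS, kernelNS uint64, err error) {
	scanner := bufio.NewScanner(strings.NewReader(contents))
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) != 2 {
			continue
		}
		v, perr := strconv.ParseUint(fields[1], 10, 64)
		if perr != nil {
			return 0, 0, perr
		}
		switch fields[0] {
		case "usage_usec":
			totalNS = v * 1000 // cpu.stat reports microseconds
		case "system_usec":
			kernelNS = v * 1000
		}
	}
	return totalNS, kernelNS, scanner.Err()
}

func main() {
	total, kernel, err := parseCPUStat("usage_usec 2500\nuser_usec 2000\nsystem_usec 500\n")
	if err != nil {
		panic(err)
	}
	fmt.Println(total, kernel) // 2500000 500000
}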
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go
index 2bfeb80db..f7ec9a33b 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go
@@ -1,52 +1,17 @@
+//go:build !linux
+// +build !linux
+
package cgroups
import (
"fmt"
- "io/ioutil"
"path/filepath"
- "strings"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
type cpusetHandler struct{}
-func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) {
- if dir == cgroupRoot {
- return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file)
- }
- path := filepath.Join(dir, file)
- parentPath := path
- if cgroupv2 {
- parentPath = fmt.Sprintf("%s.effective", parentPath)
- }
- data, err := ioutil.ReadFile(parentPath)
- if err != nil {
- return nil, errors.Wrapf(err, "open %s", path)
- }
- if strings.Trim(string(data), "\n") != "" {
- return data, nil
- }
- data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2)
- if err != nil {
- return nil, err
- }
- if err := ioutil.WriteFile(path, data, 0o644); err != nil {
- return nil, errors.Wrapf(err, "write %s", path)
- }
- return data, nil
-}
-
-func cpusetCopyFromParent(path string, cgroupv2 bool) error {
- for _, file := range []string{"cpuset.cpus", "cpuset.mems"} {
- if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil {
- return err
- }
- }
- return nil
-}
-
func getCpusetHandler() *cpusetHandler {
return &cpusetHandler{}
}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go
new file mode 100644
index 000000000..a4cc2acaf
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go
@@ -0,0 +1,57 @@
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "path/filepath"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type linuxCpusetHandler struct {
+ CPUSet fs.CpusetGroup
+}
+
+func getCpusetHandler() *linuxCpusetHandler {
+ return &linuxCpusetHandler{}
+}
+
+// Apply sets the specified constraints
+func (c *linuxCpusetHandler) Apply(ctr *CgroupControl, res *configs.Resources) error {
+ if ctr.cgroup2 {
+ man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path))
+ if err != nil {
+ return err
+ }
+ return man.Set(res)
+ }
+ path := filepath.Join(cgroupRoot, CPUset, ctr.config.Path)
+ return c.CPUSet.Set(path, res)
+}
+
+// Create the cgroup
+func (c *linuxCpusetHandler) Create(ctr *CgroupControl) (bool, error) {
+ if ctr.cgroup2 {
+ path := filepath.Join(cgroupRoot, ctr.config.Path)
+ return true, cpusetCopyFromParent(path, true)
+ }
+ created, err := ctr.createCgroupDirectory(CPUset)
+ if !created || err != nil {
+ return created, err
+ }
+ return true, cpusetCopyFromParent(ctr.getCgroupv1Path(CPUset), false)
+}
+
+// Destroy the cgroup
+func (c *linuxCpusetHandler) Destroy(ctr *CgroupControl) error {
+ return rmDirRecursively(ctr.getCgroupv1Path(CPUset))
+}
+
+// Stat fills a metrics structure with usage stats for the controller
+func (c *linuxCpusetHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory.go b/vendor/github.com/containers/common/pkg/cgroups/memory.go
index 10d65893c..b597b85bf 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/memory.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/memory.go
@@ -1,3 +1,6 @@
+//go:build !linux
+// +build !linux
+
package cgroups
import (
diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go
new file mode 100644
index 000000000..5d2bf5d0e
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go
@@ -0,0 +1,78 @@
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "path/filepath"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type linuxMemHandler struct {
+ Mem fs.MemoryGroup
+}
+
+func getMemoryHandler() *linuxMemHandler {
+ return &linuxMemHandler{}
+}
+
+// Apply sets the specified constraints
+func (c *linuxMemHandler) Apply(ctr *CgroupControl, res *configs.Resources) error {
+ if ctr.cgroup2 {
+ man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path))
+ if err != nil {
+ return err
+ }
+ return man.Set(res)
+ }
+ path := filepath.Join(cgroupRoot, Memory, ctr.config.Path)
+ return c.Mem.Set(path, res)
+}
+
+// Create the cgroup
+func (c *linuxMemHandler) Create(ctr *CgroupControl) (bool, error) {
+ if ctr.cgroup2 {
+ return false, nil
+ }
+ return ctr.createCgroupDirectory(Memory)
+}
+
+// Destroy the cgroup
+func (c *linuxMemHandler) Destroy(ctr *CgroupControl) error {
+ return rmDirRecursively(ctr.getCgroupv1Path(Memory))
+}
+
+// Stat fills a metrics structure with usage stats for the controller
+func (c *linuxMemHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
+ var err error
+ memUsage := cgroups.MemoryStats{}
+
+ var memoryRoot string
+ var limitFilename string
+
+ if ctr.cgroup2 {
+ memoryRoot = filepath.Join(cgroupRoot, ctr.config.Path)
+ limitFilename = "memory.max"
+ if memUsage.Usage.Usage, err = readFileByKeyAsUint64(filepath.Join(memoryRoot, "memory.stat"), "anon"); err != nil {
+ return err
+ }
+ } else {
+ memoryRoot = ctr.getCgroupv1Path(Memory)
+ limitFilename = "memory.limit_in_bytes"
+ if memUsage.Usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, "memory.usage_in_bytes")); err != nil {
+ return err
+ }
+ }
+
+ memUsage.Usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, limitFilename))
+ if err != nil {
+ return err
+ }
+
+ m.MemoryStats = memUsage
+ return nil
+}
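On the cgroup v2 path, the memory Stat handler above pulls the current usage out of the key/value file memory.stat (the "anon" key) via readFileByKeyAsUint64. A sketch of just that key lookup (hypothetical helper name, standard library only):

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// readKeyFromStat returns the value stored under key in a cgroup v2
// key/value file such as memory.stat; it mirrors what readFileByKeyAsUint64
// does for the "anon" key in the handler above.
func readKeyFromStat(contents, key string) (uint64, error) {
	scanner := bufio.NewScanner(strings.NewReader(contents))
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) == 2 && fields[0] == key {
			return strconv.ParseUint(fields[1], 10, 64)
		}
	}
	if err := scanner.Err(); err != nil {
		return 0, err
	}
	return 0, fmt.Errorf("key %q not found", key)
}

func main() {
	stat := "anon 4096\nfile 8192\nkernel_stack 0\n"
	anon, err := readKeyFromStat(stat, "anon")
	if err != nil {
		panic(err)
	}
	fmt.Println("anon bytes:", anon) // 4096
}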
diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids.go b/vendor/github.com/containers/common/pkg/cgroups/pids.go
index 650120a56..1cb7ced82 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/pids.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/pids.go
@@ -1,3 +1,6 @@
+//go:build !linux
+// +build !linux
+
package cgroups
import (
diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go b/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go
new file mode 100644
index 000000000..a8163ce46
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go
@@ -0,0 +1,71 @@
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "path/filepath"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type linuxPidHandler struct {
+ Pid fs.PidsGroup
+}
+
+func getPidsHandler() *linuxPidHandler {
+ return &linuxPidHandler{}
+}
+
+// Apply sets the specified constraints
+func (c *linuxPidHandler) Apply(ctr *CgroupControl, res *configs.Resources) error {
+ if ctr.cgroup2 {
+ man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path))
+ if err != nil {
+ return err
+ }
+ return man.Set(res)
+ }
+
+ path := filepath.Join(cgroupRoot, Pids, ctr.config.Path)
+ return c.Pid.Set(path, res)
+}
+
+// Create the cgroup
+func (c *linuxPidHandler) Create(ctr *CgroupControl) (bool, error) {
+ if ctr.cgroup2 {
+ return false, nil
+ }
+ return ctr.createCgroupDirectory(Pids)
+}
+
+// Destroy the cgroup
+func (c *linuxPidHandler) Destroy(ctr *CgroupControl) error {
+ return rmDirRecursively(ctr.getCgroupv1Path(Pids))
+}
+
+// Stat fills a metrics structure with usage stats for the controller
+func (c *linuxPidHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
+ if ctr.config.Path == "" {
+ // nothing we can do to retrieve the pids.current path
+ return nil
+ }
+
+ var PIDRoot string
+ if ctr.cgroup2 {
+ PIDRoot = filepath.Join(cgroupRoot, ctr.config.Path)
+ } else {
+ PIDRoot = ctr.getCgroupv1Path(Pids)
+ }
+
+ current, err := readFileAsUint64(filepath.Join(PIDRoot, "pids.current"))
+ if err != nil {
+ return err
+ }
+
+ m.PidsStats.Current = current
+ return nil
+}
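The pids handler reduces to reading a single integer from pids.current under the resolved cgroup directory. A small sketch of that read (hypothetical helper and path, standard library only):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// currentPids reads pids.current from a cgroup directory, which is all the
// Stat method above does once PIDRoot has been resolved.
func currentPids(cgroupDir string) (uint64, error) {
	data, err := os.ReadFile(filepath.Join(cgroupDir, "pids.current"))
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

func main() {
	// Illustrative path; the real code derives it from cgroupRoot and ctr.config.Path.
	n, err := currentPids("/sys/fs/cgroup/machine.slice")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("pids.current:", n)
}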
diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go
index 92065a2d7..118fa97a1 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/systemd.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/systemd.go
@@ -1,3 +1,6 @@
+//go:build !linux
+// +build !linux
+
package cgroups
import (
@@ -52,15 +55,11 @@ func systemdCreate(path string, c *systemdDbus.Conn) error {
/*
systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that
has the following license:
-
Copyright The containerd Authors.
-
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
https://www.apache.org/licenses/LICENSE-2.0
-
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
new file mode 100644
index 000000000..ee9f584de
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
@@ -0,0 +1,167 @@
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ systemdDbus "github.com/coreos/go-systemd/v22/dbus"
+ "github.com/godbus/dbus/v5"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Conn) error {
+ slice, name := filepath.Split(path)
+ slice = strings.TrimSuffix(slice, "/")
+
+ var lastError error
+ for i := 0; i < 2; i++ {
+ properties := []systemdDbus.Property{
+ systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
+ systemdDbus.PropWants(slice),
+ }
+ pMap := map[string]bool{
+ "DefaultDependencies": false,
+ "MemoryAccounting": true,
+ "CPUAccounting": true,
+ "BlockIOAccounting": true,
+ }
+ if i == 0 {
+ pMap["Delegate"] = true
+ }
+
+ for k, v := range pMap {
+ p := systemdDbus.Property{
+ Name: k,
+ Value: dbus.MakeVariant(v),
+ }
+ properties = append(properties, p)
+ }
+
+ uMap, sMap, bMap, iMap := resourcesToProps(resources)
+ for k, v := range uMap {
+ p := systemdDbus.Property{
+ Name: k,
+ Value: dbus.MakeVariant(v),
+ }
+ properties = append(properties, p)
+ }
+
+ for k, v := range sMap {
+ p := systemdDbus.Property{
+ Name: k,
+ Value: dbus.MakeVariant(v),
+ }
+ properties = append(properties, p)
+ }
+
+ for k, v := range bMap {
+ p := systemdDbus.Property{
+ Name: k,
+ Value: dbus.MakeVariant(v),
+ }
+ properties = append(properties, p)
+ }
+
+ for k, v := range iMap {
+ p := systemdDbus.Property{
+ Name: k,
+ Value: dbus.MakeVariant(v),
+ }
+ properties = append(properties, p)
+ }
+
+ ch := make(chan string)
+ _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch)
+ if err != nil {
+ lastError = err
+ continue
+ }
+ <-ch
+ return nil
+ }
+ return lastError
+}
+
+/*
+ systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that
+ has the following license:
+
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+func systemdDestroyConn(path string, c *systemdDbus.Conn) error {
+ name := filepath.Base(path)
+
+ ch := make(chan string)
+ _, err := c.StopUnitContext(context.TODO(), name, "replace", ch)
+ if err != nil {
+ return err
+ }
+ <-ch
+ return nil
+}
+
+func resourcesToProps(res *configs.Resources) (map[string]uint64, map[string]string, map[string][]byte, map[string]int64) {
+ bMap := make(map[string][]byte)
+ // sMap is not used yet, but will be once more resource limits are added
+ sMap := make(map[string]string)
+ iMap := make(map[string]int64)
+ uMap := make(map[string]uint64)
+
+ // CPU
+ if res.CpuPeriod != 0 {
+ uMap["CPUQuotaPeriodUSec"] = res.CpuPeriod
+ }
+ if res.CpuQuota != 0 {
+ period := res.CpuPeriod
+ if period == 0 {
+ period = uint64(100000)
+ }
+ cpuQuotaPerSecUSec := uint64(res.CpuQuota*1000000) / period
+ if cpuQuotaPerSecUSec%10000 != 0 {
+ cpuQuotaPerSecUSec = ((cpuQuotaPerSecUSec / 10000) + 1) * 10000
+ }
+ uMap["CPUQuotaPerSecUSec"] = cpuQuotaPerSecUSec
+ }
+
+ // CPUSet
+ if res.CpusetCpus != "" {
+ bits := []byte(res.CpusetCpus)
+ bMap["AllowedCPUs"] = bits
+ }
+ if res.CpusetMems != "" {
+ bits := []byte(res.CpusetMems)
+ bMap["AllowedMemoryNodes"] = bits
+ }
+
+ // Mem
+ if res.Memory != 0 {
+ uMap["MemoryMax"] = uint64(res.Memory)
+ }
+ if res.MemorySwap != 0 {
+ uMap["MemorySwapMax"] = uint64(res.MemorySwap)
+ }
+
+ // Blkio
+ if res.BlkioWeight > 0 {
+ uMap["BlockIOWeight"] = uint64(res.BlkioWeight)
+ }
+
+ return uMap, sMap, bMap, iMap
+}
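resourcesToProps above translates a runc quota/period pair into systemd's CPUQuotaPerSecUSec property, rounding up to the nearest 10ms. A standalone sketch of just that conversion (the function name is hypothetical):

package main

import "fmt"

// cpuQuotaPerSecUSec mirrors the CPU section of resourcesToProps above:
// systemd wants a per-second quota in microseconds, rounded up to the
// nearest 10ms, while runc resources carry a quota/period pair.
func cpuQuotaPerSecUSec(quota int64, period uint64) uint64 {
	if period == 0 {
		period = 100000 // same default period as the code above
	}
	v := uint64(quota*1000000) / period
	if v%10000 != 0 {
		v = ((v / 10000) + 1) * 10000
	}
	return v
}

func main() {
	// --cpus=1.5 is typically quota 150000 over a 100000us period.
	fmt.Println(cpuQuotaPerSecUSec(150000, 100000)) // 1500000
	// An uneven quota is rounded up to the next 10ms boundary.
	fmt.Println(cpuQuotaPerSecUSec(123456, 100000)) // 1240000
}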
diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils.go b/vendor/github.com/containers/common/pkg/cgroups/utils.go
new file mode 100644
index 000000000..1fd45f40b
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/utils.go
@@ -0,0 +1,176 @@
+package cgroups
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+var TestMode bool
+
+func cleanString(s string) string {
+ return strings.Trim(s, "\n")
+}
+
+func readAcct(ctr *CgroupControl, name string) (uint64, error) {
+ p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name)
+ return readFileAsUint64(p)
+}
+
+func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) {
+ p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name)
+ data, err := ioutil.ReadFile(p)
+ if err != nil {
+ return nil, errors.Wrapf(err, "reading %s", p)
+ }
+ r := []uint64{}
+ for _, s := range strings.Split(string(data), " ") {
+ s = cleanString(s)
+ if s == "" {
+ break
+ }
+ v, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, errors.Wrapf(err, "parsing %s", s)
+ }
+ r = append(r, v)
+ }
+ return r, nil
+}
+
+// GetSystemCPUUsage returns the system usage for all the cgroups
+func GetSystemCPUUsage() (uint64, error) {
+ cgroupv2, err := IsCgroup2UnifiedMode()
+ if err != nil {
+ return 0, err
+ }
+ if !cgroupv2 {
+ p := filepath.Join(cgroupRoot, CPUAcct, "cpuacct.usage")
+ return readFileAsUint64(p)
+ }
+
+ files, err := ioutil.ReadDir(cgroupRoot)
+ if err != nil {
+ return 0, err
+ }
+ var total uint64
+ for _, file := range files {
+ if !file.IsDir() {
+ continue
+ }
+ p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat")
+
+ values, err := readCgroup2MapPath(p)
+ if err != nil {
+ return 0, err
+ }
+
+ if val, found := values["usage_usec"]; found {
+ v, err := strconv.ParseUint(cleanString(val[0]), 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ total += v * 1000
+ }
+ }
+ return total, nil
+}
+
+func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) {
+ if dir == cgroupRoot {
+ return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file)
+ }
+ path := filepath.Join(dir, file)
+ parentPath := path
+ if cgroupv2 {
+ parentPath = fmt.Sprintf("%s.effective", parentPath)
+ }
+ data, err := ioutil.ReadFile(parentPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "open %s", path)
+ }
+ if strings.Trim(string(data), "\n") != "" {
+ return data, nil
+ }
+ data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2)
+ if err != nil {
+ return nil, err
+ }
+ if err := ioutil.WriteFile(path, data, 0o644); err != nil {
+ return nil, errors.Wrapf(err, "write %s", path)
+ }
+ return data, nil
+}
+
+func cpusetCopyFromParent(path string, cgroupv2 bool) error {
+ for _, file := range []string{"cpuset.cpus", "cpuset.mems"} {
+ if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// createCgroupv2Path creates the cgroupv2 path and enables all the available controllers
+func createCgroupv2Path(path string) (deferredError error) {
+ if !strings.HasPrefix(path, cgroupRoot+"/") {
+ return fmt.Errorf("invalid cgroup path %s", path)
+ }
+ content, err := ioutil.ReadFile(cgroupRoot + "/cgroup.controllers")
+ if err != nil {
+ return err
+ }
+ ctrs := bytes.Fields(content)
+ res := append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...)
+
+ current := "/sys/fs"
+ elements := strings.Split(path, "/")
+ for i, e := range elements[3:] {
+ current = filepath.Join(current, e)
+ if i > 0 {
+ if err := os.Mkdir(current, 0o755); err != nil {
+ if !os.IsExist(err) {
+ return err
+ }
+ } else {
+ // If the directory was created, be sure it is not left around on errors.
+ defer func() {
+ if deferredError != nil {
+ os.Remove(current)
+ }
+ }()
+ }
+ }
+ // We enable the controllers for all the path components except the last one. It is not allowed to add
+ // PIDs if there are already enabled controllers.
+ if i < len(elements[3:])-1 {
+ if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) {
+ cPath := c.getCgroupv1Path(controller)
+ _, err := os.Stat(cPath)
+ if err == nil {
+ return false, nil
+ }
+
+ if !os.IsNotExist(err) {
+ return false, err
+ }
+
+ if err := os.MkdirAll(cPath, 0o755); err != nil {
+ return false, errors.Wrapf(err, "error creating cgroup for %s", controller)
+ }
+ return true, nil
+}
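createCgroupv2Path enables every available controller on each ancestor directory by writing a "+ctrl +ctrl ..." string to cgroup.subtree_control. A sketch of how that payload is built from the contents of cgroup.controllers (hypothetical helper name, standard library only):

package main

import (
	"bytes"
	"fmt"
)

// subtreeControlPayload builds the string that createCgroupv2Path writes to
// each ancestor's cgroup.subtree_control file: every controller listed in
// cgroup.controllers, prefixed with "+".
func subtreeControlPayload(controllersFile []byte) []byte {
	ctrs := bytes.Fields(controllersFile)
	if len(ctrs) == 0 {
		return nil
	}
	return append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...)
}

func main() {
	// Typical contents of /sys/fs/cgroup/cgroup.controllers.
	content := []byte("cpuset cpu io memory pids\n")
	fmt.Printf("%s\n", subtreeControlPayload(content)) // +cpuset +cpu +io +memory +pids
}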
diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go
new file mode 100644
index 000000000..bd37042cd
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go
@@ -0,0 +1,146 @@
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// WriteFile writes to a cgroup file
+func WriteFile(dir, file, data string) error {
+ fd, err := OpenFile(dir, file, unix.O_WRONLY)
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+ for {
+ _, err := fd.Write([]byte(data))
+ if errors.Is(err, unix.EINTR) {
+ logrus.Infof("interrupted while writing %s to %s", data, fd.Name())
+ continue
+ }
+ return err
+ }
+}
+
+// OpenFile opens a cgroup file with the given flags
+func OpenFile(dir, file string, flags int) (*os.File, error) {
+ var resolveFlags uint64
+ mode := os.FileMode(0)
+ if TestMode && flags&os.O_WRONLY != 0 {
+ flags |= os.O_TRUNC | os.O_CREATE
+ mode = 0o600
+ }
+ cgroupPath := path.Join(dir, file)
+ relPath := strings.TrimPrefix(cgroupPath, cgroupRoot+"/")
+
+ var stats unix.Statfs_t
+ fdTest, errOpen := unix.Openat2(-1, cgroupRoot, &unix.OpenHow{
+ Flags: unix.O_DIRECTORY | unix.O_PATH,
+ })
+ errStat := unix.Fstatfs(fdTest, &stats)
+ cgroupFd := fdTest
+
+ resolveFlags = unix.RESOLVE_BENEATH | unix.RESOLVE_NO_MAGICLINKS
+ if stats.Type == unix.CGROUP2_SUPER_MAGIC {
+ // cgroupv2 has a single mountpoint and no "cpu,cpuacct" symlinks
+ resolveFlags |= unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_SYMLINKS
+ }
+
+ if errOpen != nil || errStat != nil || (len(relPath) == len(cgroupPath)) { // openat2 not available, use os
+ fdTest, err := os.OpenFile(cgroupPath, flags, mode)
+ if err != nil {
+ return nil, err
+ }
+ if TestMode {
+ return fdTest, nil
+ }
+ if err := unix.Fstatfs(int(fdTest.Fd()), &stats); err != nil {
+ _ = fdTest.Close()
+ return nil, &os.PathError{Op: "statfs", Path: cgroupPath, Err: err}
+ }
+ if stats.Type != unix.CGROUP_SUPER_MAGIC && stats.Type != unix.CGROUP2_SUPER_MAGIC {
+ _ = fdTest.Close()
+ return nil, &os.PathError{Op: "open", Path: cgroupPath, Err: errors.New("not a cgroup file")}
+ }
+ return fdTest, nil
+ }
+
+ fd, err := unix.Openat2(cgroupFd, relPath,
+ &unix.OpenHow{
+ Resolve: resolveFlags,
+ Flags: uint64(flags) | unix.O_CLOEXEC,
+ Mode: uint64(mode),
+ })
+ if err != nil {
+ logrus.Debugf("failed to open %s via openat2: %v", relPath, err)
+ return nil, err
+ }
+
+ return os.NewFile(uintptr(fd), cgroupPath), nil
+}
+
+// ReadFile reads from a cgroup file, opening it with the read only flag
+func ReadFile(dir, file string) (string, error) {
+ fd, err := OpenFile(dir, file, unix.O_RDONLY)
+ if err != nil {
+ return "", err
+ }
+ defer fd.Close()
+ var buf bytes.Buffer
+
+ _, err = buf.ReadFrom(fd)
+ return buf.String(), err
+}
+
+// GetBlkioFiles gets the proper files for blkio weights
+func GetBlkioFiles(cgroupPath string) (wtFile, wtDevFile string) {
+ var weightFile string
+ var weightDeviceFile string
+ // this is important since runc keeps these variables private; they won't be set otherwise
+ if cgroups.PathExists(filepath.Join(cgroupPath, "blkio.weight")) {
+ weightFile = "blkio.weight"
+ weightDeviceFile = "blkio.weight_device"
+ } else {
+ weightFile = "blkio.bfq.weight"
+ weightDeviceFile = "blkio.bfq.weight_device"
+ }
+ return weightFile, weightDeviceFile
+}
+
+// SetBlkioThrottle sets the throttle limits for the cgroup
+func SetBlkioThrottle(res *configs.Resources, cgroupPath string) error {
+ for _, td := range res.BlkioThrottleReadBpsDevice {
+ if err := WriteFile(cgroupPath, "blkio.throttle.read_bps_device", fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)); err != nil {
+ return err
+ }
+ }
+ for _, td := range res.BlkioThrottleWriteBpsDevice {
+ if err := WriteFile(cgroupPath, "blkio.throttle.write_bps_device", fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)); err != nil {
+ return err
+ }
+ }
+ for _, td := range res.BlkioThrottleReadIOPSDevice {
+ if err := WriteFile(cgroupPath, "blkio.throttle.read_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range res.BlkioThrottleWriteIOPSDevice {
+ if err := WriteFile(cgroupPath, "blkio.throttle.write_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
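WriteFile above retries a cgroup write whenever it is interrupted by a signal (EINTR). A minimal sketch of that retry pattern against an ordinary file, leaving out the openat2-based path resolution (hypothetical helper name):

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// writeRetryingEINTR shows the retry pattern used by WriteFile above: a write
// interrupted by a signal (EINTR) is simply attempted again.
func writeRetryingEINTR(f *os.File, data string) error {
	for {
		_, err := f.Write([]byte(data))
		if errors.Is(err, syscall.EINTR) {
			continue // interrupted before completing, retry the write
		}
		return err
	}
}

func main() {
	f, err := os.CreateTemp("", "cgroup-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := writeRetryingEINTR(f, "100000 100000\n"); err != nil {
		fmt.Println("write failed:", err)
	}
}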
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 25572968f..e3d19ee88 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -1,6 +1,7 @@
package config
import (
+ "errors"
"fmt"
"io/fs"
"os"
@@ -13,10 +14,10 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/capabilities"
+ "github.com/containers/common/pkg/util"
"github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
selinux "github.com/opencontainers/selinux/go-selinux"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -46,6 +47,8 @@ const (
BoltDBStateStore RuntimeStateStore = iota
)
+var validImageVolumeModes = []string{"bind", "tmpfs", "ignore"}
+
// ProxyEnv is a list of Proxy Environment variables
var ProxyEnv = []string{
"http_proxy",
@@ -77,7 +80,6 @@ type Config struct {
// ContainersConfig represents the "containers" TOML config table
// containers global options for containers tools
type ContainersConfig struct {
-
// Devices to add to all containers
Devices []string `toml:"devices,omitempty"`
@@ -294,6 +296,10 @@ type EngineConfig struct {
// Building/committing defaults to OCI.
ImageDefaultFormat string `toml:"image_default_format,omitempty"`
+ // ImageVolumeMode Tells container engines how to handle the builtin
+ // image volumes. Acceptable values are "bind", "tmpfs", and "ignore".
+ ImageVolumeMode string `toml:"image_volume_mode,omitempty"`
+
// InfraCommand is the command run to start up a pod infra container.
InfraCommand string `toml:"infra_command,omitempty"`
@@ -604,14 +610,14 @@ func NewConfig(userConfigPath string) (*Config, error) {
// Now, gather the system configs and merge them as needed.
configs, err := systemConfigs()
if err != nil {
- return nil, errors.Wrap(err, "finding config on system")
+ return nil, fmt.Errorf("finding config on system: %w", err)
}
for _, path := range configs {
// Merge changes in later configs with the previous configs.
// Each config file that specified fields, will override the
// previous fields.
if err = readConfigFromFile(path, config); err != nil {
- return nil, errors.Wrapf(err, "reading system config %q", path)
+ return nil, fmt.Errorf("reading system config %q: %w", path, err)
}
logrus.Debugf("Merged system config %q", path)
logrus.Tracef("%+v", config)
@@ -624,7 +630,7 @@ func NewConfig(userConfigPath string) (*Config, error) {
// readConfigFromFile reads in container config in the specified
// file and then merge changes with the current default.
if err = readConfigFromFile(userConfigPath, config); err != nil {
- return nil, errors.Wrapf(err, "reading user config %q", userConfigPath)
+ return nil, fmt.Errorf("reading user config %q: %w", userConfigPath, err)
}
logrus.Debugf("Merged user config %q", userConfigPath)
logrus.Tracef("%+v", config)
@@ -650,7 +656,7 @@ func readConfigFromFile(path string, config *Config) error {
logrus.Tracef("Reading configuration file %q", path)
meta, err := toml.DecodeFile(path, config)
if err != nil {
- return errors.Wrapf(err, "decode configuration %v", path)
+ return fmt.Errorf("decode configuration %v: %w", path, err)
}
keys := meta.Undecoded()
if len(keys) > 0 {
@@ -704,7 +710,7 @@ func systemConfigs() ([]string, error) {
path := os.Getenv("CONTAINERS_CONF")
if path != "" {
if _, err := os.Stat(path); err != nil {
- return nil, errors.Wrap(err, "CONTAINERS_CONF file")
+ return nil, fmt.Errorf("CONTAINERS_CONF file: %w", err)
}
return append(configs, path), nil
}
@@ -779,7 +785,7 @@ func (c *Config) addCAPPrefix() {
// Validate is the main entry point for library configuration validation.
func (c *Config) Validate() error {
if err := c.Containers.Validate(); err != nil {
- return errors.Wrap(err, "validating containers config")
+ return fmt.Errorf("validating containers config: %w", err)
}
if !c.Containers.EnableLabeling {
@@ -787,11 +793,11 @@ func (c *Config) Validate() error {
}
if err := c.Engine.Validate(); err != nil {
- return errors.Wrap(err, "validating engine configs")
+ return fmt.Errorf("validating engine configs: %w", err)
}
if err := c.Network.Validate(); err != nil {
- return errors.Wrap(err, "validating network configs")
+ return fmt.Errorf("validating network configs %w", err)
}
return nil
@@ -821,11 +827,14 @@ func (c *EngineConfig) Validate() error {
return err
}
+ if err := ValidateImageVolumeMode(c.ImageVolumeMode); err != nil {
+ return err
+ }
// Check if the pullPolicy from containers.conf is valid
// if it is invalid returns the error
pullPolicy := strings.ToLower(c.PullPolicy)
if _, err := ValidatePullPolicy(pullPolicy); err != nil {
- return errors.Wrapf(err, "invalid pull type from containers.conf %q", c.PullPolicy)
+ return fmt.Errorf("invalid pull type from containers.conf %q: %w", c.PullPolicy, err)
}
return nil
}
@@ -851,11 +860,11 @@ func (c *ContainersConfig) Validate() error {
}
if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize {
- return errors.Errorf("log size max should be negative or >= %d", OCIBufSize)
+ return fmt.Errorf("log size max should be negative or >= %d", OCIBufSize)
}
if _, err := units.FromHumanSize(c.ShmSize); err != nil {
- return errors.Errorf("invalid --shm-size %s, %q", c.ShmSize, err)
+ return fmt.Errorf("invalid --shm-size %s, %q", c.ShmSize, err)
}
return nil
@@ -869,11 +878,11 @@ func (c *NetworkConfig) Validate() error {
if &c.DefaultSubnetPools != &DefaultSubnetPools {
for _, pool := range c.DefaultSubnetPools {
if pool.Base.IP.To4() == nil {
- return errors.Errorf("invalid subnet pool ip %q", pool.Base.IP)
+ return fmt.Errorf("invalid subnet pool ip %q", pool.Base.IP)
}
ones, _ := pool.Base.IPNet.Mask.Size()
if ones > pool.Size {
- return errors.Errorf("invalid subnet pool, size is bigger than subnet %q", &pool.Base.IPNet)
+ return fmt.Errorf("invalid subnet pool, size is bigger than subnet %q", &pool.Base.IPNet)
}
if pool.Size > 32 {
return errors.New("invalid subnet pool size, must be between 0-32")
@@ -891,7 +900,7 @@ func (c *NetworkConfig) Validate() error {
}
}
- return errors.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ","))
+ return fmt.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ","))
}
// FindConmon iterates over (*Config).ConmonPath and returns the path
@@ -928,14 +937,12 @@ func (c *Config) FindConmon() (string, error) {
}
if foundOutdatedConmon {
- return "", errors.Wrapf(ErrConmonOutdated,
- "please update to v%d.%d.%d or later",
- _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion)
+ return "", fmt.Errorf("please update to v%d.%d.%d or later: %w",
+ _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion, ErrConmonOutdated)
}
- return "", errors.Wrapf(ErrInvalidArg,
- "could not find a working conmon binary (configured options: %v)",
- c.Engine.ConmonPath)
+ return "", fmt.Errorf("could not find a working conmon binary (configured options: %v: %w)",
+ c.Engine.ConmonPath, ErrInvalidArg)
}
// GetDefaultEnv returns the environment variables for the container.
@@ -992,7 +999,7 @@ func Device(device string) (src, dst, permissions string, err error) {
switch len(split) {
case 3:
if !IsValidDeviceMode(split[2]) {
- return "", "", "", errors.Errorf("invalid device mode: %s", split[2])
+ return "", "", "", fmt.Errorf("invalid device mode: %s", split[2])
}
permissions = split[2]
fallthrough
@@ -1001,18 +1008,18 @@ func Device(device string) (src, dst, permissions string, err error) {
permissions = split[1]
} else {
if split[1] == "" || split[1][0] != '/' {
- return "", "", "", errors.Errorf("invalid device mode: %s", split[1])
+ return "", "", "", fmt.Errorf("invalid device mode: %s", split[1])
}
dst = split[1]
}
fallthrough
case 1:
if !strings.HasPrefix(split[0], "/dev/") {
- return "", "", "", errors.Errorf("invalid device mode: %s", split[0])
+ return "", "", "", fmt.Errorf("invalid device mode: %s", split[0])
}
src = split[0]
default:
- return "", "", "", errors.Errorf("invalid device specification: %s", device)
+ return "", "", "", fmt.Errorf("invalid device specification: %s", device)
}
if dst == "" {
@@ -1195,14 +1202,14 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) {
case connEnv != "":
d, found := c.Engine.ServiceDestinations[connEnv]
if !found {
- return "", "", errors.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv)
+ return "", "", fmt.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv)
}
return d.URI, d.Identity, nil
case c.Engine.ActiveService != "":
d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService]
if !found {
- return "", "", errors.Errorf("%q service destination not found", c.Engine.ActiveService)
+ return "", "", fmt.Errorf("%q service destination not found", c.Engine.ActiveService)
}
return d.URI, d.Identity, nil
case c.Engine.RemoteURI != "":
@@ -1232,9 +1239,9 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error)
}
configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries."
if len(c.Engine.HelperBinariesDir) == 0 {
- return "", errors.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint)
+ return "", fmt.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint)
}
- return "", errors.Errorf("could not find %q in one of %v. %s", name, c.Engine.HelperBinariesDir, configHint)
+ return "", fmt.Errorf("could not find %q in one of %v. %s", name, c.Engine.HelperBinariesDir, configHint)
}
// ImageCopyTmpDir default directory to store temporary image files during copy
@@ -1253,7 +1260,7 @@ func (c *Config) ImageCopyTmpDir() (string, error) {
}
}
- return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir)
+ return "", fmt.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir)
}
// setupEnv sets the environment variables for the engine
@@ -1305,3 +1312,14 @@ func (e eventsLogMaxSize) MarshalText() ([]byte, error) {
}
return []byte(fmt.Sprintf("%d", e)), nil
}
+
+func ValidateImageVolumeMode(mode string) error {
+ if mode == "" {
+ return nil
+ }
+ if util.StringInSlice(mode, validImageVolumeModes) {
+ return nil
+ }
+
+ return fmt.Errorf("invalid image volume mode %q required value: %s", mode, strings.Join(validImageVolumeModes, ", "))
+}
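A short usage sketch for the new ValidateImageVolumeMode helper, assuming the package is imported from the vendored path shown in the diff header; the empty string means "fall back to the default", anything else must be one of the values in validImageVolumeModes:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Only "", "bind", "tmpfs", and "ignore" are accepted.
	for _, mode := range []string{"", "bind", "tmpfs", "ignore", "overlay"} {
		if err := config.ValidateImageVolumeMode(mode); err != nil {
			fmt.Printf("%q rejected: %v\n", mode, err)
			continue
		}
		fmt.Printf("%q accepted\n", mode)
	}
}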
diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go
index bfb967582..bc8ddc655 100644
--- a/vendor/github.com/containers/common/pkg/config/config_local.go
+++ b/vendor/github.com/containers/common/pkg/config/config_local.go
@@ -4,6 +4,7 @@
package config
import (
+ "fmt"
"os"
"path/filepath"
"regexp"
@@ -11,7 +12,6 @@ import (
"syscall"
units "github.com/docker/go-units"
- "github.com/pkg/errors"
)
// isDirectory tests whether the given path exists and is a directory. It
@@ -44,13 +44,13 @@ func (c *EngineConfig) validatePaths() error {
// shift between runs or even parts of the program. - The OCI runtime
// uses a different working directory than we do, for example.
if c.StaticDir != "" && !filepath.IsAbs(c.StaticDir) {
- return errors.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir)
+ return fmt.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir)
}
if c.TmpDir != "" && !filepath.IsAbs(c.TmpDir) {
- return errors.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir)
+ return fmt.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir)
}
if c.VolumePath != "" && !filepath.IsAbs(c.VolumePath) {
- return errors.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath)
+ return fmt.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath)
}
return nil
}
@@ -69,7 +69,7 @@ func (c *ContainersConfig) validateUlimits() error {
for _, u := range c.DefaultUlimits {
ul, err := units.ParseUlimit(u)
if err != nil {
- return errors.Wrapf(err, "unrecognized ulimit %s", u)
+ return fmt.Errorf("unrecognized ulimit %s: %w", u, err)
}
_, err = ul.GetRlimit()
if err != nil {
@@ -97,7 +97,7 @@ func (c *ContainersConfig) validateTZ() error {
}
}
- return errors.Errorf(
+ return fmt.Errorf(
"find timezone %s in paths: %s",
c.TZ, strings.Join(lookupPaths, ", "),
)
@@ -106,7 +106,7 @@ func (c *ContainersConfig) validateTZ() error {
func (c *ContainersConfig) validateUmask() error {
validUmask := regexp.MustCompile(`^[0-7]{1,4}$`)
if !validUmask.MatchString(c.Umask) {
- return errors.Errorf("not a valid umask %s", c.Umask)
+ return fmt.Errorf("not a valid umask %s", c.Umask)
}
return nil
}
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index a4e755a66..8fd951c4a 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -434,6 +434,16 @@ default_sysctls = [
#
#image_parallel_copies = 0
+# Tells container engines how to handle the builtin image volumes.
+# * bind: An anonymous named volume will be created and mounted
+# into the container.
+# * tmpfs: The volume is mounted onto the container as a tmpfs,
+# which allows users to create content that disappears when
+# the container is stopped.
+# * ignore: All volumes are just ignored and no action is taken.
+#
+#image_volume_mode = ""
+
# Default command to run the infra container
#
#infra_command = "/pause"
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index d988d3b1c..f381818f1 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -2,6 +2,7 @@ package config
import (
"bytes"
+ "errors"
"fmt"
"net"
"os"
@@ -19,7 +20,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/containers/storage/types"
"github.com/opencontainers/selinux/go-selinux"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -35,7 +35,7 @@ const (
// _conmonVersionFormatErr is used when the expected versio-format of conmon
// has changed.
- _conmonVersionFormatErr = "conmon version changed format"
+ _conmonVersionFormatErr = "conmon version changed format: %w"
// _defaultGraphRoot points to the default path of the graph root.
_defaultGraphRoot = "/var/lib/containers/storage"
@@ -43,26 +43,29 @@ const (
// _defaultTransport is a prefix that we apply to an image name to check
// docker hub first for the image.
_defaultTransport = "docker://"
+
+ // _defaultImageVolumeMode is a mode to handle built-in image volumes.
+ _defaultImageVolumeMode = "bind"
)
var (
- // DefaultInitPath is the default path to the container-init binary
+ // DefaultInitPath is the default path to the container-init binary.
DefaultInitPath = "/usr/libexec/podman/catatonit"
- // DefaultInfraImage to use for infra container
+ // DefaultInfraImage is the default image to run as infrastructure containers in pods.
DefaultInfraImage = ""
- // DefaultRootlessSHMLockPath is the default path for rootless SHM locks
+ // DefaultRootlessSHMLockPath is the default path for rootless SHM locks.
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
// DefaultDetachKeys is the default keys sequence for detaching a
- // container
+ // container.
DefaultDetachKeys = "ctrl-p,ctrl-q"
// ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH)
- // is out of date for the current podman version
+ // is out of date for the current podman version.
ErrConmonOutdated = errors.New("outdated conmon version")
- // ErrInvalidArg indicates that an invalid argument was passed
+ // ErrInvalidArg indicates that an invalid argument was passed.
ErrInvalidArg = errors.New("invalid argument")
- // DefaultHooksDirs defines the default hooks directory
+ // DefaultHooksDirs defines the default hooks directory.
DefaultHooksDirs = []string{"/usr/share/containers/oci/hooks.d"}
- // DefaultCapabilities for the default_capabilities option in the containers.conf file
+ // DefaultCapabilities is the default for the default_capabilities option in the containers.conf file.
DefaultCapabilities = []string{
"CAP_AUDIT_WRITE",
"CAP_CHOWN",
@@ -80,7 +83,7 @@ var (
"CAP_SYS_CHROOT",
}
- // It may seem a bit unconventional, but it is necessary to do so
+ // Search these locations in which CNIPlugins can be installed.
DefaultCNIPluginDirs = []string{
"/usr/local/libexec/cni",
"/usr/libexec/cni",
@@ -98,7 +101,7 @@ var (
}
// additionalHelperBinariesDir is an extra helper binaries directory that
// should be set during link-time, if different packagers put their
- // helper binary in a different location
+ // helper binary in a different location.
additionalHelperBinariesDir string
)
@@ -118,13 +121,13 @@ const (
// InstallPrefix is the prefix where podman will be installed.
// It can be overridden at build time.
_installPrefix = "/usr"
- // CgroupfsCgroupsManager represents cgroupfs native cgroup manager
+ // CgroupfsCgroupsManager represents cgroupfs native cgroup manager.
CgroupfsCgroupsManager = "cgroupfs"
// DefaultApparmorProfile specifies the default apparmor profile for the container.
DefaultApparmorProfile = apparmor.Profile
- // DefaultHostsFile is the default path to the hosts file
+ // DefaultHostsFile is the default path to the hosts file.
DefaultHostsFile = "/etc/hosts"
- // SystemdCgroupsManager represents systemd native cgroup manager
+ // SystemdCgroupsManager represents systemd native cgroup manager.
SystemdCgroupsManager = "systemd"
// DefaultLogSizeMax is the default value for the maximum log size
// allowed for a container. Negative values mean that no limit is imposed.
@@ -133,9 +136,9 @@ const (
// before rotation.
DefaultEventsLogSizeMax = uint64(1000000)
// DefaultPidsLimit is the default value for maximum number of processes
- // allowed inside a container
+ // allowed inside a container.
DefaultPidsLimit = 2048
- // DefaultPullPolicy pulls the image if it does not exist locally
+ // DefaultPullPolicy pulls the image if it does not exist locally.
DefaultPullPolicy = "missing"
// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
@@ -146,11 +149,11 @@ const (
// DefaultRootlessSignaturePolicyPath is the location within
// XDG_CONFIG_HOME of the rootless policy.json file.
DefaultRootlessSignaturePolicyPath = "containers/policy.json"
- // DefaultShmSize default value
+ // DefaultShmSize is the default upper limit on the size of tmpfs mounts.
DefaultShmSize = "65536k"
- // DefaultUserNSSize default value
+ // DefaultUserNSSize indicates the default number of UIDs allocated for user namespace within a container.
DefaultUserNSSize = 65536
- // OCIBufSize limits maximum LogSizeMax
+ // OCIBufSize limits maximum LogSizeMax.
OCIBufSize = 8192
// SeccompOverridePath if this exists it overrides the default seccomp path.
SeccompOverridePath = _etcDir + "/containers/seccomp.json"
@@ -158,7 +161,7 @@ const (
SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json"
)
-// DefaultConfig defines the default values from containers.conf
+// DefaultConfig defines the default values from containers.conf.
func DefaultConfig() (*Config, error) {
defaultEngineConfig, err := defaultConfigFromMemory()
if err != nil {
@@ -294,6 +297,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
}
c.HooksDir = DefaultHooksDirs
c.ImageDefaultTransport = _defaultTransport
+ c.ImageVolumeMode = _defaultImageVolumeMode
c.StateType = BoltDBStateStore
c.ImageBuildFormat = "oci"
@@ -350,7 +354,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
"/usr/local/bin/krun",
},
}
- // Needs to be called after populating c.OCIRuntimes
+ // Needs to be called after populating c.OCIRuntimes.
c.OCIRuntime = c.findRuntime()
c.ConmonEnvVars = []string{
@@ -411,8 +415,8 @@ func defaultTmpDir() (string, error) {
if !os.IsExist(err) {
return "", err
} else if err := os.Chmod(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil {
- // The directory already exist, just set the sticky bit
- return "", errors.Wrap(err, "set sticky bit on")
+ // The directory already exists, so we try to make sure it's private and has the sticky bit set on it.
+ return "", fmt.Errorf("set sticky bit on: %w", err)
}
}
return filepath.Join(libpodRuntimeDir, "tmp"), nil
@@ -435,7 +439,7 @@ func probeConmon(conmonBinary string) error {
}
major, err := strconv.Atoi(matches[1])
if err != nil {
- return errors.Wrap(err, _conmonVersionFormatErr)
+ return fmt.Errorf(_conmonVersionFormatErr, err)
}
if major < _conmonMinMajorVersion {
return ErrConmonOutdated
@@ -446,7 +450,7 @@ func probeConmon(conmonBinary string) error {
minor, err := strconv.Atoi(matches[2])
if err != nil {
- return errors.Wrap(err, _conmonVersionFormatErr)
+ return fmt.Errorf(_conmonVersionFormatErr, err)
}
if minor < _conmonMinMinorVersion {
return ErrConmonOutdated
@@ -457,7 +461,7 @@ func probeConmon(conmonBinary string) error {
patch, err := strconv.Atoi(matches[3])
if err != nil {
- return errors.Wrap(err, _conmonVersionFormatErr)
+ return fmt.Errorf(_conmonVersionFormatErr, err)
}
if patch < _conmonMinPatchVersion {
return ErrConmonOutdated
@@ -469,7 +473,7 @@ func probeConmon(conmonBinary string) error {
return nil
}
-// NetNS returns the default network namespace
+// NetNS returns the default network namespace.
func (c *Config) NetNS() string {
return c.Containers.NetNS
}
@@ -478,7 +482,7 @@ func (c EngineConfig) EventsLogMaxSize() uint64 {
return uint64(c.EventsLogFileMaxSize)
}
-// SecurityOptions returns the default security options
+// SecurityOptions returns the default security options.
func (c *Config) SecurityOptions() []string {
securityOpts := []string{}
if c.Containers.SeccompProfile != "" && c.Containers.SeccompProfile != SeccompDefaultPath {
@@ -493,82 +497,82 @@ func (c *Config) SecurityOptions() []string {
return securityOpts
}
-// Sysctls returns the default sysctls
+// Sysctls returns the default sysctls to set in containers.
func (c *Config) Sysctls() []string {
return c.Containers.DefaultSysctls
}
-// Volumes returns the default additional volumes for containersvolumes
+// Volumes returns the default set of volumes that should be mounted in containers.
func (c *Config) Volumes() []string {
return c.Containers.Volumes
}
-// Devices returns the default additional devices for containers
+// Devices returns the default additional devices for containers.
func (c *Config) Devices() []string {
return c.Containers.Devices
}
-// DNSServers returns the default DNS servers to add to resolv.conf in containers
+// DNSServers returns the default DNS servers to add to resolv.conf in containers.
func (c *Config) DNSServers() []string {
return c.Containers.DNSServers
}
-// DNSSerches returns the default DNS searches to add to resolv.conf in containers
+// DNSSearches returns the default DNS searches to add to resolv.conf in containers.
func (c *Config) DNSSearches() []string {
return c.Containers.DNSSearches
}
-// DNSOptions returns the default DNS options to add to resolv.conf in containers
+// DNSOptions returns the default DNS options to add to resolv.conf in containers.
func (c *Config) DNSOptions() []string {
return c.Containers.DNSOptions
}
-// Env returns the default additional environment variables to add to containers
+// Env returns the default additional environment variables to add to containers.
func (c *Config) Env() []string {
return c.Containers.Env
}
-// InitPath returns the default init path to add to containers
+// InitPath returns the path to the init program added to containers when users specify the --init flag.
func (c *Config) InitPath() string {
return c.Containers.InitPath
}
-// IPCNS returns the default IPC Namespace configuration to run containers with
+// IPCNS returns the default IPC Namespace configuration to run containers with.
func (c *Config) IPCNS() string {
return c.Containers.IPCNS
}
-// PIDNS returns the default PID Namespace configuration to run containers with
+// PIDNS returns the default PID Namespace configuration to run containers with.
func (c *Config) PidNS() string {
return c.Containers.PidNS
}
-// CgroupNS returns the default Cgroup Namespace configuration to run containers with
+// CgroupNS returns the default Cgroup Namespace configuration to run containers with.
func (c *Config) CgroupNS() string {
return c.Containers.CgroupNS
}
-// Cgroups returns whether to containers with cgroup confinement
+// Cgroups returns whether to run containers in their own control groups, as configured by the "cgroups" setting in containers.conf.
func (c *Config) Cgroups() string {
return c.Containers.Cgroups
}
-// UTSNS returns the default UTS Namespace configuration to run containers with
+// UTSNS returns the default UTS Namespace configuration to run containers with.
func (c *Config) UTSNS() string {
return c.Containers.UTSNS
}
-// ShmSize returns the default size for temporary file systems to use in containers
+// ShmSize returns the default size for temporary file systems to use in containers.
func (c *Config) ShmSize() string {
return c.Containers.ShmSize
}
-// Ulimits returns the default ulimits to use in containers
+// Ulimits returns the default ulimits to use in containers.
func (c *Config) Ulimits() []string {
return c.Containers.DefaultUlimits
}
-// PidsLimit returns the default maximum number of pids to use in containers
+// PidsLimit returns the default maximum number of pids to use in containers.
func (c *Config) PidsLimit() int64 {
if unshare.IsRootless() {
if c.Engine.CgroupManager != SystemdCgroupsManager {
@@ -583,12 +587,12 @@ func (c *Config) PidsLimit() int64 {
return c.Containers.PidsLimit
}
-// DetachKeys returns the default detach keys to detach from a container
+// DetachKeys returns the default detach keys to detach from a container.
func (c *Config) DetachKeys() string {
return c.Engine.DetachKeys
}
-// Tz returns the timezone in the container
+// TZ returns the timezone to set in containers.
func (c *Config) TZ() string {
return c.Containers.TZ
}
@@ -598,17 +602,17 @@ func (c *Config) Umask() string {
}
// LogDriver returns the logging driver to be used
-// currently k8s-file or journald
+// currently k8s-file or journald.
func (c *Config) LogDriver() string {
return c.Containers.LogDriver
}
-// MachineEnabled returns if podman is running inside a VM or not
+// MachineEnabled returns if podman is running inside a VM or not.
func (c *Config) MachineEnabled() bool {
return c.Engine.MachineEnabled
}
-// MachineVolumes returns volumes to mount into the VM
+// MachineVolumes returns volumes to mount into the VM.
func (c *Config) MachineVolumes() ([]string, error) {
return machineVolumes(c.Machine.Volumes)
}
@@ -619,10 +623,10 @@ func machineVolumes(volumes []string) ([]string, error) {
vol := os.ExpandEnv(v)
split := strings.Split(vol, ":")
if len(split) < 2 || len(split) > 3 {
- return nil, errors.Errorf("invalid machine volume %s, 2 or 3 fields required", v)
+ return nil, fmt.Errorf("invalid machine volume %s, 2 or 3 fields required", v)
}
if split[0] == "" || split[1] == "" {
- return nil, errors.Errorf("invalid machine volume %s, fields must container data", v)
+ return nil, fmt.Errorf("invalid machine volume %s, fields must container data", v)
}
translatedVolumes = append(translatedVolumes, vol)
}
diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go
index 8c1f0ec29..c85227fe4 100644
--- a/vendor/github.com/containers/common/pkg/config/pull_policy.go
+++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go
@@ -2,8 +2,6 @@ package config
import (
"fmt"
-
- "github.com/pkg/errors"
)
// PullPolicy determines how and which images are being pulled from a container
@@ -63,7 +61,7 @@ func (p PullPolicy) Validate() error {
case PullPolicyAlways, PullPolicyMissing, PullPolicyNewer, PullPolicyNever:
return nil
default:
- return errors.Errorf("unsupported pull policy %d", p)
+ return fmt.Errorf("unsupported pull policy %d", p)
}
}
@@ -85,7 +83,7 @@ func ParsePullPolicy(s string) (PullPolicy, error) {
case "never", "Never":
return PullPolicyNever, nil
default:
- return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s)
+ return PullPolicyUnsupported, fmt.Errorf("unsupported pull policy %q", s)
}
}
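A usage sketch for ParsePullPolicy, assuming the same vendored import path; the documented spellings such as "missing" and "never" are accepted, and anything else yields PullPolicyUnsupported with an error:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	for _, s := range []string{"missing", "newer", "never", "sometimes"} {
		p, err := config.ParsePullPolicy(s)
		if err != nil {
			// e.g. unsupported pull policy "sometimes"
			fmt.Println(err)
			continue
		}
		fmt.Printf("%s parsed as %v\n", s, p)
	}
}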
diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go
index 6c4958cc2..43b783e0c 100644
--- a/vendor/github.com/containers/common/pkg/parse/parse.go
+++ b/vendor/github.com/containers/common/pkg/parse/parse.go
@@ -14,7 +14,7 @@ import (
// ValidateVolumeOpts validates a volume's options
func ValidateVolumeOpts(options []string) ([]string, error) {
- var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int
+ var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir, foundCopy int
finalOpts := make([]string, 0, len(options))
for _, opt := range options {
// support advanced options like upperdir=/path, workdir=/path
@@ -88,6 +88,11 @@ func ValidateVolumeOpts(options []string) ([]string, error) {
// are intended to be always safe to use, even not on OS
// X).
continue
+ case "copy", "nocopy":
+ foundCopy++
+ if foundCopy > 1 {
+ return nil, errors.Errorf("invalid options %q, can only specify 1 'copy' or 'nocopy' option", strings.Join(options, ", "))
+ }
default:
return nil, errors.Errorf("invalid option type %q", opt)
}
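A usage sketch for the new copy/nocopy handling in ValidateVolumeOpts, assuming the vendored import path: a single copy or nocopy option passes through with the rest, while a second occurrence is rejected.

package main

import (
	"fmt"

	"github.com/containers/common/pkg/parse"
)

func main() {
	// A single copy/nocopy option is passed through with the other options...
	opts, err := parse.ValidateVolumeOpts([]string{"rw", "nocopy"})
	fmt.Println(opts, err)

	// ...but specifying both (or either one twice) trips the new foundCopy check.
	_, err = parse.ValidateVolumeOpts([]string{"copy", "nocopy"})
	fmt.Println(err)
}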
diff --git a/pkg/kubeutils/resize.go b/vendor/github.com/containers/common/pkg/resize/resize.go
index a744c66cc..9a2afcf73 100644
--- a/pkg/kubeutils/resize.go
+++ b/vendor/github.com/containers/common/pkg/resize/resize.go
@@ -14,16 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package kubeutils
+package resize
-import (
- "github.com/containers/podman/v4/libpod/define"
-)
+// TerminalSize represents the width and height of a terminal.
+type TerminalSize struct {
+ Width uint16
+ Height uint16
+}
// HandleResizing spawns a goroutine that processes the resize channel, calling resizeFunc for each
-// remotecommand.TerminalSize received from the channel. The resize channel must be closed elsewhere to stop the
+// TerminalSize received from the channel. The resize channel must be closed elsewhere to stop the
// goroutine.
-func HandleResizing(resize <-chan define.TerminalSize, resizeFunc func(size define.TerminalSize)) {
+func HandleResizing(resize <-chan TerminalSize, resizeFunc func(size TerminalSize)) {
if resize == nil {
return
}
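A usage sketch for the relocated HandleResizing helper, assuming the new import path shown above; the channel feeds TerminalSize values to the callback from a background goroutine:

package main

import (
	"fmt"
	"time"

	"github.com/containers/common/pkg/resize"
)

func main() {
	ch := make(chan resize.TerminalSize)

	// HandleResizing spawns a goroutine that calls the callback for every
	// TerminalSize received; closing the channel stops it.
	resize.HandleResizing(ch, func(size resize.TerminalSize) {
		fmt.Printf("resize to %dx%d\n", size.Width, size.Height)
	})

	ch <- resize.TerminalSize{Width: 80, Height: 24}
	ch <- resize.TerminalSize{Width: 120, Height: 40}
	close(ch)
	time.Sleep(100 * time.Millisecond) // demo only: let the goroutine drain
}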
diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go
index 234fd3448..321131f69 100644
--- a/vendor/github.com/containers/common/pkg/retry/retry.go
+++ b/vendor/github.com/containers/common/pkg/retry/retry.go
@@ -16,26 +16,29 @@ import (
"github.com/sirupsen/logrus"
)
-// RetryOptions defines the option to retry
-// revive does not like the name because the package is already called retry
-//nolint:revive
-type RetryOptions struct {
- MaxRetry int // The number of times to possibly retry
- Delay time.Duration // The delay to use between retries, if set
+// Options defines the option to retry.
+type Options struct {
+ MaxRetry int // The number of times to possibly retry.
+ Delay time.Duration // The delay to use between retries, if set.
}
-// RetryIfNecessary retries the operation in exponential backoff with the retryOptions
-//
-// revive does not like the name because the package is already called retry
-//nolint:revive
-func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
+// RetryOptions is deprecated, use Options.
+type RetryOptions = Options // nolint:revive
+
+// RetryIfNecessary is deprecated; use IfNecessary instead.
+func RetryIfNecessary(ctx context.Context, operation func() error, options *Options) error { // nolint:revive
+ return IfNecessary(ctx, operation, options)
+}
+
+// IfNecessary retries the operation in exponential backoff with the retry Options.
+func IfNecessary(ctx context.Context, operation func() error, options *Options) error {
err := operation()
- for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {
+ for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ {
delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
- if retryOptions.Delay != 0 {
- delay = retryOptions.Delay
+ if options.Delay != 0 {
+ delay = options.Delay
}
- logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, retryOptions.MaxRetry, err)
+ logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, options.MaxRetry, err)
select {
case <-time.After(delay):
break
@@ -96,6 +99,14 @@ func isRetryable(err error) bool {
}
}
return true
+ case net.Error:
+ if e.Timeout() {
+ return true
+ }
+ if unwrappable, ok := e.(unwrapper); ok {
+ err = unwrappable.Unwrap()
+ return isRetryable(err)
+ }
case unwrapper: // Test this last, because various error types might implement .Unwrap()
err = e.Unwrap()
return isRetryable(err)
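
The retry changes above rename RetryOptions/RetryIfNecessary to Options/IfNecessary (keeping the old names as deprecated aliases) and add a net.Error case so plain network timeouts become retryable. A sketch of the new call shape; whether a particular error is actually retried still depends on isRetryable:

    package main

    import (
        "context"
        "fmt"
        "net"
        "time"

        "github.com/containers/common/pkg/retry"
    )

    func main() {
        attempts := 0
        op := func() error {
            attempts++
            if attempts < 3 {
                // A timeout surfaced as a net.Error; under the new case this
                // should be considered retryable.
                return &net.DNSError{Err: "timeout", IsTimeout: true}
            }
            return nil
        }
        err := retry.IfNecessary(context.Background(), op, &retry.Options{
            MaxRetry: 5,
            Delay:    10 * time.Millisecond,
        })
        fmt.Println(attempts, err) // expected: 3 <nil>
    }
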
diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
index 3712afc71..0db77879c 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
@@ -221,6 +221,9 @@ func DefaultProfile() *Seccomp {
"ipc",
"keyctl",
"kill",
+ "landlock_add_rule",
+ "landlock_create_ruleset",
+ "landlock_restrict_self",
"lchown",
"lchown32",
"lgetxattr",
diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go
index 609036c82..7f1783efb 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/filter.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go
@@ -168,7 +168,8 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error {
func toAction(act Action, errnoRet *uint) (libseccomp.ScmpAction, error) {
switch act {
case ActKill:
- return libseccomp.ActKill, nil
+ // lint was not passing until this was changed from ActKill to ActKillThread.
+ return libseccomp.ActKillThread, nil
case ActKillProcess:
return libseccomp.ActKillProcess, nil
case ActErrno:
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
index 442632e7d..18674db4d 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
@@ -228,6 +228,9 @@
"ipc",
"keyctl",
"kill",
+ "landlock_add_rule",
+ "landlock_create_ruleset",
+ "landlock_restrict_self",
"lchown",
"lchown32",
"lgetxattr",
diff --git a/vendor/github.com/containers/common/pkg/util/copy.go b/vendor/github.com/containers/common/pkg/util/copy.go
new file mode 100644
index 000000000..a45b82fc9
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/util/copy.go
@@ -0,0 +1,57 @@
+package util
+
+import (
+ "errors"
+ "io"
+)
+
+// ErrDetach indicates that an attach session was manually detached by
+// the user.
+var ErrDetach = errors.New("detached from container")
+
+// CopyDetachable is similar to io.Copy but supports a detach key sequence to break out.
+func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) {
+ buf := make([]byte, 32*1024)
+ for {
+ nr, er := src.Read(buf)
+ if nr > 0 {
+ preservBuf := []byte{}
+ for i, key := range keys {
+ preservBuf = append(preservBuf, buf[0:nr]...)
+ if nr != 1 || buf[0] != key {
+ break
+ }
+ if i == len(keys)-1 {
+ return 0, ErrDetach
+ }
+ nr, er = src.Read(buf)
+ }
+ var nw int
+ var ew error
+ if len(preservBuf) > 0 {
+ nw, ew = dst.Write(preservBuf)
+ nr = len(preservBuf)
+ } else {
+ nw, ew = dst.Write(buf[0:nr])
+ }
+ if nw > 0 {
+ written += int64(nw)
+ }
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw {
+ err = io.ErrShortWrite
+ break
+ }
+ }
+ if er != nil {
+ if er != io.EOF {
+ err = er
+ }
+ break
+ }
+ }
+ return written, err
+}
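
CopyDetachable above behaves like io.Copy until it sees the configured detach key sequence, at which point it returns ErrDetach. A minimal sketch, assuming the conventional ctrl-p,ctrl-q detach bytes (0x10, 0x11):

    package main

    import (
        "bytes"
        "errors"
        "fmt"
        "os"

        "github.com/containers/common/pkg/util"
    )

    func main() {
        detachKeys := []byte{0x10, 0x11} // ctrl-p, ctrl-q
        src := bytes.NewReader([]byte("container output ...\n"))

        n, err := util.CopyDetachable(os.Stdout, src, detachKeys)
        if errors.Is(err, util.ErrDetach) {
            fmt.Println("user detached from the attach session")
            return
        }
        fmt.Printf("copied %d bytes, err=%v\n", n, err)
    }
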
diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go
new file mode 100644
index 000000000..020e703e8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/blob.go
@@ -0,0 +1,170 @@
+package copy
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps (de/re/)compressing it if canModifyBlob,
+// and returns a complete blobInfo of the copied blob.
+func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo,
+ getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
+ isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
+ // The copying happens through a pipeline of connected io.Readers;
+ // that pipeline is built by updating stream.
+ // === Input: srcReader
+ stream := sourceStream{
+ reader: srcReader,
+ info: srcInfo,
+ }
+
+ // === Process input through digestingReader to validate against the expected digest.
+ // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+ // use a separate validation failure indicator.
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
+ // dest.PutBlob may detect that the layer already exists, in which case we don't
+ // read stream to the end, and validation does not happen.
+ digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
+ }
+ stream.reader = digestingReader
+
+ // === Update progress bars
+ stream.reader = bar.ProxyReader(stream.reader)
+
+ // === Decrypt the stream, if required.
+ decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Detect compression of the input stream.
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
+ var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
+ if getOriginalLayerCopyWriter != nil {
+ stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor))
+ originalLayerReader = stream.reader
+ }
+
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ canModifyBlob := !isConfig && ic.cannotModifyManifestReason == ""
+ // === Deal with layer compression/decompression if necessary
+ compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer compressionStep.close()
+
+ // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
+ if decryptionStep.decrypting && toEncrypt {
+ // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value.
+ // Before relaxing this, see the original pull request’s review if there are other reasons to reject this.
+ return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
+ }
+ encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Report progress using the ic.c.progress channel, if required.
+ if ic.c.progress != nil && ic.c.progressInterval > 0 {
+ progressReader := newProgressReader(
+ stream.reader,
+ ic.c.progress,
+ ic.c.progressInterval,
+ srcInfo,
+ )
+ defer progressReader.reportDone()
+ stream.reader = progressReader
+ }
+
+ // === Finally, send the layer stream to dest.
+ options := private.PutBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ IsConfig: isConfig,
+ EmptyLayer: emptyLayer,
+ }
+ if !isConfig {
+ options.LayerIndex = &layerIndex
+ }
+ uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "writing blob")
+ }
+
+ uploadedInfo.Annotations = stream.info.Annotations
+
+ compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
+ decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
+ if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+ // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+ // So, read everything from originalLayerReader, which will cause the rest to be
+ // sent there if we are not already at EOF.
+ if getOriginalLayerCopyWriter != nil {
+ logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+ _, err := io.Copy(io.Discard, originalLayerReader)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
+ }
+ }
+
+ if digestingReader.validationFailed { // Coverage: This should never happen.
+ return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+ }
+ if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
+ return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
+ }
+ if digestingReader.validationSucceeded {
+ if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
+ return types.BlobInfo{}, err
+ }
+ }
+
+ return uploadedInfo, nil
+}
+
+// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built.
+// This allows handlers of individual aspects to build the copy pipeline without _too much_
+// specific cooperation by the caller.
+//
+// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline
+// without specific knowledge of various aspects in copyBlobFromStream; that may come one day.
+type sourceStream struct {
+ reader io.Reader
+ info types.BlobInfo // corresponding to the data available in reader.
+}
+
+// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating errors that happen during read.
+// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
+type errorAnnotationReader struct {
+ reader io.Reader
+}
+
+// Read annotates any error that happened during the read.
+func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
+ n, err = r.reader.Read(b)
+ if err != io.EOF {
+ return n, errors.Wrapf(err, "happened during read")
+ }
+ return n, err
+}
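
copyBlobFromStream above builds the copy as a pipeline of io.Readers: each step (decryption, compression detection, progress, encryption) wraps stream.reader and hands back a small step-data struct for later use. A self-contained sketch of that wrapping pattern with a hypothetical byte-counting step, not part of containers/image:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // stream mirrors the role of sourceStream: a reader that is rebuilt step by step.
    type stream struct {
        reader io.Reader
    }

    type countingReader struct {
        inner io.Reader
        n     int64
    }

    func (r *countingReader) Read(p []byte) (int, error) {
        n, err := r.inner.Read(p)
        r.n += int64(n)
        return n, err
    }

    // addCountingStep wraps s.reader the same way the decryption, compression
    // and progress steps do, and returns its step data for later inspection.
    func addCountingStep(s *stream) *countingReader {
        c := &countingReader{inner: s.reader}
        s.reader = c
        return c
    }

    func main() {
        s := &stream{reader: strings.NewReader("blob bytes")}
        counter := addCountingStep(s)
        if _, err := io.Copy(io.Discard, s.reader); err != nil {
            panic(err)
        }
        fmt.Println("bytes seen by the step:", counter.n) // 10
    }
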
diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go
new file mode 100644
index 000000000..99305a039
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/compression.go
@@ -0,0 +1,320 @@
+package copy
+
+import (
+ "io"
+
+ internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
+type bpDetectCompressionStepData struct {
+ isCompressed bool
+ format compressiontypes.Algorithm // Valid if isCompressed
+ decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
+ srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob.
+}
+
+// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
+// srcInfo is only used for error messages.
+// Returns data for other steps.
+func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) {
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
+ if err != nil {
+ return bpDetectCompressionStepData{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
+ }
+ stream.reader = reader
+
+ res := bpDetectCompressionStepData{
+ isCompressed: decompressor != nil,
+ format: format,
+ decompressor: decompressor,
+ }
+ if res.isCompressed {
+ res.srcCompressorName = format.Name()
+ } else {
+ res.srcCompressorName = internalblobinfocache.Uncompressed
+ }
+
+ if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
+ logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
+ }
+ return res, nil
+}
+
+// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
+type bpCompressionStepData struct {
+ operation types.LayerCompression // Operation to use for updating the blob metadata.
+ uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+ uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+ srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
+ uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
+ closers []io.Closer // Objects to close after the upload is done, if any.
+}
+
+// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData,
+// and must eventually call close.
+func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo,
+ detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType)
+ if !layerCompressionChangeSupported {
+ logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType)
+ }
+ if canModifyBlob && layerCompressionChangeSupported {
+ for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){
+ ic.bpcPreserveEncrypted,
+ ic.bpcCompressUncompressed,
+ ic.bpcRecompressCompressed,
+ ic.bpcDecompressCompressed,
+ } {
+ res, err := fn(stream, detected)
+ if err != nil {
+ return nil, err
+ }
+ if res != nil {
+ return res, nil
+ }
+ }
+ }
+ return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil
+}
+
+// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if isOciEncrypted(stream.info.MediaType) {
+ logrus.Debugf("Using original blob without modification for encrypted blob")
+ // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: nil,
+ srcCompressorName: internalblobinfocache.UnknownCompression,
+ uploadedCompressorName: internalblobinfocache.UnknownCompression,
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
+ logrus.Debugf("Compressing blob on the fly")
+ var uploadedAlgorithm *compressiontypes.Algorithm
+ if ic.c.compressionFormat != nil {
+ uploadedAlgorithm = ic.c.compressionFormat
+ } else {
+ uploadedAlgorithm = defaultCompressionFormat
+ }
+
+ reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm)
+ // Note: reader must be closed on all return paths.
+ stream.reader = reader
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+ Digest: "",
+ Size: -1,
+ }
+ return &bpCompressionStepData{
+ operation: types.Compress,
+ uploadedAlgorithm: uploadedAlgorithm,
+ uploadedAnnotations: annotations,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: uploadedAlgorithm.Name(),
+ closers: []io.Closer{reader},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
+ ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() {
+ // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
+ // re-compressed using the desired format.
+ logrus.Debugf("Blob will be converted")
+
+ decompressed, err := detected.decompressor(stream.reader)
+ if err != nil {
+ return nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ decompressed.Close()
+ }
+ }()
+
+ recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat)
+ // Note: recompressed must be closed on all return paths.
+ stream.reader = recompressed
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+ Digest: "",
+ Size: -1,
+ }
+ succeeded = true
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: ic.c.compressionFormat,
+ uploadedAnnotations: annotations,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: ic.c.compressionFormat.Name(),
+ closers: []io.Closer{decompressed, recompressed},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed {
+ logrus.Debugf("Blob will be decompressed")
+ s, err := detected.decompressor(stream.reader)
+ if err != nil {
+ return nil, err
+ }
+ // Note: s must be closed on all return paths.
+ stream.reader = s
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+ Digest: "",
+ Size: -1,
+ }
+ return &bpCompressionStepData{
+ operation: types.Decompress,
+ uploadedAlgorithm: nil,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: internalblobinfocache.Uncompressed,
+ closers: []io.Closer{s},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
+func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData,
+ layerCompressionChangeSupported bool) *bpCompressionStepData {
+ logrus.Debugf("Using original blob without modification")
+ // Remember if the original blob was compressed, and if so how, so that if
+ // LayerInfosForCopy() returned something that differs from what was in the
+ // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
+ // it will be able to correctly derive the MediaType for the copied blob.
+ //
+ // But don’t touch blobs in objects where we can’t change compression,
+ // so that src.UpdatedImage() doesn’t fail; assume that for such blobs
+ // LayerInfosForCopy() should not be making any changes in the first place.
+ var algorithm *compressiontypes.Algorithm
+ if layerCompressionChangeSupported && detected.isCompressed {
+ algorithm = &detected.format
+ } else {
+ algorithm = nil
+ }
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: algorithm,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: detected.srcCompressorName,
+ }
+}
+
+// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
+func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
+ *operation = d.operation
+ // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
+ *algorithm = d.uploadedAlgorithm
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ for k, v := range d.uploadedAnnotations {
+ (*annotations)[k] = v
+ }
+}
+
+// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
+// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
+ encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encryptionStep.encrypting && !decryptionStep.decrypting {
+ // If d.operation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
+ // (because stream.info.Digest == "", this must have been computed afresh).
+ switch d.operation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ case types.Decompress:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ default:
+ return errors.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
+ }
+ }
+ if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
+ c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
+ }
+ if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
+ c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
+ }
+ return nil
+}
+
+// close closes objects that carry state throughout the compression/decompression operation.
+func (d *bpCompressionStepData) close() {
+ for _, c := range d.closers {
+ c.Close()
+ }
+}
+
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
+ compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, compressionBufferSize)
+
+ _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
+ if err != nil {
+ compressor.Close()
+ return err
+ }
+
+ return compressor.Close()
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
+ err := errors.New("Internal error: unexpected panic in compressGoroutine")
+ defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
+}
+
+// compressedStream returns a stream with the contents of the input reader compressed using algorithm, and a metadata map.
+// The caller must close the returned reader.
+// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
+func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
+ pipeReader, pipeWriter := io.Pipe()
+ annotations := map[string]string{}
+ // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+ // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
+ // we don’t care.
+ go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
+ return pipeReader, annotations
+}
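
blobPipelineCompressionStep above tries a fixed list of candidate handlers (preserve-encrypted, compress, recompress, decompress) and takes the first one that returns a non-nil plan, falling back to preserving the original blob. A self-contained sketch of that dispatch pattern with hypothetical names:

    package main

    import "fmt"

    type plan struct{ action string }

    func preserveEncrypted(encrypted, compressed bool) *plan {
        if encrypted {
            return &plan{action: "preserve (encrypted)"}
        }
        return nil
    }

    func compressUncompressed(encrypted, compressed bool) *plan {
        if !compressed {
            return &plan{action: "compress"}
        }
        return nil
    }

    // choosePlan returns the first non-nil plan, mirroring the loop over
    // ic.bpcPreserveEncrypted, ic.bpcCompressUncompressed, ... in the hunk above.
    func choosePlan(encrypted, compressed bool) *plan {
        for _, fn := range []func(bool, bool) *plan{
            preserveEncrypted,
            compressUncompressed,
        } {
            if p := fn(encrypted, compressed); p != nil {
                return p
            }
        }
        return &plan{action: "preserve original"}
    }

    func main() {
        fmt.Println(choosePlan(false, false).action) // compress
        fmt.Println(choosePlan(true, true).action)   // preserve (encrypted)
        fmt.Println(choosePlan(false, true).action)  // preserve original
    }
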
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 123c23e02..0df595237 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -12,8 +12,8 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagesource"
"github.com/containers/image/v5/internal/pkg/platform"
@@ -25,7 +25,6 @@ import (
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/containers/ocicrypt"
encconfig "github.com/containers/ocicrypt/config"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -82,7 +81,7 @@ type copier struct {
type imageCopier struct {
c *copier
manifestUpdates *types.ManifestUpdateOptions
- src types.Image
+ src *image.SourcedImage
diffIDsAreNeeded bool
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
canSubstituteBlobs bool
@@ -349,13 +348,8 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal.
-func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
- srcManifest, _, err := src.Manifest(ctx)
- if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "reading manifest from image")
- }
-
- srcManifestDigest, err := manifest.Digest(srcManifest)
+func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
+ srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
if err != nil {
return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
}
@@ -620,11 +614,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
if named := c.dest.Reference().DockerReference(); named != nil {
if digested, ok := named.(reference.Digested); ok {
destIsDigestedReference = true
- sourceManifest, _, err := src.Manifest(ctx)
- if err != nil {
- return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
- }
- matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
+ matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
if err != nil {
return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
}
@@ -688,12 +678,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
cannotModifyManifestReason: cannotModifyManifestReason,
ociEncryptLayers: options.OciEncryptLayers,
}
- // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
- // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
- // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
- // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
- // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
- // and we would reuse and sign it.
+ // Decide whether we can substitute blobs with semantic equivalents:
+ // - Don’t do that if we can’t modify the manifest at all
+ // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == ""
if err := ic.updateEmbeddedDockerReference(); err != nil {
@@ -702,12 +694,23 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
- // We compute preferredManifestMIMEType only to show it in error messages.
- // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
- preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption)
+ manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
+ srcMIMEType: ic.src.ManifestMIMEType,
+ destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
+ forceManifestMIMEType: options.ForceManifestMIMEType,
+ requiresOCIEncryption: destRequiresOciEncryption,
+ cannotModifyManifestReason: ic.cannotModifyManifestReason,
+ })
if err != nil {
return nil, "", "", err
}
+ // We set up this part of ic.manifestUpdates quite early, not just around the
+ // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
+ // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
+ // on the expected destination format.
+ if manifestConversionPlan.preferredMIMETypeNeedsConversion {
+ ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
+ }
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
@@ -742,22 +745,22 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
// we're altering how they're compressed. If the process succeeds, fine…
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
- retManifestType = preferredManifestMIMEType
+ retManifestType = manifestConversionPlan.preferredMIMEType
if err != nil {
- logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
+ logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
// because we failed to create a manifest of the specified type because the specific manifest type
// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
// have other options available that could still succeed.
_, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
_, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
- if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 {
+ if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
// We don’t have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don’t bother the user with MIME types if we have no choice.
return nil, "", "", err
}
- // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
+ // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
// so let’s bail out early and with a better error message.
@@ -766,8 +769,8 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
- errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
- for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
+ errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
+ for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
@@ -908,11 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// The manifest is used to extract the information whether a given
// layer is empty.
- manifestBlob, manifestType, err := ic.src.Manifest(ctx)
- if err != nil {
- return err
- }
- man, err := manifest.FromBlob(manifestBlob, manifestType)
+ man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
if err != nil {
return err
}
@@ -1022,7 +1021,7 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool {
// stores the resulting config and manifest to the destination, and returns the stored manifest
// and its digest.
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
- pendingImage := ic.src
+ var pendingImage types.Image = ic.src
if !ic.noPendingManifestUpdates() {
if ic.cannotModifyManifestReason != "" {
return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
@@ -1047,7 +1046,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
return nil, "", errors.Wrap(err, "reading manifest")
}
- if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
+ if err := ic.copyConfig(ctx, pendingImage); err != nil {
return nil, "", err
}
@@ -1067,19 +1066,19 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
}
// copyConfig copies config.json, if any, from src to dest.
-func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
+func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" {
- if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
return fmt.Errorf("copying config: %w", err)
}
- defer c.concurrentBlobCopiesSemaphore.Release(1)
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
- progressPool := c.newProgressPool()
+ progressPool := ic.c.newProgressPool()
defer progressPool.Wait()
- bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+ bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
defer bar.Abort(false)
configBlob, err := src.ConfigBlob(ctx)
@@ -1087,7 +1086,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
}
- destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
+ destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
if err != nil {
return types.BlobInfo{}, err
}
@@ -1146,6 +1145,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
if canAvoidProcessingCompleteLayer {
+ canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+ logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+ srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+ canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
@@ -1154,7 +1157,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// the ImageDestination interface lets us pass in.
reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache,
- CanSubstitute: ic.canSubstituteBlobs,
+ CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
@@ -1303,7 +1306,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
}
}
- blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
+ blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
@@ -1333,350 +1336,3 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
return digest.Canonical.FromReader(stream)
}
-
-// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating the error happened during read.
-// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
-type errorAnnotationReader struct {
- reader io.Reader
-}
-
-// Read annotates the error happened during read
-func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
- n, err = r.reader.Read(b)
- if err != io.EOF {
- return n, errors.Wrapf(err, "happened during read")
- }
- return n, err
-}
-
-// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
-// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
-// perhaps (de/re/)compressing it if canModifyBlob,
-// and returns a complete blobInfo of the copied blob.
-func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
- canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
- if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
- canModifyBlob = false
- }
-
- // The copying happens through a pipeline of connected io.Readers.
- // === Input: srcStream
-
- // === Process input through digestingReader to validate against the expected digest.
- // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
- // use a separate validation failure indicator.
- // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
- // dest.PutBlob may detect that the layer already exists, in which case we don't
- // read stream to the end, and validation does not happen.
- digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
- }
- var destStream io.Reader = digestingReader
-
- // === Update progress bars
- destStream = bar.ProxyReader(destStream)
-
- // === Decrypt the stream, if required.
- var decrypted bool
- if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
- newDesc := imgspecv1.Descriptor{
- Annotations: srcInfo.Annotations,
- }
-
- var d digest.Digest
- destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
- }
-
- srcInfo.Digest = d
- srcInfo.Size = -1
- for k := range srcInfo.Annotations {
- if strings.HasPrefix(k, "org.opencontainers.image.enc") {
- delete(srcInfo.Annotations, k)
- }
- }
- decrypted = true
- }
-
- // === Detect compression of the input stream.
- // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
- compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
- }
- isCompressed := decompressor != nil
- if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() {
- logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
- }
-
- // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
- var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
- if getOriginalLayerCopyWriter != nil {
- destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
- originalLayerReader = destStream
- }
-
- compressionMetadata := map[string]string{}
- // === Deal with layer compression/decompression if necessary
- // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
- // short-circuit conditions
- var inputInfo types.BlobInfo
- var compressionOperation types.LayerCompression
- var uploadCompressionFormat *compressiontypes.Algorithm
- srcCompressorName := internalblobinfocache.Uncompressed
- if isCompressed {
- srcCompressorName = compressionFormat.Name()
- }
- var uploadCompressorName string
- if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
- // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
- logrus.Debugf("Using original blob without modification for encrypted blob")
- compressionOperation = types.PreserveOriginal
- inputInfo = srcInfo
- srcCompressorName = internalblobinfocache.UnknownCompression
- uploadCompressionFormat = nil
- uploadCompressorName = internalblobinfocache.UnknownCompression
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
- logrus.Debugf("Compressing blob on the fly")
- compressionOperation = types.Compress
- pipeReader, pipeWriter := io.Pipe()
- defer pipeReader.Close()
-
- if c.compressionFormat != nil {
- uploadCompressionFormat = c.compressionFormat
- } else {
- uploadCompressionFormat = defaultCompressionFormat
- }
- // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
- // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
- // we don’t care.
- go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
- destStream = pipeReader
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressorName = uploadCompressionFormat.Name()
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed &&
- c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() {
- // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
- // re-compressed using the desired format.
- logrus.Debugf("Blob will be converted")
-
- compressionOperation = types.PreserveOriginal
- s, err := decompressor(destStream)
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer s.Close()
-
- pipeReader, pipeWriter := io.Pipe()
- defer pipeReader.Close()
-
- uploadCompressionFormat = c.compressionFormat
- go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
-
- destStream = pipeReader
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressorName = uploadCompressionFormat.Name()
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
- logrus.Debugf("Blob will be decompressed")
- compressionOperation = types.Decompress
- s, err := decompressor(destStream)
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer s.Close()
- destStream = s
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressionFormat = nil
- uploadCompressorName = internalblobinfocache.Uncompressed
- } else {
- // PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
- logrus.Debugf("Using original blob without modification")
- compressionOperation = types.PreserveOriginal
- inputInfo = srcInfo
- // Remember if the original blob was compressed, and if so how, so that if
- // LayerInfosForCopy() returned something that differs from what was in the
- // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
- // it will be able to correctly derive the MediaType for the copied blob.
- if isCompressed {
- uploadCompressionFormat = &compressionFormat
- } else {
- uploadCompressionFormat = nil
- }
- uploadCompressorName = srcCompressorName
- }
-
- // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
- var (
- encrypted bool
- finalizer ocicrypt.EncryptLayerFinalizer
- )
- if toEncrypt {
- if decrypted {
- return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
- }
-
- if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
- var annotations map[string]string
- if !decrypted {
- annotations = srcInfo.Annotations
- }
- desc := imgspecv1.Descriptor{
- MediaType: srcInfo.MediaType,
- Digest: srcInfo.Digest,
- Size: srcInfo.Size,
- Annotations: annotations,
- }
-
- s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
- }
-
- destStream = s
- finalizer = fin
- inputInfo.Digest = ""
- inputInfo.Size = -1
- encrypted = true
- }
- }
-
- // === Report progress using the c.progress channel, if required.
- if c.progress != nil && c.progressInterval > 0 {
- progressReader := newProgressReader(
- destStream,
- c.progress,
- c.progressInterval,
- srcInfo,
- )
- defer progressReader.reportDone()
- destStream = progressReader
- }
-
- // === Finally, send the layer stream to dest.
- options := private.PutBlobOptions{
- Cache: c.blobInfoCache,
- IsConfig: isConfig,
- EmptyLayer: emptyLayer,
- }
- if !isConfig {
- options.LayerIndex = &layerIndex
- }
- uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "writing blob")
- }
-
- uploadedInfo.Annotations = srcInfo.Annotations
-
- uploadedInfo.CompressionOperation = compressionOperation
- // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
- uploadedInfo.CompressionAlgorithm = uploadCompressionFormat
- if decrypted {
- uploadedInfo.CryptoOperation = types.Decrypt
- } else if encrypted {
- encryptAnnotations, err := finalizer()
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption")
- }
- uploadedInfo.CryptoOperation = types.Encrypt
- if uploadedInfo.Annotations == nil {
- uploadedInfo.Annotations = map[string]string{}
- }
- for k, v := range encryptAnnotations {
- uploadedInfo.Annotations[k] = v
- }
- }
-
- // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
- // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
- // So, read everything from originalLayerReader, which will cause the rest to be
- // sent there if we are not already at EOF.
- if getOriginalLayerCopyWriter != nil {
- logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
- _, err := io.Copy(io.Discard, originalLayerReader)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
- }
- }
-
- if digestingReader.validationFailed { // Coverage: This should never happen.
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
- }
- if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
- }
- if digestingReader.validationSucceeded {
- // Don’t record any associations that involve encrypted data. This is a bit crude,
- // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
- // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
- // This crude approach also means we don’t need to record whether a blob is encrypted
- // in the blob info cache (which would probably be necessary for any more complex logic),
- // and the simplicity is attractive.
- if !encrypted && !decrypted {
- // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
- // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
- // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
- // (because inputInfo.Digest == "", this must have been computed afresh).
- switch compressionOperation {
- case types.PreserveOriginal:
- break // Do nothing, we have only one digest and we might not have even verified it.
- case types.Compress:
- c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
- case types.Decompress:
- c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
- default:
- return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
- }
- }
- if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
- c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
- }
- if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression {
- c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
- }
- }
-
- // Copy all the metadata generated by the compressor into the annotations.
- if uploadedInfo.Annotations == nil {
- uploadedInfo.Annotations = map[string]string{}
- }
- for k, v := range compressionMetadata {
- uploadedInfo.Annotations[k] = v
- }
-
- return uploadedInfo, nil
-}
-
-// doCompression reads all input from src and writes its compressed equivalent to dest.
-func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
- compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
- if err != nil {
- return err
- }
-
- buf := make([]byte, compressionBufferSize)
-
- _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
- if err != nil {
- compressor.Close()
- return err
- }
-
- return compressor.Close()
-}
-
-// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
- err := errors.New("Internal error: unexpected panic in compressGoroutine")
- defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
- _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
- }()
-
- err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
-}
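
A minimal stand-alone sketch (not part of the vendored code) of the deferred-closure pattern used by compressGoroutine above: deferring a closure, rather than dest.CloseWithError(err) directly, means err is evaluated only at return time, so the pipe reader observes the real failure, if any. The names produce and main are illustrative only.

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // produce mirrors the deferred-closure pattern from compressGoroutine above.
    func produce(dest *io.PipeWriter, src io.Reader) {
        err := fmt.Errorf("internal error: unexpected panic in produce")
        defer func() {
            _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
        }()
        _, err = io.Copy(dest, src)
    }

    func main() {
        pr, pw := io.Pipe()
        go produce(pw, strings.NewReader("hello"))
        out, err := io.ReadAll(pr)
        fmt.Printf("read %q, err=%v\n", out, err)
    }
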
diff --git a/vendor/github.com/containers/image/v5/copy/encrypt.go b/vendor/github.com/containers/image/v5/copy/encrypt.go
deleted file mode 100644
index a18d6f151..000000000
--- a/vendor/github.com/containers/image/v5/copy/encrypt.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package copy
-
-import (
- "strings"
-
- "github.com/containers/image/v5/types"
-)
-
-// isOciEncrypted returns a bool indicating if a mediatype is encrypted
-// This function will be moved to be part of OCI spec when adopted.
-func isOciEncrypted(mediatype string) bool {
- return strings.HasSuffix(mediatype, "+encrypted")
-}
-
-// isEncrypted checks if an image is encrypted
-func isEncrypted(i types.Image) bool {
- layers := i.LayerInfos()
- for _, l := range layers {
- if isOciEncrypted(l.MediaType) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
new file mode 100644
index 000000000..ae0576da4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -0,0 +1,129 @@
+package copy
+
+import (
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ "github.com/containers/ocicrypt"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+ return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+ layers := i.LayerInfos()
+ for _, l := range layers {
+ if isOciEncrypted(l.MediaType) {
+ return true
+ }
+ }
+ return false
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+ decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+ if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil {
+ desc := imgspecv1.Descriptor{
+ Annotations: stream.info.Annotations,
+ }
+ reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
+ if err != nil {
+ return nil, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = decryptedDigest
+ stream.info.Size = -1
+ for k := range stream.info.Annotations {
+ if strings.HasPrefix(k, "org.opencontainers.image.enc") {
+ delete(stream.info.Annotations, k)
+ }
+ }
+ return &bpDecryptionStepData{
+ decrypting: true,
+ }, nil
+ }
+ return &bpDecryptionStepData{
+ decrypting: false,
+ }, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+ if d.decrypting {
+ *operation = types.Decrypt
+ }
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+ encrypting bool // We are actually encrypting the stream
+ finalizer ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+ decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
+ if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
+ var annotations map[string]string
+ if !decryptionStep.decrypting {
+ annotations = srcInfo.Annotations
+ }
+ desc := imgspecv1.Descriptor{
+ MediaType: srcInfo.MediaType,
+ Digest: srcInfo.Digest,
+ Size: srcInfo.Size,
+ Annotations: annotations,
+ }
+ reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ return &bpEncryptionStepData{
+ encrypting: true,
+ finalizer: finalizer,
+ }, nil
+ }
+ return &bpEncryptionStepData{
+ encrypting: false,
+ }, nil
+}
+
+// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
+ if !d.encrypting {
+ return nil
+ }
+
+ encryptAnnotations, err := d.finalizer()
+ if err != nil {
+ return errors.Wrap(err, "Unable to finalize encryption")
+ }
+ *operation = types.Encrypt
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ for k, v := range encryptAnnotations {
+ (*annotations)[k] = v
+ }
+ return nil
+}
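
For orientation, a small self-contained sketch (not from the vendored code) of the step-data pattern the new encryption.go follows: each pipeline step wraps the stream only if it decides to act, invalidates metadata it can no longer vouch for, and returns a record that a later phase consults before touching the manifest. All names here (pipelineStream, stepData, maybeWrap) are illustrative, not c/image API.

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    // pipelineStream stands in for the copier's sourceStream: the current reader
    // plus the metadata describing the bytes it now carries.
    type pipelineStream struct {
        reader io.Reader
        digest string // cleared when a step changes the bytes, since the old digest no longer applies
    }

    // stepData mirrors bpDecryptionStepData / bpEncryptionStepData: it records
    // whether the step actually acted, for a later metadata-update phase to consult.
    type stepData struct {
        acted bool
    }

    // maybeWrap transforms the stream only when enabled, and reports what it did.
    func maybeWrap(s *pipelineStream, enabled bool) stepData {
        if !enabled {
            return stepData{acted: false}
        }
        s.reader = io.MultiReader(strings.NewReader("wrapped:"), s.reader)
        s.digest = "" // unknown until the new bytes are re-digested downstream
        return stepData{acted: true}
    }

    // apply plays the role of updateCryptoOperationAndAnnotations: only a step
    // that acted is allowed to touch the annotations.
    func (d stepData) apply(annotations map[string]string) {
        if d.acted {
            annotations["example.org/transformed"] = "true"
        }
    }

    func main() {
        s := &pipelineStream{reader: strings.NewReader("layer bytes"), digest: "sha256:0000"}
        step := maybeWrap(s, true)

        annotations := map[string]string{}
        step.apply(annotations)

        io.Copy(os.Stdout, s.reader)
        fmt.Println("\nannotations:", annotations, "digest:", s.digest)
    }
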
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index 86ec8863a..b65459f8c 100644
--- a/vendor/github.com/containers/image/v5/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -38,31 +38,50 @@ func (os *orderedSet) append(s string) {
}
}
-// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest.
-// Note that the conversion will only happen later, through ic.src.UpdatedImage
-// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
-// and a list of other possible alternatives, in order.
-func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
- _, srcType, err := ic.src.Manifest(ctx)
- if err != nil { // This should have been cached?!
- return "", nil, errors.Wrap(err, "reading manifest")
- }
+// determineManifestConversionInputs contains the inputs for determineManifestConversion.
+type determineManifestConversionInputs struct {
+ srcMIMEType string // MIME type of the input manifest
+
+ destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
+
+ forceManifestMIMEType string // User’s choice of forced manifest MIME type
+ requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+}
+
+// manifestConversionPlan contains the decisions made by determineManifestConversion.
+type manifestConversionPlan struct {
+ // The preferred manifest MIME type (whether we are converting to it or using it unmodified).
+ // We compute this only to show it in error messages; without having to add this context
+ // in an error message, we would be happy enough to know only that no conversion is needed.
+ preferredMIMEType string
+ preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step.
+ otherMIMETypeCandidates []string // Other possible alternatives, in order
+}
+
+// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in.
+func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) {
+ srcType := in.srcMIMEType
normalizedSrcType := manifest.NormalizedMIMEType(srcType)
if srcType != normalizedSrcType {
logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
srcType = normalizedSrcType
}
- if forceManifestMIMEType != "" {
- destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
+ destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes
+ if in.forceManifestMIMEType != "" {
+ destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
}
- if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
- return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
+ if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
+ return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
}
supportedByDest := map[string]struct{}{}
for _, t := range destSupportedManifestMIMETypes {
- if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) {
+ if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
supportedByDest[t] = struct{}{}
}
}
@@ -79,13 +98,16 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
if _, ok := supportedByDest[srcType]; ok {
prioritizedTypes.append(srcType)
}
- if ic.cannotModifyManifestReason != "" {
+ if in.cannotModifyManifestReason != "" {
// We could also drop this check and have the caller
// make the choice; it is already doing that to an extent, to improve error
// messages. But it is nice to hide the “if we can't modify, do no conversion”
// special case in here; the caller can then worry (or not) only about a good UI.
logrus.Debugf("We can't modify the manifest, hoping for the best...")
- return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
+ return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
}
// Then use our list of preferred types.
@@ -102,15 +124,17 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
- return "", nil, errors.New("Internal error: no candidate MIME types")
+ return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
}
- preferredType := prioritizedTypes.list[0]
- if preferredType != srcType {
- ic.manifestUpdates.ManifestMIMEType = preferredType
- } else {
+ res := manifestConversionPlan{
+ preferredMIMEType: prioritizedTypes.list[0],
+ otherMIMETypeCandidates: prioritizedTypes.list[1:],
+ }
+ res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
+ if !res.preferredMIMETypeNeedsConversion {
logrus.Debugf("... will first try using the original manifest unmodified")
}
- return preferredType, prioritizedTypes.list[1:], nil
+ return res, nil
}
// isMultiImage returns true if img is a list of images
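
As a rough guide to what the refactored function computes, here is a simplified, stand-alone approximation of the planning logic: keep the source MIME type when the destination accepts it, otherwise convert to the first type the destination supports and keep the rest as fallbacks. It deliberately omits forced types, encryption constraints, preferred-type ordering and the cannot-modify case handled above; conversionPlan and planConversion are illustrative names, not the vendored API.

    package main

    import "fmt"

    // conversionPlan is a trimmed-down analogue of manifestConversionPlan.
    type conversionPlan struct {
        preferredMIMEType string
        needsConversion   bool
        otherCandidates   []string
    }

    // planConversion keeps the source type when the destination accepts it and
    // otherwise converts to the destination's first supported type.
    func planConversion(srcType string, destSupported []string) conversionPlan {
        if len(destSupported) == 0 {
            return conversionPlan{preferredMIMEType: srcType} // anything goes, use the original as-is
        }
        others := []string{}
        srcSupported := false
        for _, t := range destSupported {
            if t == srcType {
                srcSupported = true
            } else {
                others = append(others, t)
            }
        }
        if srcSupported {
            return conversionPlan{preferredMIMEType: srcType, otherCandidates: others}
        }
        return conversionPlan{
            preferredMIMEType: destSupported[0],
            needsConversion:   true,
            otherCandidates:   destSupported[1:],
        }
    }

    func main() {
        plan := planConversion(
            "application/vnd.docker.distribution.manifest.v2+json",
            []string{"application/vnd.oci.image.manifest.v1+json"},
        )
        fmt.Printf("%+v\n", plan)
    }
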
diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go
index e542d888c..562404470 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_transport.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go
@@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@@ -140,8 +140,7 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src := newImageSource(ref)
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
index 9a48cb46c..f00b77930 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
- ctrImage "github.com/containers/image/v5/image"
+ ctrImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
@@ -185,11 +185,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return ctrImage.FromSource(ctx, sys, src)
+ return ctrImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
index 4e4ed6881..d75579784 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
@@ -6,7 +6,7 @@ import (
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@@ -195,11 +195,7 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
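
The three transport changes above are internal only; callers keep using the public API unchanged, since NewImage keeps its signature and is merely reimplemented on top of image.FromReference. A hedged caller-side sketch (the image name is a placeholder and it assumes network access to the registry):

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/image/v5/transports/alltransports"
    )

    func main() {
        ctx := context.Background()

        ref, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
        if err != nil {
            panic(err)
        }
        img, err := ref.NewImage(ctx, nil) // nil *types.SystemContext uses defaults
        if err != nil {
            panic(err)
        }
        defer img.Close()

        _, mimeType, err := img.Manifest(ctx)
        if err != nil {
            panic(err)
        }
        fmt.Println("manifest MIME type:", mimeType)
    }
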
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index daac45f87..29c256869 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -163,9 +163,8 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
func serverDefault() *tls.Config {
return &tls.Config{
// Avoid fallback to SSL protocols < TLS1.0
- MinVersion: tls.VersionTLS10,
- PreferServerCipherSuites: true,
- CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
+ MinVersion: tls.VersionTLS10,
+ CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
}
}
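
The dropped field should be behavior-neutral: crypto/tls deprecates PreferServerCipherSuites and has ignored it since Go 1.18. A trimmed sketch for context, omitting the go-connections cipher list that the real serverDefault still sets:

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    func serverDefaultSketch() *tls.Config {
        return &tls.Config{
            MinVersion: tls.VersionTLS10,
            // PreferServerCipherSuites is not set: crypto/tls deprecates it and
            // ignores it since Go 1.18, so removing it changes no behavior.
        }
    }

    func main() {
        fmt.Printf("min TLS version: %#x\n", serverDefaultSketch().MinVersion)
    }
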
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index c84bb37d2..73687e86f 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -9,7 +9,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go
index b250a6b1d..e5a3b8991 100644
--- a/vendor/github.com/containers/image/v5/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go
@@ -1,400 +1,14 @@
package image
import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/blobinfocache/none"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
+ "github.com/containers/image/v5/internal/image"
)
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let’s use the same values.
-var GzippedEmptyLayer = []byte{
- 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
- 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
-}
+var GzippedEmptyLayer = image.GzippedEmptyLayer
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
-const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
-
-type manifestSchema2 struct {
- src types.ImageSource // May be nil if configBlob is not nil
- configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
- m *manifest.Schema2
-}
-
-func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.Schema2FromManifest(manifestBlob)
- if err != nil {
- return nil, err
- }
- return &manifestSchema2{
- src: src,
- m: m,
- }, nil
-}
-
-// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
- return &manifestSchema2{
- src: src,
- configBlob: configBlob,
- m: manifest.Schema2FromComponents(config, layers),
- }
-}
-
-func (m *manifestSchema2) serialize() ([]byte, error) {
- return m.m.Serialize()
-}
-
-func (m *manifestSchema2) manifestMIMEType() string {
- return m.m.MediaType
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned to due how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
- configBlob, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
- // than OCI v1. This unmarshal makes sure we drop docker v2s2
- // fields that aren't needed in OCI v1.
- configOCI := &imgspecv1.Image{}
- if err := json.Unmarshal(configBlob, configOCI); err != nil {
- return nil, err
- }
- return configOCI, nil
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
- if m.configBlob == nil {
- if m.src == nil {
- return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
- }
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
- if err != nil {
- return nil, err
- }
- defer stream.Close()
- blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
- if err != nil {
- return nil, err
- }
- computedDigest := digest.FromBytes(blob)
- if computedDigest != m.m.ConfigDescriptor.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
- }
- m.configBlob = blob
- }
- return m.configBlob, nil
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
- return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
-}
-
-// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
- return false
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
- getter := func(info types.BlobInfo) ([]byte, error) {
- if info.Digest != m.ConfigInfo().Digest {
- // Shouldn't ever happen
- return nil, errors.New("asked for a different config blob")
- }
- config, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- return config, nil
- }
- return m.m.Inspect(getter)
-}
-
-// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
-// (most importantly it forces us to download the full layers even if they are already present at the destination).
-func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
- return false
-}
-
-// UpdatedImage returns a types.Image modified according to options.
-// This does not change the state of the original Image object.
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
-// if the CompressionOperation and CompressionAlgorithm specified in one or more
-// options.LayerInfos items is anything other than gzip.
-func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
- src: m.src,
- configBlob: m.configBlob,
- m: manifest.Schema2Clone(m.m),
- }
-
- converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
- manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
- manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
- imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
- })
- if err != nil {
- return nil, err
- }
-
- if converted != nil {
- return converted, nil
- }
-
- // No conversion required, update manifest
- if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
- }
- }
- // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
-
- return memoryImageFromManifest(&copy), nil
-}
-
-func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
- return imgspecv1.Descriptor{
- MediaType: d.MediaType,
- Size: d.Size,
- Digest: d.Digest,
- URLs: d.URLs,
- }
-}
-
-// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestSchema2 object.
-func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
- configOCI, err := m.OCIConfig(ctx)
- if err != nil {
- return nil, err
- }
- configOCIBytes, err := json.Marshal(configOCI)
- if err != nil {
- return nil, err
- }
-
- config := imgspecv1.Descriptor{
- MediaType: imgspecv1.MediaTypeImageConfig,
- Size: int64(len(configOCIBytes)),
- Digest: digest.FromBytes(configOCIBytes),
- }
-
- layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
- for idx := range layers {
- layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
- switch m.m.LayersDescriptors[idx].MediaType {
- case manifest.DockerV2Schema2ForeignLayerMediaType:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
- case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
- case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
- case manifest.DockerV2Schema2LayerMediaType:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
- default:
- return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
- }
- }
-
- return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
-}
-
-// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestSchema2 object.
-//
-// Based on docker/distribution/manifest/schema1/config_builder.go
-func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
- dest := options.InformationOnly.Destination
-
- var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
- if options.LayerInfos != nil {
- if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
- return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
- len(options.LayerInfos), len(m.m.LayersDescriptors))
- }
- convertedLayerUpdates = []types.BlobInfo{}
- }
-
- configBytes, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- imageConfig := &manifest.Schema2Image{}
- if err := json.Unmarshal(configBytes, imageConfig); err != nil {
- return nil, err
- }
-
- // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
- fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
- history := make([]manifest.Schema1History, len(imageConfig.History))
- nonemptyLayerIndex := 0
- var parentV1ID string // Set in the loop
- v1ID := ""
- haveGzippedEmptyLayer := false
- if len(imageConfig.History) == 0 {
- // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
- }
- for v2Index, historyEntry := range imageConfig.History {
- parentV1ID = v1ID
- v1Index := len(imageConfig.History) - 1 - v2Index
-
- var blobDigest digest.Digest
- if historyEntry.EmptyLayer {
- emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
-
- if !haveGzippedEmptyLayer {
- logrus.Debugf("Uploading empty layer during conversion to schema 1")
- // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
- // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
- if err != nil {
- return nil, errors.Wrap(err, "uploading empty layer")
- }
- if info.Digest != emptyLayerBlobInfo.Digest {
- return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
- }
- haveGzippedEmptyLayer = true
- }
- if options.LayerInfos != nil {
- convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
- }
- blobDigest = emptyLayerBlobInfo.Digest
- } else {
- if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
- return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
- }
- if options.LayerInfos != nil {
- convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
- }
- blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
- nonemptyLayerIndex++
- }
-
- // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
- v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
- if err != nil {
- return nil, err
- }
- v1ID = v
-
- fakeImage := manifest.Schema1V1Compatibility{
- ID: v1ID,
- Parent: parentV1ID,
- Comment: historyEntry.Comment,
- Created: historyEntry.Created,
- Author: historyEntry.Author,
- ThrowAway: historyEntry.EmptyLayer,
- }
- fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
- v1CompatibilityBytes, err := json.Marshal(&fakeImage)
- if err != nil {
- return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
- }
-
- fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
- history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
- // Note that parentV1ID of the top layer is preserved when exiting this loop
- }
-
- // Now patch in real configuration for the top layer (v1Index == 0)
- v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
- if err != nil {
- return nil, err
- }
- v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
- if err != nil {
- return nil, err
- }
- history[0].V1Compatibility = string(v1Config)
-
- if options.LayerInfos != nil {
- options.LayerInfos = convertedLayerUpdates
- }
- m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
- if err != nil {
- return nil, err // This should never happen, we should have created all the components correctly.
- }
- return m1, nil
-}
-
-func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
- if err := blobDigest.Validate(); err != nil {
- return "", err
- }
- parts := append([]string{blobDigest.Hex()}, others...)
- v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
- return hex.EncodeToString(v1IDHash[:]), nil
-}
-
-func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
- // Preserve everything we don't specifically know about.
- // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
- rawContents := map[string]*json.RawMessage{}
- if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
- return nil, err
- }
- delete(rawContents, "rootfs")
- delete(rawContents, "history")
-
- updates := map[string]interface{}{"id": v1ID}
- if parentV1ID != "" {
- updates["parent"] = parentV1ID
- }
- if throwaway {
- updates["throwaway"] = throwaway
- }
- for field, value := range updates {
- encoded, err := json.Marshal(value)
- if err != nil {
- return nil, err
- }
- rawContents[field] = (*json.RawMessage)(&encoded)
- }
- return json.Marshal(rawContents)
-}
-
-// SupportsEncryption returns if encryption is supported for the manifest type
-func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
- return false
-}
+const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest
diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go
index 3a016e1d0..2b7f6b144 100644
--- a/vendor/github.com/containers/image/v5/image/sourced.go
+++ b/vendor/github.com/containers/image/v5/image/sourced.go
@@ -6,17 +6,10 @@ package image
import (
"context"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
)
-// imageCloser implements types.ImageCloser, perhaps allowing simple users
-// to use a single object without having keep a reference to a types.ImageSource
-// only to call types.ImageSource.Close().
-type imageCloser struct {
- types.Image
- src types.ImageSource
-}
-
// FromSource returns a types.ImageCloser implementation for the default instance of source.
// If source is a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
@@ -31,33 +24,7 @@ type imageCloser struct {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
- img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
- if err != nil {
- return nil, err
- }
- return &imageCloser{
- Image: img,
- src: src,
- }, nil
-}
-
-func (ic *imageCloser) Close() error {
- return ic.src.Close()
-}
-
-// sourcedImage is a general set of utilities for working with container images,
-// whatever is their underlying location (i.e. dockerImageSource-independent).
-// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
-// may not be a `sourcedImage` directly. However, most users of `types.Image`
-// do not care, and those who care about `skopeo/docker.Image` know they do.
-type sourcedImage struct {
- *UnparsedImage
- manifestBlob []byte
- manifestMIMEType string
- // genericManifest contains data corresponding to manifestBlob.
- // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
- // if you want to preserve the original manifest; use manifestBlob directly.
- genericManifest
+ return image.FromSource(ctx, sys, src)
}
// FromUnparsedImage returns a types.Image implementation for unparsed.
@@ -66,39 +33,5 @@ type sourcedImage struct {
//
// The Image must not be used after the underlying ImageSource is Close()d.
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
- // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
- // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
- // this is the only UnparsedImage implementation around, anyway.
-
- // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
- manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
- if err != nil {
- return nil, err
- }
-
- parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
- if err != nil {
- return nil, err
- }
-
- return &sourcedImage{
- UnparsedImage: unparsed,
- manifestBlob: manifestBlob,
- manifestMIMEType: manifestMIMEType,
- genericManifest: parsedManifest,
- }, nil
-}
-
-// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
-func (i *sourcedImage) Size() (int64, error) {
- return -1, nil
-}
-
-// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
-func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
- return i.manifestBlob, i.manifestMIMEType, nil
-}
-
-func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+ return image.FromUnparsedImage(ctx, sys, unparsed)
}
diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go
index c64852f72..123f6ce6f 100644
--- a/vendor/github.com/containers/image/v5/image/unparsed.go
+++ b/vendor/github.com/containers/image/v5/image/unparsed.go
@@ -1,95 +1,19 @@
package image
import (
- "context"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// UnparsedImage implements types.UnparsedImage .
// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-type UnparsedImage struct {
- src types.ImageSource
- instanceDigest *digest.Digest
- cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
- // A private cache for Manifest(), may be the empty string if guessing failed.
- // Valid iff cachedManifest is not nil.
- cachedManifestMIMEType string
- cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
-}
+type UnparsedImage = image.UnparsedImage
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
- return &UnparsedImage{
- src: src,
- instanceDigest: instanceDigest,
- }
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (i *UnparsedImage) Reference() types.ImageReference {
- // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
- return i.src.Reference()
-}
-
-// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
- if i.cachedManifest == nil {
- m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
- if err != nil {
- return nil, "", err
- }
-
- // ImageSource.GetManifest does not do digest verification, but we do;
- // this immediately protects also any user of types.Image.
- if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
- matches, err := manifest.MatchesDigest(m, digest)
- if err != nil {
- return nil, "", errors.Wrap(err, "computing manifest digest")
- }
- if !matches {
- return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
- }
- }
-
- i.cachedManifest = m
- i.cachedManifestMIMEType = mt
- }
- return i.cachedManifest, i.cachedManifestMIMEType, nil
-}
-
-// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known.
-// The bool return value seems redundant with digest != ""; it is used explicitly
-// to refuse (unexpected) situations when the digest exists but is "".
-func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
- if i.instanceDigest != nil {
- return *i.instanceDigest, true
- }
- ref := i.Reference().DockerReference()
- if ref != nil {
- if canonical, ok := ref.(reference.Canonical); ok {
- return canonical.Digest(), true
- }
- }
- return "", false
-}
-
-// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
- if i.cachedSignatures == nil {
- sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
- if err != nil {
- return nil, err
- }
- i.cachedSignatures = sigs
- }
- return i.cachedSignatures, nil
+ return image.UnparsedInstance(src, instanceDigest)
}
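
The pattern used in unparsed.go (and the other image/*.go wrappers above) is a thin compatibility shim: the public package keeps its exported names as type aliases and forwarding functions while the implementation moves to internal/image. A generic, hypothetical illustration of the same technique (the module path example.com/mod and the Widget names are made up):

    // internal/impl/impl.go (hypothetical layout)
    package impl

    type Widget struct{ Name string }

    func NewWidget(name string) *Widget { return &Widget{Name: name} }

    // public/public.go (hypothetical layout)
    package public

    import "example.com/mod/internal/impl"

    // Widget is an alias, not a newly defined type, so *public.Widget and
    // *impl.Widget are identical types and existing callers keep compiling,
    // just like `type UnparsedImage = image.UnparsedImage` above.
    type Widget = impl.Widget

    // NewWidget forwards to the internal constructor, as UnparsedInstance does.
    func NewWidget(name string) *Widget { return impl.NewWidget(name) }
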
diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
index af78ac1df..af78ac1df 100644
--- a/vendor/github.com/containers/image/v5/image/docker_list.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
index 5f24970c3..94f776224 100644
--- a/vendor/github.com/containers/image/v5/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
@@ -246,3 +246,12 @@ func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *ty
func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
return false
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool {
+ return true // There are no MIME types in the manifest, so we must assume a valid image.
+}
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
new file mode 100644
index 000000000..7dfd3c5d8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
@@ -0,0 +1,413 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
+// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
+// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
+// in registries, so let’s use the same values.
+//
+// This is publicly visible as c/image/image.GzippedEmptyLayer.
+var GzippedEmptyLayer = []byte{
+ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
+ 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
+}
+
+// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
+//
+// This is publicly visible as c/image/image.GzippedEmptyLayerDigest.
+const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
+
+type manifestSchema2 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ m *manifest.Schema2
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema2FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestSchema2{
+ src: src,
+ m: m,
+ }, nil
+}
+
+// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
+func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
+ return &manifestSchema2{
+ src: src,
+ configBlob: configBlob,
+ m: manifest.Schema2FromComponents(config, layers),
+ }
+}
+
+func (m *manifestSchema2) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestSchema2) manifestMIMEType() string {
+ return m.m.MediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ configBlob, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+ // than OCI v1. This unmarshal makes sure we drop docker v2s2
+ // fields that aren't needed in OCI v1.
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(configBlob, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+ }
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.ConfigDescriptor.Digest {
+ return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
+ return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
+ }
+ return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the CompressionOperation and CompressionAlgorithm specified in one or more
+// options.LayerInfos items is anything other than gzip.
+func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.Schema2Clone(m.m),
+ }
+
+ converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+ manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
+ manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+ imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if converted != nil {
+ return converted, nil
+ }
+
+ // No conversion required, update manifest
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+ return imgspecv1.Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
+ configOCI, err := m.OCIConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+ configOCIBytes, err := json.Marshal(configOCI)
+ if err != nil {
+ return nil, err
+ }
+
+ config := imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: int64(len(configOCIBytes)),
+ Digest: digest.FromBytes(configOCIBytes),
+ }
+
+ layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+ for idx := range layers {
+ layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+ switch m.m.LayersDescriptors[idx].MediaType {
+ case manifest.DockerV2Schema2ForeignLayerMediaType:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+ case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
+ case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema2LayerMediaType:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+ default:
+ return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
+ }
+ }
+
+ return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+//
+// Based on docker/distribution/manifest/schema1/config_builder.go
+func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ dest := options.InformationOnly.Destination
+
+ var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
+ if options.LayerInfos != nil {
+ if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
+ return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
+ len(options.LayerInfos), len(m.m.LayersDescriptors))
+ }
+ convertedLayerUpdates = []types.BlobInfo{}
+ }
+
+ configBytes, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ imageConfig := &manifest.Schema2Image{}
+ if err := json.Unmarshal(configBytes, imageConfig); err != nil {
+ return nil, err
+ }
+
+ // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
+ fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
+ history := make([]manifest.Schema1History, len(imageConfig.History))
+ nonemptyLayerIndex := 0
+ var parentV1ID string // Set in the loop
+ v1ID := ""
+ haveGzippedEmptyLayer := false
+ if len(imageConfig.History) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
+ }
+ for v2Index, historyEntry := range imageConfig.History {
+ parentV1ID = v1ID
+ v1Index := len(imageConfig.History) - 1 - v2Index
+
+ var blobDigest digest.Digest
+ if historyEntry.EmptyLayer {
+ emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
+
+ if !haveGzippedEmptyLayer {
+ logrus.Debugf("Uploading empty layer during conversion to schema 1")
+ // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+ // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
+ if err != nil {
+ return nil, errors.Wrap(err, "uploading empty layer")
+ }
+ if info.Digest != emptyLayerBlobInfo.Digest {
+ return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
+ }
+ haveGzippedEmptyLayer = true
+ }
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
+ }
+ blobDigest = emptyLayerBlobInfo.Digest
+ } else {
+ if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
+ return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
+ }
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
+ }
+ blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
+ nonemptyLayerIndex++
+ }
+
+ // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
+ if err != nil {
+ return nil, err
+ }
+ v1ID = v
+
+ fakeImage := manifest.Schema1V1Compatibility{
+ ID: v1ID,
+ Parent: parentV1ID,
+ Comment: historyEntry.Comment,
+ Created: historyEntry.Created,
+ Author: historyEntry.Author,
+ ThrowAway: historyEntry.EmptyLayer,
+ }
+ fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
+ v1CompatibilityBytes, err := json.Marshal(&fakeImage)
+ if err != nil {
+ return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
+ }
+
+ fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
+ history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
+ // Note that parentV1ID of the top layer is preserved when exiting this loop
+ }
+
+ // Now patch in real configuration for the top layer (v1Index == 0)
+ v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
+ if err != nil {
+ return nil, err
+ }
+ v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
+ if err != nil {
+ return nil, err
+ }
+ history[0].V1Compatibility = string(v1Config)
+
+ if options.LayerInfos != nil {
+ options.LayerInfos = convertedLayerUpdates
+ }
+ m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
+ if err != nil {
+ return nil, err // This should never happen, we should have created all the components correctly.
+ }
+ return m1, nil
+}
+
+func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
+ if err := blobDigest.Validate(); err != nil {
+ return "", err
+ }
+ parts := append([]string{blobDigest.Hex()}, others...)
+ v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
+ return hex.EncodeToString(v1IDHash[:]), nil
+}
+
+func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+ // Preserve everything we don't specifically know about.
+ // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+ rawContents := map[string]*json.RawMessage{}
+ if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+ return nil, err
+ }
+ delete(rawContents, "rootfs")
+ delete(rawContents, "history")
+
+ updates := map[string]interface{}{"id": v1ID}
+ if parentV1ID != "" {
+ updates["parent"] = parentV1ID
+ }
+ if throwaway {
+ updates["throwaway"] = throwaway
+ }
+ for field, value := range updates {
+ encoded, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ rawContents[field] = (*json.RawMessage)(&encoded)
+ }
+ return json.Marshal(rawContents)
+}
+
+// SupportsEncryption returns if encryption is supported for the manifest type
+func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
+ return false
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool {
+ return m.m.CanChangeLayerCompression(mimeType)
+}
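
For reference, the fake v1 IDs generated during the schema2-to-schema1 conversion above are nothing more than a SHA-256 over the layer digest's hex value joined by spaces with the parent ID (and, for the top layer, the config JSON as an extra component). A minimal standalone sketch of that derivation, using only the standard library and a made-up digest value for illustration:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// v1ID mirrors v1IDFromBlobDigestAndComponents above: join the blob digest hex
// with any further components (parent ID, optionally the config JSON) by single
// spaces and hash the result.
func v1ID(blobDigestHex string, others ...string) string {
	parts := append([]string{blobDigestHex}, others...)
	sum := sha256.Sum256([]byte(strings.Join(parts, " ")))
	return hex.EncodeToString(sum[:])
}

func main() {
	// Hypothetical layer digest hex, for illustration only.
	layerHex := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	bottom := v1ID(layerHex, "")   // bottom layer: the empty parent ID is still joined in
	next := v1ID(layerHex, bottom) // subsequent layers chain the previous ID
	fmt.Println(bottom, next)
}
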
diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go
index 36d70b5c2..6b5f34538 100644
--- a/vendor/github.com/containers/image/v5/image/manifest.go
+++ b/vendor/github.com/containers/image/v5/internal/image/manifest.go
@@ -12,9 +12,8 @@ import (
)
// genericManifest is an interface for parsing, modifying image manifests and related data.
-// Note that the public methods are intended to be a subset of types.Image
-// so that embedding a genericManifest into structs works.
-// will support v1 one day...
+// The public methods are related to types.Image so that embedding a genericManifest implements most of it,
+// but there are also public methods that are only visible to packages that can import c/image/internal/image.
type genericManifest interface {
serialize() ([]byte, error)
manifestMIMEType() string
@@ -51,6 +50,16 @@ type genericManifest interface {
// the process of updating a manifest between different manifest types was to update then convert.
// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
SupportsEncryption(ctx context.Context) bool
+
+ // The following methods are not a part of types.Image:
+ // ===
+
+ // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+ // (and the code can handle that).
+ // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+ // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+ // to a different manifest format).
+ CanChangeLayerCompression(mimeType string) bool
}
// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
diff --git a/vendor/github.com/containers/image/v5/image/memory.go b/vendor/github.com/containers/image/v5/internal/image/memory.go
index 4c96b37d8..4c96b37d8 100644
--- a/vendor/github.com/containers/image/v5/image/memory.go
+++ b/vendor/github.com/containers/image/v5/internal/image/memory.go
diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go
index 58e9c03ba..af1a90e82 100644
--- a/vendor/github.com/containers/image/v5/image/oci.go
+++ b/vendor/github.com/containers/image/v5/internal/image/oci.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
@@ -84,6 +85,10 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+ }
+
cb, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
@@ -194,10 +199,15 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+ }
+
// Create a copy of the descriptor.
config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
- // The only difference between OCI and DockerSchema2 is the mediatypes. The
+ // Above, we have already checked that this manifest refers to an image, not an OCI artifact,
+ // so the only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType
@@ -233,7 +243,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
- // We can't directly convert to V1, but we can transitively convert via a V2 image
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+ }
+
+ // We can't directly convert images to V1, but we can transitively convert via a V2 image
m2, err := m.convertToManifestSchema2(ctx, options)
if err != nil {
return nil, err
@@ -246,3 +260,12 @@ func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *ty
func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
return true
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
+ return m.m.CanChangeLayerCompression(mimeType)
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
index d6e6685b1..d6e6685b1 100644
--- a/vendor/github.com/containers/image/v5/image/oci_index.go
+++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
diff --git a/vendor/github.com/containers/image/v5/internal/image/sourced.go b/vendor/github.com/containers/image/v5/internal/image/sourced.go
new file mode 100644
index 000000000..dc09a9e04
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/sourced.go
@@ -0,0 +1,134 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+)
+
+// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
+// If reference poitns to a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
+ src, err := ref.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, err
+ }
+ img, err := FromSource(ctx, sys, src)
+ if err != nil {
+ src.Close()
+ return nil, err
+ }
+ return img, nil
+}
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+ types.Image
+ src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// close the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+//
+// Most callers can use either FromUnparsedImage or FromReference instead.
+//
+// This is publicly visible as c/image/image.FromSource.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+ img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
+ if err != nil {
+ return nil, err
+ }
+ return &imageCloser{
+ Image: img,
+ src: src,
+ }, nil
+}
+
+func (ic *imageCloser) Close() error {
+ return ic.src.Close()
+}
+
+// SourcedImage is a general set of utilities for working with container images,
+// whatever their underlying transport is (i.e. ImageSource-independent).
+// Note the existence of docker.Image and image.memoryImage: various instances
+// of a types.Image may not be a SourcedImage directly.
+//
+// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
+//
+// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
+type SourcedImage struct {
+ *UnparsedImage
+ ManifestBlob []byte // The manifest of the relevant instance
+ ManifestMIMEType string // MIME type of ManifestBlob
+ // genericManifest contains data corresponding to manifestBlob.
+ // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+ // if you want to preserve the original manifest; use manifestBlob directly.
+ genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.FromUnparsedImage.
+func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) {
+ // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
+ // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
+ // this is the only UnparsedImage implementation around, anyway.
+
+ // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
+ manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SourcedImage{
+ UnparsedImage: unparsed,
+ ManifestBlob: manifestBlob,
+ ManifestMIMEType: manifestMIMEType,
+ genericManifest: parsedManifest,
+ }, nil
+}
+
+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *SourcedImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
+func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ return i.ManifestBlob, i.ManifestMIMEType, nil
+}
+
+func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+}
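
The ownership contract described above (FromSource closes the ImageSource only once the returned ImageCloser is closed; on failure the caller still owns src, as FromReference shows) can be exercised through the public wrapper mentioned in the doc comment, c/image/v5/image. A usage sketch, with an illustrative image name and a nil SystemContext standing in for real configuration:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

func inspectManifest(ctx context.Context, sys *types.SystemContext, name string) error {
	ref, err := alltransports.ParseImageName(name)
	if err != nil {
		return err
	}
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return err
	}
	img, err := image.FromSource(ctx, sys, src)
	if err != nil {
		// Mirror FromReference above: on failure the caller still closes src.
		src.Close()
		return err
	}
	defer img.Close() // this also closes src

	blob, mimeType, err := img.Manifest(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%s manifest (%d bytes)\n", mimeType, len(blob))
	return nil
}

func main() {
	// Illustrative image name; a nil SystemContext uses library defaults.
	if err := inspectManifest(context.Background(), nil, "docker://docker.io/library/alpine:latest"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
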
diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
new file mode 100644
index 000000000..8ea0f61b4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
@@ -0,0 +1,99 @@
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// This is publicly visible as c/image/image.UnparsedImage.
+type UnparsedImage struct {
+ src types.ImageSource
+ instanceDigest *digest.Digest
+ cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+ // A private cache for Manifest(), may be the empty string if guessing failed.
+ // Valid iff cachedManifest is not nil.
+ cachedManifestMIMEType string
+ cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.UnparsedInstance.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+ return &UnparsedImage{
+ src: src,
+ instanceDigest: instanceDigest,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+ // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+ return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ if i.cachedManifest == nil {
+ m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // ImageSource.GetManifest does not do digest verification, but we do;
+ // this immediately protects also any user of types.Image.
+ if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+ matches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "computing manifest digest")
+ }
+ if !matches {
+ return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+ }
+ }
+
+ i.cachedManifest = m
+ i.cachedManifestMIMEType = mt
+ }
+ return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator of whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
+ if i.instanceDigest != nil {
+ return *i.instanceDigest, true
+ }
+ ref := i.Reference().DockerReference()
+ if ref != nil {
+ if canonical, ok := ref.(reference.Canonical); ok {
+ return canonical.Digest(), true
+ }
+ }
+ return "", false
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+ if i.cachedSignatures == nil {
+ sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ i.cachedSignatures = sigs
+ }
+ return i.cachedSignatures, nil
+}
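
The NOTEs above prescribe a specific ordering when signatures matter: build an UnparsedImage, verify that object, and only then parse it into a full Image. A small sketch of that ordering, assuming the public c/image/v5/image wrappers (UnparsedInstance, FromUnparsedImage) referenced in the doc comments; the actual policy check is only indicated by a comment, since it depends on the caller's signature-policy setup:

package verify

import (
	"context"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
)

// verifiedImage shows the ordering the NOTEs above ask for: build an
// UnparsedImage from the ImageSource, verify that object, and only then turn
// it into a full types.Image via FromUnparsedImage.
func verifiedImage(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.Image, error) {
	unparsed := image.UnparsedInstance(src, nil) // nil instanceDigest: default instance

	// Signature/policy verification would happen here against `unparsed`
	// (e.g. via a signature.PolicyContext); omitted because it depends on
	// the caller's policy configuration.

	return image.FromUnparsedImage(ctx, sys, unparsed)
}
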
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
new file mode 100644
index 000000000..e5732a8c4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
@@ -0,0 +1,32 @@
+package manifest
+
+import "fmt"
+
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
+//
+// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor)
+type NonImageArtifactError struct {
+ // Callers should not be blindly calling image-specific operations and only checking MIME types
+ // on failure; if they care about the artifact type, they should check before using it.
+ // If they blindly assume an image, they don’t really need this value; just a type check
+ // is sufficient for basic "we can only pull images" UI.
+ //
+ // Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
+ // e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
+ // to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
+ // type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
+ // based on that kind of data.
+ //
+ // So, let’s not expose this until a specific need is identified.
+ mimeType string
+}
+
+// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType.
+func NewNonImageArtifactError(mimeType string) error {
+ return NonImageArtifactError{mimeType: mimeType}
+}
+
+func (e NonImageArtifactError) Error() string {
+ return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
+}
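
Since the error is meant to be detected via errors.As rather than through its unexported field, a caller would typically match on the public manifest.NonImageArtifactError alias that this diff also introduces further down. A small sketch of such a check:

package artifactcheck

import (
	"errors"

	"github.com/containers/image/v5/manifest"
)

// IsNonImageArtifact reports whether err (e.g. from Inspect, ImageID, OCIConfig
// or a schema conversion) means "this object is an OCI artifact, not a
// container image", using the public manifest.NonImageArtifactError alias.
func IsNonImageArtifact(err error) bool {
	var artifactErr manifest.NonImageArtifactError
	return errors.As(err, &artifactErr)
}
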
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
index 20955ab7f..9cf7dd3a9 100644
--- a/vendor/github.com/containers/image/v5/manifest/common.go
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -118,6 +118,18 @@ type compressionMIMETypeSet map[string]string
const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant
const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
+// findCompressionMIMETypeSet returns the compressionMIMETypeSet in variantTable that contains mimeType as one of its values, or nil if none is found.
+func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet {
+ for _, variants := range variantTable {
+ for _, mt := range variants {
+ if mt == mimeType {
+ return variants
+ }
+ }
+ }
+ return nil
+}
+
// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
// to mean "no compression"), based on variantTable.
// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
@@ -130,29 +142,26 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return "", fmt.Errorf("cannot update unknown MIME type")
}
- for _, variants := range variantTable {
- for _, mt := range variants {
- if mt == mimeType { // Found the variant
- name := mtsUncompressed
- if algorithm != nil {
- name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
- }
- if res, ok := variants[name]; ok {
- if res != mtsUnsupportedMIMEType {
- return res, nil
- }
- if name != mtsUncompressed {
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)}
- }
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
- }
- if name != mtsUncompressed {
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)}
- }
- // We can't very well say “the idea of no compression is unknown”
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ if variants != nil {
+ name := mtsUncompressed
+ if algorithm != nil {
+ name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
+ }
+ if res, ok := variants[name]; ok {
+ if res != mtsUnsupportedMIMEType {
+ return res, nil
}
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)}
+ }
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
+ }
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
}
+ // We can't very well say “the idea of no compression is unknown”
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
}
if algorithm != nil {
return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
@@ -209,3 +218,13 @@ type ManifestLayerCompressionIncompatibilityError struct {
func (m ManifestLayerCompressionIncompatibilityError) Error() string {
return m.text
}
+
+// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType
+// Note that the caller still needs to worry about a specific algorithm not being supported.
+func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool {
+ if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+ return false
+ }
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
+}
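
The refactor above keeps the data model unchanged: each compressionMIMETypeSet maps a compression-algorithm name (the empty string meaning uncompressed) to the MIME type of that variant, and a MIME type is "recognized" if it appears as a value in any set. A toy, self-contained mirror of that lookup with an entirely hypothetical table:

package main

import "fmt"

// variantSet mirrors the shape of compressionMIMETypeSet: algorithm name
// ("" = uncompressed) mapped to the MIME type of that variant.
type variantSet map[string]string

// table is hypothetical example data, not the library's real variant tables.
var table = []variantSet{
	{
		"":     "application/vnd.example.layer.tar",
		"gzip": "application/vnd.example.layer.tar+gzip",
		"zstd": "application/vnd.example.layer.tar+zstd",
	},
}

// recognized mirrors compressionVariantsRecognizeMIMEType: a MIME type is
// recognized if any set lists it as one of its variant values.
func recognized(mimeType string) bool {
	for _, variants := range table {
		for _, mt := range variants {
			if mt == mimeType {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(recognized("application/vnd.example.layer.tar+gzip")) // true
	fmt.Println(recognized("application/unknown"))                    // false
}
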
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 1f4db54ee..8b3fbdd39 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -295,3 +295,11 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
}
return m.ConfigDescriptor.Digest.Hex(), nil
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *Schema2) CanChangeLayerCompression(mimeType string) bool {
+ return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType)
+}
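
A sketch of how a caller might consult the new helper before deciding to recompress layers during a copy, assuming the existing manifest.Schema2FromManifest constructor for parsing the raw manifest:

package layercheck

import "github.com/containers/image/v5/manifest"

// CanRecompressSchema2Layer reports whether a layer with the given MIME type in
// a Docker schema2 manifest may be switched to another compression variant,
// using the new CanChangeLayerCompression helper. manifestBlob is assumed to be
// the raw schema2 manifest JSON.
func CanRecompressSchema2Layer(manifestBlob []byte, layerMIMEType string) (bool, error) {
	m, err := manifest.Schema2FromManifest(manifestBlob)
	if err != nil {
		return false, err
	}
	return m.CanChangeLayerCompression(layerMIMEType), nil
}
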
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
index 2e3e5da15..53fc866a7 100644
--- a/vendor/github.com/containers/image/v5/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
@@ -34,6 +35,10 @@ const (
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
+type NonImageArtifactError = internalManifest.NonImageArtifactError
+
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
func SupportedSchema2MediaType(m string) error {
switch m {
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 5892184df..11927ab5e 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -5,6 +5,7 @@ import (
"fmt"
"strings"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
@@ -115,6 +116,12 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
// CompressionAlgorithm that isn't supported by OCI.
+//
+// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on
+// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (UpdateLayerInfos needs
+// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
+// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
+// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.Layers) != len(layerInfos) {
return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
@@ -151,6 +158,33 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
return nil
}
+// getEncryptedMediaType returns the encrypted counterpart of mediatype, or
+// an error if the mediatype does not support encryption.
+func getEncryptedMediaType(mediatype string) (string, error) {
+ for _, s := range strings.Split(mediatype, "+")[1:] {
+ if s == "encrypted" {
+ return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
+ }
+ }
+ unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+ switch unsuffixedMediatype {
+ case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
+ return mediatype + "+encrypted", nil
+ }
+
+ return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
+}
+
+// getDecryptedMediaType returns the decrypted counterpart of mediatype, or
+// an error if the mediatype does not support decryption.
+func getDecryptedMediaType(mediatype string) (string, error) {
+ if !strings.HasSuffix(mediatype, "+encrypted") {
+ return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
+ }
+
+ return strings.TrimSuffix(mediatype, "+encrypted"), nil
+}
+
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (m *OCI1) Serialize() ([]byte, error) {
@@ -159,6 +193,14 @@ func (m *OCI1) Serialize() ([]byte, error) {
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ // We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos.
+ // Most software calling this without human intervention is going to expect the values to be realistic and relevant,
+ // and is probably better served by failing; we can always re-visit that later if we fail now, but
+ // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
+ return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+ }
+
config, err := configGetter(m.ConfigInfo())
if err != nil {
return nil, err
@@ -186,35 +228,39 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+ // The way m.Config.Digest “uniquely identifies” an image is
+ // by containing RootFS.DiffIDs, which identify the layers of the image.
+// For non-image artifacts, we can’t expect the config to change
+ // any time the other layers (semantically) change, so this approach of
+ // distinguishing objects only by m.Config.Digest doesn’t work in general.
+ //
+ // Any caller of this method presumably wants to disambiguate the same
+ // images with a different representation, but doesn’t want to disambiguate
+ // representations (by using a manifest digest). So, submitting a non-image
+ // artifact to such a caller indicates an expectation mismatch.
+ // So, we just fail here instead of inventing some other ID value (e.g.
+ // by combining the config and blob layer digests). That still
+ // gives us the option to not fail, and return some value, in the future,
+ // without committing to that approach now.
+ // (The only known caller of ImageID is storage/storageImageDestination.computeID,
+ // which can’t work with non-image artifacts.)
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+ }
+
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
return m.Config.Digest.Hex(), nil
}
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support encryption
-func getEncryptedMediaType(mediatype string) (string, error) {
- for _, s := range strings.Split(mediatype, "+")[1:] {
- if s == "encrypted" {
- return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
- }
- }
- unsuffixedMediatype := strings.Split(mediatype, "+")[0]
- switch unsuffixedMediatype {
- case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
- return mediatype + "+encrypted", nil
- }
-
- return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
-}
-
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support decryption
-func getDecryptedMediaType(mediatype string) (string, error) {
- if !strings.HasSuffix(mediatype, "+encrypted") {
- return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *OCI1) CanChangeLayerCompression(mimeType string) bool {
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return false
}
-
- return strings.TrimSuffix(mediatype, "+encrypted"), nil
+ return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType)
}
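
Callers that want to avoid these errors entirely can apply the same gate themselves before attempting image-specific operations. A sketch using manifest.OCI1FromManifest and the OCI image-config media-type constant, both assumed available as in the surrounding code:

package ociartifactcheck

import (
	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// IsOCIImage reports whether an OCI manifest blob describes a container image
// (its config media type is the standard image config), mirroring the guards
// that Inspect, ImageID and the schema conversions above now apply before
// doing image-specific work.
func IsOCIImage(manifestBlob []byte) (bool, error) {
	m, err := manifest.OCI1FromManifest(manifestBlob)
	if err != nil {
		return false, err
	}
	return m.Config.MediaType == imgspecv1.MediaTypeImageConfig, nil
}
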
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
index 4fa912765..74fefbd4f 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/oci/internal"
ocilayout "github.com/containers/image/v5/oci/layout"
@@ -122,11 +122,7 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
index a99b63158..a9029a609 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
@@ -10,7 +10,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/oci/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
@@ -154,11 +154,7 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
index 6bbb43be2..c8d65c78a 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
@@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
- genericImage "github.com/containers/image/v5/image"
+ genericImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
@@ -132,11 +132,7 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(sys, ref)
- if err != nil {
- return nil, err
- }
- return genericImage.FromSource(ctx, sys, src)
+ return genericImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
index 1e35ab605..6c4262368 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -14,7 +14,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
@@ -184,17 +184,7 @@ func (s *ostreeImageCloser) Size() (int64, error) {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- var tmpDir string
- if sys == nil || sys.OSTreeTmpDirPath == "" {
- tmpDir = os.TempDir()
- } else {
- tmpDir = sys.OSTreeTmpDirPath
- }
- src, err := newImageSource(tmpDir, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
index 8b22733ac..c9971cbdc 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
@@ -9,7 +9,7 @@ import (
"sync"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports"
@@ -158,11 +158,7 @@ func (b *BlobCache) ClearCache() error {
}
func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := b.NewImageSource(ctx, sys)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(b.reference))
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, b)
}
func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go
index 18d894bc3..2037f2508 100644
--- a/vendor/github.com/containers/image/v5/sif/transport.go
+++ b/vendor/github.com/containers/image/v5/sif/transport.go
@@ -9,7 +9,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
)
@@ -139,11 +139,7 @@ func (ref sifReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index 8071e3b32..06f90d363 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -16,7 +16,7 @@ import (
"sync/atomic"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir"
@@ -486,7 +486,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
-// The caller must arrange the blob to be eventually commited using s.commitLayer().
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
// Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
@@ -641,7 +641,7 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t
}
// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
-// The caller must arrange the blob to be eventually commited using s.commitLayer().
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
@@ -941,7 +941,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
s.lock.Unlock()
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
- if err != nil {
+ if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
return errors.Wrapf(err, "failed to put layer from digest and labels")
}
lastLayer = layer.ID
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
index 23f67c49e..690067ec3 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
@@ -7,7 +7,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -67,16 +67,7 @@ func (r *tarballReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := r.NewImageSource(ctx, sys)
- if err != nil {
- return nil, err
- }
- img, err := image.FromSource(ctx, sys, src)
- if err != nil {
- src.Close()
- return nil, err
- }
- return img, nil
+ return image.FromReference(ctx, sys, r)
}
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml
index e4dd4a402..1036c8d3f 100644
--- a/vendor/github.com/containers/ocicrypt/.travis.yml
+++ b/vendor/github.com/containers/ocicrypt/.travis.yml
@@ -21,7 +21,7 @@ addons:
go_import_path: github.com/containers/ocicrypt
install:
- - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0
+ - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2
script:
- make
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
index a789d052d..c537a20a0 100644
--- a/vendor/github.com/containers/ocicrypt/config/constructors.go
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -21,7 +21,7 @@ import (
"strings"
"github.com/pkg/errors"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys
diff --git a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
index 76be34138..5a8bc022c 100644
--- a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
+++ b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
@@ -24,7 +24,7 @@ import (
"github.com/containers/ocicrypt/crypto/pkcs11"
"github.com/pkg/errors"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
// OcicryptConfig represents the format of an imgcrypt.conf config file
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
index 7fcd2e3af..c6d47e830 100644
--- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -17,7 +17,7 @@ import (
"fmt"
"github.com/pkg/errors"
pkcs11uri "github.com/stefanberger/go-pkcs11uri"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod
index 8837d288e..46ee2a289 100644
--- a/vendor/github.com/containers/ocicrypt/go.mod
+++ b/vendor/github.com/containers/ocicrypt/go.mod
@@ -17,5 +17,5 @@ require (
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
google.golang.org/grpc v1.33.2
gopkg.in/square/go-jose.v2 v2.5.1
- gopkg.in/yaml.v2 v2.4.0
+ gopkg.in/yaml.v3 v3.0.0
)
diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum
index a621a145c..86e36e768 100644
--- a/vendor/github.com/containers/ocicrypt/go.sum
+++ b/vendor/github.com/containers/ocicrypt/go.sum
@@ -111,7 +111,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index fd3d31054..53b13cd33 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -17,14 +17,14 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
- FEDORA_NAME: "fedora-35"
- PRIOR_FEDORA_NAME: "fedora-34"
- UBUNTU_NAME: "ubuntu-2104"
+ FEDORA_NAME: "fedora-36"
+ PRIOR_FEDORA_NAME: "fedora-35"
+ UBUNTU_NAME: "ubuntu-2204"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
- IMAGE_SUFFIX: "c4512539143831552"
+ IMAGE_SUFFIX: "c5878804328480768"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go
new file mode 100644
index 000000000..cf608d479
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go
@@ -0,0 +1,109 @@
+//go:build darwin
+// +build darwin
+
+package graphdriver
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+ "syscall"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+)
+
+type inode struct {
+ Dev uint64
+ Ino uint64
+}
+
+type platformChowner struct {
+ mutex sync.Mutex
+ inodes map[inode]bool
+}
+
+func newLChowner() *platformChowner {
+ return &platformChowner{
+ inodes: make(map[inode]bool),
+ }
+}
+
+func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+ st, ok := info.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+
+ i := inode{
+ Dev: uint64(st.Dev),
+ Ino: uint64(st.Ino),
+ }
+ c.mutex.Lock()
+ _, found := c.inodes[i]
+ if !found {
+ c.inodes[i] = true
+ }
+ c.mutex.Unlock()
+
+ if found {
+ return nil
+ }
+
+ // Map an on-disk UID/GID pair from host to container
+ // using the first map, then back to the host using the
+ // second map. Skip that first step if they're 0, to
+ // compensate for cases where a parent layer should
+ // have had a mapped value, but didn't.
+ uid, gid := int(st.Uid), int(st.Gid)
+ if toContainer != nil {
+ pair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ mappedUID, mappedGID, err := toContainer.ToContainer(pair)
+ if err != nil {
+ if (uid != 0) || (gid != 0) {
+ return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
+ }
+ mappedUID, mappedGID = uid, gid
+ }
+ uid, gid = mappedUID, mappedGID
+ }
+ if toHost != nil {
+ pair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ mappedPair, err := toHost.ToHostOverflow(pair)
+ if err != nil {
+ return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
+ }
+ uid, gid = mappedPair.UID, mappedPair.GID
+ }
+ if uid != int(st.Uid) || gid != int(st.Gid) {
+ cap, err := system.Lgetxattr(path, "security.capability")
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+ return fmt.Errorf("%s: %v", os.Args[0], err)
+ }
+
+ // Make the change.
+ if err := system.Lchown(path, uid, gid); err != nil {
+ return fmt.Errorf("%s: %v", os.Args[0], err)
+ }
+ // Restore the SUID and SGID bits if they were originally set.
+ if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
+ if err := system.Chmod(path, info.Mode()); err != nil {
+ return fmt.Errorf("%s: %v", os.Args[0], err)
+ }
+ }
+ if cap != nil {
+ if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
+ return fmt.Errorf("%s: %v", os.Args[0], err)
+ }
+ }
+
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index c598b936d..65756d8ec 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -1,5 +1,5 @@
-//go:build !windows
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
package graphdriver
@@ -21,12 +21,12 @@ type inode struct {
type platformChowner struct {
mutex sync.Mutex
- inodes map[inode]bool
+ inodes map[inode]string
}
func newLChowner() *platformChowner {
return &platformChowner{
- inodes: make(map[inode]bool),
+ inodes: make(map[inode]string),
}
}
@@ -40,15 +40,33 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
Dev: uint64(st.Dev),
Ino: uint64(st.Ino),
}
+
c.mutex.Lock()
- _, found := c.inodes[i]
+
+ oldTarget, found := c.inodes[i]
if !found {
- c.inodes[i] = true
+ c.inodes[i] = path
+ }
+
+ // If we are dealing with a file with multiple links then keep the lock until the file is
+ // chowned to avoid a race where we link to the old version if the file is copied up.
+ if found || st.Nlink > 1 {
+ defer c.mutex.Unlock()
+ } else {
+ c.mutex.Unlock()
}
- c.mutex.Unlock()
if found {
- return nil
+ // If the dev/inode was already chowned then create a link to the old target instead
+ // of chowning it again. This is necessary when the underlying file system breaks
+ // inodes on copy-up (as it is with overlay with index=off) to maintain the original
+ // link and correct file ownership.
+
+ // The target already exists so remove it before creating the link to the new target.
+ if err := os.Remove(path); err != nil {
+ return err
+ }
+ return os.Link(oldTarget, path)
}
// Map an on-disk UID/GID pair from host to container
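
The relinking logic above relies on hard links being just extra names for one inode, so ownership applied through one path is visible through every other path; that is exactly why the cache is keyed on (Dev, Ino). A Unix-only standalone sketch demonstrating two links sharing an inode:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// Shows why the chowner above keys its cache on (Dev, Ino): two hard links are
// the same inode, so a chown applied through one path is observable through
// the other.
func main() {
	dir, err := os.MkdirTemp("", "hardlink-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	a := filepath.Join(dir, "a")
	b := filepath.Join(dir, "b")
	if err := os.WriteFile(a, []byte("x"), 0o644); err != nil {
		panic(err)
	}
	if err := os.Link(a, b); err != nil {
		panic(err)
	}

	sa, err := os.Stat(a)
	if err != nil {
		panic(err)
	}
	sb, err := os.Stat(b)
	if err != nil {
		panic(err)
	}
	sta := sa.Sys().(*syscall.Stat_t)
	stb := sb.Sys().(*syscall.Stat_t)
	fmt.Printf("same inode: %v (nlink=%d)\n", sta.Ino == stb.Ino, sta.Nlink)
}
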
diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go
new file mode 100644
index 000000000..357851543
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go
@@ -0,0 +1,14 @@
+package graphdriver
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "vfs",
+ }
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ // Note it is OK to return FsMagicUnsupported on this platform.
+ return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
index 0fe3eea7a..7c527d279 100644
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package graphdriver
@@ -162,11 +163,32 @@ func (c *defaultChecker) IsMounted(path string) bool {
return m
}
+// isMountPoint checks that the given path is a mount point
+func isMountPoint(mountPath string) (bool, error) {
+ // it is already the root
+ if mountPath == "/" {
+ return true, nil
+ }
+
+ var s1, s2 unix.Stat_t
+ if err := unix.Stat(mountPath, &s1); err != nil {
+ return true, err
+ }
+ if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil {
+ return true, err
+ }
+ return s1.Dev != s2.Dev, nil
+}
+
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf unix.Statfs_t
+
if err := unix.Statfs(mountPath, &buf); err != nil {
return false, err
}
- return FsMagic(buf.Type) == fsType, nil
+ if FsMagic(buf.Type) != fsType {
+ return false, nil
+ }
+ return isMountPoint(mountPath)
}
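Mounted now also checks that the path is a real mount point rather than merely sitting on a filesystem of the expected type: a path is treated as a mount point when its device number differs from that of its parent directory. A standalone version of that device-number check (illustrative only):

package main

import (
	"fmt"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// isMountPoint reports whether path is a mount point by comparing its device
// number with the device number of its parent directory.
func isMountPoint(path string) (bool, error) {
	if path == "/" {
		return true, nil
	}
	var self, parent unix.Stat_t
	if err := unix.Stat(path, &self); err != nil {
		return false, err
	}
	if err := unix.Stat(filepath.Dir(path), &parent); err != nil {
		return false, err
	}
	return self.Dev != parent.Dev, nil
}

func main() {
	mounted, err := isMountPoint("/proc")
	fmt.Println(mounted, err)
}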
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
index 4a875608b..3932c3ea5 100644
--- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!freebsd,!solaris
+// +build !linux,!windows,!freebsd,!solaris,!darwin
package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index b7e681ace..b619317e0 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -2,6 +2,8 @@ package graphdriver
import (
"io"
+ "os"
+ "runtime"
"time"
"github.com/containers/storage/pkg/archive"
@@ -170,9 +172,16 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
}
defer driver.Put(id)
+ defaultForceMask := os.FileMode(0700)
+ var forceMask *os.FileMode = nil
+ if runtime.GOOS == "darwin" {
+ forceMask = &defaultForceMask
+ }
+
tarOptions := &archive.TarOptions{
InUserNS: userns.RunningInUserNS(),
IgnoreChownErrors: options.IgnoreChownErrors,
+ ForceMask: forceMask,
}
if options.Mappings != nil {
tarOptions.UIDMaps = options.Mappings.UIDs()
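On macOS the naive diff driver unpacks every entry with a forced 0700 mask, so the real ownership and mode are recorded in the override xattr (handled by the archive changes further down) instead of being applied directly. A small sketch of the conditional ForceMask setup, using a local stand-in struct rather than the vendored TarOptions:

package main

import (
	"fmt"
	"os"
	"runtime"
)

type tarOptions struct {
	IgnoreChownErrors bool
	ForceMask         *os.FileMode
}

// newTarOptions forces a 0700 mask only on macOS, mirroring the hunk above.
func newTarOptions() *tarOptions {
	opts := &tarOptions{}
	if runtime.GOOS == "darwin" {
		mask := os.FileMode(0o700)
		opts.ForceMask = &mask
	}
	return opts
}

func main() {
	fmt.Printf("%+v\n", newTarOptions())
}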
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index 1b58e2f63..b1073d55f 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -5,6 +5,7 @@ import (
"io"
"os"
"path/filepath"
+ "runtime"
"strconv"
"strings"
@@ -170,6 +171,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
}()
rootPerms := defaultPerms
+ if runtime.GOOS == "darwin" {
+ rootPerms = os.FileMode(0700)
+ }
+
if parent != "" {
st, err := system.Stat(d.dir(parent))
if err != nil {
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 1b9f25bcb..7d8151b57 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -5,30 +5,30 @@ module github.com/containers/storage
require (
github.com/BurntSushi/toml v1.1.0
github.com/Microsoft/go-winio v0.5.2
- github.com/Microsoft/hcsshim v0.9.2
+ github.com/Microsoft/hcsshim v0.9.3
github.com/containerd/stargz-snapshotter/estargz v0.11.4
github.com/cyphar/filepath-securejoin v0.2.3
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.15.4
+ github.com/klauspost/compress v1.15.6
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
- github.com/moby/sys/mountinfo v0.6.1
+ github.com/moby/sys/mountinfo v0.6.2
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.1.1
+ github.com/opencontainers/runc v1.1.1-0.20220607072441-a7a45d7d2721
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.10.1
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.8.1
- github.com/stretchr/testify v1.7.1
+ github.com/stretchr/testify v1.7.2
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/ulikunitz/xz v0.5.10
github.com/vbatts/tar-split v0.11.2
golang.org/x/net v0.0.0-20210825183410-e898025ed96a
- golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
+ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
gotest.tools v2.2.0+incompatible
)
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 6587fddb3..6fbca4e4f 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -57,8 +57,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
-github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo=
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -107,7 +107,7 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -266,6 +266,7 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
@@ -299,7 +300,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -425,8 +426,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ=
-github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -436,6 +437,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -467,9 +469,8 @@ github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQ
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc=
-github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -521,8 +522,8 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU=
-github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.1-0.20220607072441-a7a45d7d2721 h1:geG4wjkUPHyg+Ya/BBb8YlX1z4INWpVMdoUnmBxttqc=
+github.com/opencontainers/runc v1.1.1-0.20220607072441-a7a45d7d2721/go.mod h1:QvA0UNe48mC1JxcXq0sENIR38+/LdJMLNxuAvtFBhxA=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -534,7 +535,6 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w=
github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -580,12 +580,13 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -624,8 +625,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -643,6 +644,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
@@ -848,10 +850,9 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -990,6 +991,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1018,8 +1020,9 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index d4f129ee6..570000e82 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -12,6 +12,7 @@ import (
"os"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"sync"
"syscall"
@@ -72,10 +73,10 @@ type (
)
const (
- tarExt = "tar"
- solaris = "solaris"
- windows = "windows"
- containersOverrideXattr = "user.containers.override_stat"
+ tarExt = "tar"
+ solaris = "solaris"
+ windows = "windows"
+ darwin = "darwin"
)
var xattrsToIgnore = map[string]interface{}{
@@ -698,9 +699,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
}
- if forceMask != nil && hdr.Typeflag != tar.TypeSymlink {
+ if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777)
- if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil {
+ if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
}
@@ -981,7 +982,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
uid, gid, mode, err := GetFileOwner(dest)
if err == nil {
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
- if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
+ if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
}
@@ -1313,6 +1314,21 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
if err != nil {
return err
}
+ } else if runtime.GOOS == darwin {
+ uid, gid = hdr.Uid, hdr.Gid
+ if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[0], 10, 32)
+ if err == nil {
+ uid = int(val)
+ }
+ val, err = strconv.ParseUint(attrs[1], 10, 32)
+ if err == nil {
+ gid = int(val)
+ }
+ }
+ }
} else {
uid, gid = hdr.Uid, hdr.Gid
}
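The override xattr value is the string "uid:gid:0mode", written by createTarFile/SafeChown and parsed here and in the idtools changes below, with the mode printed and parsed in octal. A self-contained sketch of that encoding and its inverse (function names are illustrative, not part of the vendored package):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encodeOverride builds the "uid:gid:0mode" value, mode in octal.
func encodeOverride(uid, gid int, mode uint32) string {
	return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777)
}

// decodeOverride parses the value back into its three parts.
func decodeOverride(value string) (uid, gid int, mode uint32, err error) {
	attrs := strings.Split(value, ":")
	if len(attrs) != 3 {
		return 0, 0, 0, fmt.Errorf("expected uid:gid:mode, got %q", value)
	}
	u, err := strconv.ParseUint(attrs[0], 10, 32)
	if err != nil {
		return 0, 0, 0, err
	}
	g, err := strconv.ParseUint(attrs[1], 10, 32)
	if err != nil {
		return 0, 0, 0, err
	}
	m, err := strconv.ParseUint(attrs[2], 8, 32)
	if err != nil {
		return 0, 0, 0, err
	}
	return int(u), int(g), uint32(m), nil
}

func main() {
	v := encodeOverride(1000, 1000, 0o755)
	uid, gid, mode, _ := decodeOverride(v)
	fmt.Println(v, uid, gid, mode)
}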
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
new file mode 100644
index 000000000..d257cc8e9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
@@ -0,0 +1,21 @@
+package chrootarchive
+
+import (
+ "io"
+
+ "github.com/containers/storage/pkg/archive"
+)
+
+func chroot(path string) error {
+ return nil
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+ dest string,
+ options *archive.TarOptions, root string) error {
+ return archive.Unpack(decompressedArchive, dest, options)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ return archive.TarWithOptions(srcPath, options)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index 9da10fe33..e4b45a454 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!darwin
package chrootarchive
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
index 83278ee50..d5aedd002 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!linux
+// +build !windows,!linux,!darwin
package chrootarchive
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
new file mode 100644
index 000000000..d6326c808
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
@@ -0,0 +1,41 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/storage/pkg/archive"
+)
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+ dest = filepath.Clean(dest)
+
+ if decompress {
+ decompressed, err := archive.DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompressed.Close()
+
+ layer = decompressed
+ }
+
+ tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract")
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
+ }
+
+ s, err := archive.UnpackLayer(dest, layer, options)
+ os.RemoveAll(tmpDir)
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
+ }
+
+ return s, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
index 84253c6aa..6dd5146cc 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -1,4 +1,4 @@
-//+build !windows
+//+build !windows,!darwin
package chrootarchive
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go
new file mode 100644
index 000000000..fa17c9bf8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go
@@ -0,0 +1,4 @@
+package chrootarchive
+
+func init() {
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
index ea08135e4..45caec972 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!darwin
package chrootarchive
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 9434499d2..7b6cd8fe4 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -272,14 +272,6 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo
return canDedupMetadataWithHardLink(file, &otherFile)
}
-func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) {
- digester := digest.Canonical.Digester()
- if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil {
- return "", err
- }
- return digester.Digest(), nil
-}
-
// findFileInOSTreeRepos checks whether the requested file already exist in one of the OSTree repo and copies the file content from there if possible.
// file is the file to look for.
// ostreeRepos is a list of OSTree repos.
@@ -330,75 +322,6 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di
return false, nil, 0, nil
}
-// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible.
-// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
-// paths.
-// file is the file to look for.
-// dirfd is an open fd to the destination checkout.
-// useHardLinks defines whether the deduplication can be performed using hard links.
-func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) {
- sourceFile := filepath.Clean(filepath.Join("/", file.Name))
- if !strings.HasPrefix(sourceFile, "/usr/") {
- // limit host deduplication to files under /usr.
- return false, nil, 0, nil
- }
-
- st, err := os.Stat(sourceFile)
- if err != nil || !st.Mode().IsRegular() {
- return false, nil, 0, nil
- }
-
- if st.Size() != file.Size {
- return false, nil, 0, nil
- }
-
- fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
- if err != nil {
- return false, nil, 0, nil
- }
-
- f := os.NewFile(uintptr(fd), "fd")
- defer f.Close()
-
- manifestChecksum, err := digest.Parse(file.Digest)
- if err != nil {
- return false, nil, 0, err
- }
-
- checksum, err := getFileDigest(f, buf)
- if err != nil {
- return false, nil, 0, err
- }
-
- if checksum != manifestChecksum {
- return false, nil, 0, nil
- }
-
- // check if the open file can be deduplicated with hard links
- useHardLinks = useHardLinks && canDedupFileWithHardLink(file, fd, st)
-
- dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
- if err != nil {
- return false, nil, 0, nil
- }
-
- // calculate the checksum again to make sure the file wasn't modified while it was copied
- if _, err := f.Seek(0, 0); err != nil {
- dstFile.Close()
- return false, nil, 0, err
- }
- checksum, err = getFileDigest(f, buf)
- if err != nil {
- dstFile.Close()
- return false, nil, 0, err
- }
- if checksum != manifestChecksum {
- dstFile.Close()
- return false, nil, 0, nil
- }
- return true, dstFile, written, nil
-}
-
// findFileInOtherLayers finds the specified file in other layers.
// cache is the layers cache to use.
// file is the file to look for.
@@ -1297,10 +1220,9 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo
}
type findAndCopyFileOptions struct {
- useHardLinks bool
- enableHostDedup bool
- ostreeRepos []string
- options *archive.TarOptions
+ useHardLinks bool
+ ostreeRepos []string
+ options *archive.TarOptions
}
func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
@@ -1336,18 +1258,6 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, cop
return true, nil
}
- if copyOptions.enableHostDedup {
- found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer)
- if err != nil {
- return false, err
- }
- if found {
- if err := finalizeFile(dstFile); err != nil {
- return false, err
- }
- return true, nil
- }
- }
return false, nil
}
@@ -1376,8 +1286,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
return output, errors.New("enable_partial_images not configured")
}
- enableHostDedup := parseBooleanPullOption(&storeOpts, "enable_host_deduplication", false)
-
// When the hard links deduplication is used, file attributes are ignored because setting them
// modifies the source file as well.
useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false)
@@ -1426,10 +1334,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
missingPartsSize, totalChunksSize := int64(0), int64(0)
copyOptions := findAndCopyFileOptions{
- useHardLinks: useHardLinks,
- enableHostDedup: enableHostDedup,
- ostreeRepos: ostreeRepos,
- options: options,
+ useHardLinks: useHardLinks,
+ ostreeRepos: ostreeRepos,
+ options: options,
}
type copyFileJob struct {
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 7a8fec0ce..3ae2a1cd7 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -6,6 +6,7 @@ import (
"io/ioutil"
"os"
"os/user"
+ "runtime"
"sort"
"strconv"
"strings"
@@ -38,8 +39,9 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
- subuidFileName string = "/etc/subuid"
- subgidFileName string = "/etc/subgid"
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+ ContainersOverrideXattr = "user.containers.override_stat"
)
// MkdirAllAs creates a directory (include any along the path) and then modifies
@@ -366,6 +368,25 @@ func checkChownErr(err error, name string, uid, gid int) error {
}
func SafeChown(name string, uid, gid int) error {
+ if runtime.GOOS == "darwin" {
+ var mode uint64 = 0o0700
+ xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
+ if err == nil {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[2], 8, 32)
+ if err == nil {
+ mode = val
+ }
+ }
+ }
+ value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
+ if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
+ return err
+ }
+ uid = os.Getuid()
+ gid = os.Getgid()
+ }
if stat, statErr := system.Stat(name); statErr == nil {
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
@@ -375,6 +396,25 @@ func SafeChown(name string, uid, gid int) error {
}
func SafeLchown(name string, uid, gid int) error {
+ if runtime.GOOS == "darwin" {
+ var mode uint64 = 0o0700
+ xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
+ if err == nil {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[2], 8, 32)
+ if err == nil {
+ mode = val
+ }
+ }
+ }
+ value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
+ if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
+ return err
+ }
+ uid = os.Getuid()
+ gid = os.Getgid()
+ }
if stat, statErr := system.Lstat(name); statErr == nil {
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
index 6e6e3b22b..c96465369 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -1,8 +1,10 @@
+//go:build linux && cgo && libsubid
// +build linux,cgo,libsubid
package idtools
import (
+ "os/user"
"unsafe"
"github.com/pkg/errors"
@@ -32,19 +34,34 @@ import "C"
func readSubid(username string, isUser bool) (ranges, error) {
var ret ranges
+ uidstr := ""
+
if username == "ALL" {
return nil, errors.New("username ALL not supported")
}
+ if u, err := user.Lookup(username); err == nil {
+ uidstr = u.Uid
+ }
+
cUsername := C.CString(username)
defer C.free(unsafe.Pointer(cUsername))
+ cuidstr := C.CString(uidstr)
+ defer C.free(unsafe.Pointer(cuidstr))
+
var nRanges C.int
var cRanges *C.struct_subid_range
if isUser {
nRanges = C.subid_get_uid_ranges(cUsername, &cRanges)
+ if nRanges <= 0 {
+ nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges)
+ }
} else {
nRanges = C.subid_get_gid_ranges(cUsername, &cRanges)
+ if nRanges <= 0 {
+ nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges)
+ }
}
if nRanges < 0 {
return nil, errors.New("cannot read subids")
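readSubid now falls back to a lookup by numeric UID when the lookup by user name yields no ranges. The vendored code goes through libsubid via cgo; the sketch below shows the same name-then-UID fallback idea against a plain /etc/subuid parse instead, so it is not the code path above:

package main

import (
	"bufio"
	"fmt"
	"os"
	"os/user"
	"strings"
)

// subuidRanges returns "start:count" entries from /etc/subuid that match the
// user name or, as a fallback, the numeric UID.
func subuidRanges(name string) ([]string, error) {
	keys := []string{name}
	if u, err := user.Lookup(name); err == nil {
		keys = append(keys, u.Uid)
	}
	f, err := os.Open("/etc/subuid")
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var out []string
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.SplitN(sc.Text(), ":", 3)
		if len(fields) != 3 {
			continue
		}
		for _, k := range keys {
			if fields[0] == k {
				out = append(out, fields[1]+":"+fields[2])
				break
			}
		}
	}
	return out, sc.Err()
}

func main() {
	ranges, err := subuidRanges("root")
	fmt.Println(ranges, err)
}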
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
index fc080acbe..aa843920f 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
@@ -1,15 +1,19 @@
+//go:build linux || solaris || darwin || freebsd
// +build linux solaris darwin freebsd
package lockfile
import (
+ "bytes"
+ cryptorand "crypto/rand"
+ "encoding/binary"
"fmt"
"os"
"path/filepath"
"sync"
+ "sync/atomic"
"time"
- "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
@@ -23,13 +27,44 @@ type lockfile struct {
counter int64
file string
fd uintptr
- lw string
+ lw []byte // "last writer"-unique value valid as of the last .Touch() or .Modified(), generated by newLastWriterID()
locktype int16
locked bool
ro bool
recursive bool
}
+const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
+var lastWriterIDCounter uint64 // Private state for newLastWriterID
+
+// newLastWriterID returns a new "last writer" ID.
+// The value must be different on every call, and also differ from values
+// generated by other processes.
+func newLastWriterID() []byte {
+ // The ID is (PID, time, per-process counter, random)
+ // PID + time represents both a unique process across reboots,
+ // and a specific time within the process; the per-process counter
+ // is an extra safeguard for in-process concurrency.
+ // The random part disambiguates across process namespaces
+ // (where PID values might collide), serves as a general-purpose
+ // extra safety, _and_ is used to pad the output to lastWriterIDSize,
+ // because other versions of this code exist and they don't work
+ // efficiently if the size of the value changes.
+ pid := os.Getpid()
+ tm := time.Now().UnixNano()
+ counter := atomic.AddUint64(&lastWriterIDCounter, 1)
+
+ res := make([]byte, lastWriterIDSize)
+ binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
+ binary.LittleEndian.PutUint64(res[8:16], counter)
+ binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
+ if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
+ panic(err) // This shouldn't happen
+ }
+
+ return res
+}
+
// openLock opens the file at path and returns the corresponding file
// descriptor. Note that the path is opened read-only when ro is set. If ro
// is unset, openLock will open the path read-write and create the file if
@@ -89,7 +124,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
stateMutex: &sync.Mutex{},
rwMutex: &sync.RWMutex{},
file: path,
- lw: stringid.GenerateRandomID(),
+ lw: newLastWriterID(),
locktype: int16(locktype),
locked: false,
ro: ro}, nil
@@ -212,13 +247,12 @@ func (l *lockfile) Touch() error {
panic("attempted to update last-writer in lockfile without the write lock")
}
defer l.stateMutex.Unlock()
- l.lw = stringid.GenerateRandomID()
- id := []byte(l.lw)
- n, err := unix.Pwrite(int(l.fd), id, 0)
+ l.lw = newLastWriterID()
+ n, err := unix.Pwrite(int(l.fd), l.lw, 0)
if err != nil {
return err
}
- if n != len(id) {
+ if n != len(l.lw) {
return unix.ENOSPC
}
return nil
@@ -228,21 +262,21 @@ func (l *lockfile) Touch() error {
// was loaded.
func (l *lockfile) Modified() (bool, error) {
l.stateMutex.Lock()
- id := []byte(l.lw)
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
- n, err := unix.Pread(int(l.fd), id, 0)
+ currentLW := make([]byte, len(l.lw))
+ n, err := unix.Pread(int(l.fd), currentLW, 0)
if err != nil {
return true, err
}
- if n != len(id) {
+ if n != len(l.lw) {
return true, nil
}
- lw := l.lw
- l.lw = string(id)
- return l.lw != lw, nil
+ oldLW := l.lw
+ l.lw = currentLW
+ return !bytes.Equal(currentLW, oldLW), nil
}
// IsReadWriteLock indicates if the lock file is a read-write lock.
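The lock file's last-writer marker is now a fixed 64-byte binary token instead of a random string: 8 bytes of nanosecond timestamp, 8 bytes of a per-process counter, 4 bytes of PID, and random padding, which Touch writes at offset 0 and Modified compares with bytes.Equal. A standalone sketch of that token layout:

package main

import (
	cryptorand "crypto/rand"
	"encoding/binary"
	"fmt"
	"os"
	"sync/atomic"
	"time"
)

const tokenSize = 64

var counter uint64

// newToken packs timestamp, counter, and PID into a fixed-size value and pads
// the rest with random bytes.
func newToken() []byte {
	res := make([]byte, tokenSize)
	binary.LittleEndian.PutUint64(res[0:8], uint64(time.Now().UnixNano()))
	binary.LittleEndian.PutUint64(res[8:16], atomic.AddUint64(&counter, 1))
	binary.LittleEndian.PutUint32(res[16:20], uint32(os.Getpid()))
	if _, err := cryptorand.Read(res[20:]); err != nil {
		panic(err)
	}
	return res
}

func main() {
	fmt.Printf("%x\n", newToken())
}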
diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
index a0c7c42a0..4c434f0e5 100644
--- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go
+++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
@@ -12,6 +12,7 @@ import (
"regexp"
"strconv"
"strings"
+ "sync"
"time"
)
@@ -20,6 +21,9 @@ const shortLen = 12
var (
validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
+
+ rngLock sync.Mutex
+ rng *rand.Rand // A RNG with seeding properties we control. It can only be accessed with rngLock held.
)
// IsShortID determines if an arbitrary string *looks like* a short ID.
@@ -67,7 +71,9 @@ func GenerateRandomID() string {
// secure sources of random.
// It helps you to save entropy.
func GenerateNonCryptoID() string {
- return generateID(readerFunc(rand.Read))
+ rngLock.Lock()
+ defer rngLock.Unlock()
+ return generateID(readerFunc(rng.Read))
}
// ValidateID checks whether an ID string is a valid image ID.
@@ -79,7 +85,7 @@ func ValidateID(id string) error {
}
func init() {
- // safely set the seed globally so we generate random ids. Tries to use a
+ // Initialize a private RNG so we generate random ids. Tries to use a
// crypto seed before falling back to time.
var seed int64
if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
@@ -89,7 +95,7 @@ func init() {
seed = cryptoseed.Int64()
}
- rand.Seed(seed)
+ rng = rand.New(rand.NewSource(seed))
}
type readerFunc func(p []byte) (int, error)
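GenerateNonCryptoID no longer seeds or reads the package-global math/rand source; it uses a private *rand.Rand seeded once (preferably from crypto/rand) and guarded by a mutex, since rand.Rand is not safe for concurrent use. A minimal sketch of that pattern:

package main

import (
	cryptorand "crypto/rand"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"sync"
	"time"
)

var (
	rngMu sync.Mutex
	rng   *rand.Rand
)

func init() {
	// Prefer a crypto seed, fall back to the clock.
	seed := time.Now().UnixNano()
	if c, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err == nil {
		seed = c.Int64()
	}
	rng = rand.New(rand.NewSource(seed))
}

// randomHex draws n bytes from the private source under the mutex.
func randomHex(n int) string {
	rngMu.Lock()
	defer rngMu.Unlock()
	buf := make([]byte, n)
	rng.Read(buf)
	return fmt.Sprintf("%x", buf)
}

func main() {
	fmt.Println(randomHex(12))
}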
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
new file mode 100644
index 000000000..a0183885b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
@@ -0,0 +1,84 @@
+//go:build freebsd && cgo
+// +build freebsd,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// #include <unistd.h>
+// #include <sys/vmmeter.h>
+// #include <sys/sysctl.h>
+// #include <vm/vm_param.h>
+import "C"
+
+func getMemInfo() (int64, int64, error) {
+ data, err := unix.SysctlRaw("vm.vmtotal")
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't get kernel info: %v", err)
+ }
+ if len(data) != C.sizeof_struct_vmtotal {
+ return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data))
+ }
+
+ total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0]))
+
+ pagesize := int64(C.sysconf(C._SC_PAGESIZE))
+ npages := int64(C.sysconf(C._SC_PHYS_PAGES))
+ return pagesize * npages, pagesize * int64(total.t_free), nil
+}
+
+func getSwapInfo() (int64, int64, error) {
+ var (
+ total int64 = 0
+ used int64 = 0
+ )
+ swapCount, err := unix.SysctlUint32("vm.nswapdev")
+ if err != nil {
+ return -1, -1, fmt.Errorf("error reading vm.nswapdev: %v", err)
+ }
+ for i := 0; i < int(swapCount); i++ {
+ data, err := unix.SysctlRaw("vm.swap_info", i)
+ if err != nil {
+ return -1, -1, fmt.Errorf("error reading vm.swap_info.%d: %v", i, err)
+ }
+ if len(data) != C.sizeof_struct_xswdev {
+ return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data))
+ }
+ xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0]))
+ total += int64(xsw.xsw_nblks)
+ used += int64(xsw.xsw_used)
+ }
+ pagesize := int64(C.sysconf(C._SC_PAGESIZE))
+ return pagesize * total, pagesize * (total - used), nil
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ MemTotal, MemFree, err := getMemInfo()
+ if err != nil {
+ return nil, fmt.Errorf("error getting memory totals %v\n", err)
+ }
+ SwapTotal, SwapFree, err := getSwapInfo()
+ if err != nil {
+ return nil, fmt.Errorf("error getting swap totals %v\n", err)
+ }
+
+ if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 {
+ return nil, fmt.Errorf("error getting system memory info %v\n", err)
+ }
+
+ meminfo := &MemInfo{}
+ // Total memory is total physical memory less memory locked by the kernel
+ meminfo.MemTotal = MemTotal
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
index 3ce019dff..0f9feb1d2 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
@@ -1,4 +1,8 @@
-// +build !linux,!windows,!solaris
+//go:build !linux && !windows && !solaris && !(freebsd && cgo)
+// +build !linux
+// +build !windows
+// +build !solaris
+// +build !freebsd !cgo
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
new file mode 100644
index 000000000..75275b964
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
@@ -0,0 +1,84 @@
+package system
+
+import (
+ "bytes"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // Value is larger than the maximum size allowed
+ E2BIG unix.Errno = unix.E2BIG
+
+ // Operation not supported
+ EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// Returns a []byte slice if the xattr is set and nil otherwise.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ // Start with a 128 length byte array
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+
+ for errno == unix.ERANGE {
+ // Buffer too small, use zero-sized buffer to get the actual size
+ sz, errno = unix.Lgetxattr(path, attr, []byte{})
+ if errno != nil {
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno}
+ }
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ }
+
+ switch {
+ case errno == unix.ENOATTR:
+ return nil, nil
+ case errno != nil:
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno}
+ }
+
+ return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ if err := unix.Lsetxattr(path, attr, data, flags); err != nil {
+ return &os.PathError{Op: "lsetxattr", Path: path, Err: err}
+ }
+
+ return nil
+}
+
+// Llistxattr lists extended attributes associated with the given path
+// in the file system.
+func Llistxattr(path string) ([]string, error) {
+ dest := make([]byte, 128)
+ sz, errno := unix.Llistxattr(path, dest)
+
+ for errno == unix.ERANGE {
+ // Buffer too small, use zero-sized buffer to get the actual size
+ sz, errno = unix.Llistxattr(path, []byte{})
+ if errno != nil {
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno}
+ }
+
+ dest = make([]byte, sz)
+ sz, errno = unix.Llistxattr(path, dest)
+ }
+ if errno != nil {
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno}
+ }
+
+ var attrs []string
+ for _, token := range bytes.Split(dest[:sz], []byte{0}) {
+ if len(token) > 0 {
+ attrs = append(attrs, string(token))
+ }
+ }
+
+ return attrs, nil
+}
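The darwin xattr helpers use the usual grow-and-retry loop: start with a small buffer, and on ERANGE ask the kernel for the required size and try again, since the attribute may change between calls. A usage-style sketch of the same loop with golang.org/x/sys/unix (not the vendored helpers themselves):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// listxattr returns the raw, NUL-separated attribute names for path, growing
// the buffer on ERANGE.
func listxattr(path string) ([]byte, error) {
	buf := make([]byte, 128)
	for {
		sz, err := unix.Llistxattr(path, buf)
		if err == unix.ERANGE {
			// Ask for the required size with an empty buffer, then retry.
			sz, err = unix.Llistxattr(path, nil)
			if err != nil {
				return nil, err
			}
			buf = make([]byte, sz)
			continue
		}
		if err != nil {
			return nil, err
		}
		return buf[:sz], nil
	}
}

func main() {
	data, err := listxattr("/tmp")
	fmt.Println(len(data), err)
}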
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
index 3fc27f0b1..221eb78bc 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!darwin
package system
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
index c0e359b27..f5a7c3a25 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
@@ -1,4 +1,4 @@
-#ifndef UNSHARE_NO_CODE_AT_ALL
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__)
#define _GNU_SOURCE
#include <sys/types.h>
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
index 53cfeb0ec..221c7e088 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
@@ -7,7 +7,7 @@ import (
"sync"
"github.com/pkg/errors"
- "github.com/syndtr/gocapability/capability"
+ "github.com/sirupsen/logrus"
)
var (
@@ -38,19 +38,13 @@ func HomeDir() (string, error) {
return homeDir, homeDirErr
}
-// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
-func HasCapSysAdmin() (bool, error) {
- hasCapSysAdminOnce.Do(func() {
- currentCaps, err := capability.NewPid2(0)
- if err != nil {
- hasCapSysAdminErr = err
- return
- }
- if err = currentCaps.Load(); err != nil {
- hasCapSysAdminErr = err
- return
+func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
+ if err != nil {
+ if format != "" {
+ logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
+ } else {
+ logrus.Errorf("%v", err)
}
- hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
- })
- return hasCapSysAdminRet, hasCapSysAdminErr
+ os.Exit(1)
+ }
}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
index b3f8099f6..6a6f21d9c 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
@@ -1,4 +1,5 @@
-// +build linux,cgo,!gccgo
+//go:build (linux && cgo && !gccgo) || (freebsd && cgo)
+// +build linux,cgo,!gccgo freebsd,cgo
package unshare
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
new file mode 100644
index 000000000..01cf33bde
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
@@ -0,0 +1,53 @@
+// +build darwin
+
+package unshare
+
+import (
+ "os"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+ // UsernsEnvName is the environment variable that, if set, indicates we are running in rootless mode
+ UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
+)
+
+// IsRootless tells us if we are running in rootless mode
+func IsRootless() bool {
+ return true
+}
+
+// GetRootlessUID returns the UID of the user in the parent userNS
+func GetRootlessUID() int {
+ return os.Getuid()
+}
+
+// RootlessEnv returns the environment settings for the rootless containers
+func RootlessEnv() []string {
+ return append(os.Environ(), UsernsEnvName+"=")
+}
+
+// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
+func MaybeReexecUsingUserNamespace(evenForRoot bool) {
+}
+
+// GetHostIDMappings reads mappings for the specified process (or the current
+// process if pid is "self" or an empty string) from the kernel.
+func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
+ return nil, nil, nil
+}
+
+// ParseIDMappings parses mapping triples.
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+ uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
+ if err != nil {
+ return nil, nil, err
+ }
+ gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
+ if err != nil {
+ return nil, nil, err
+ }
+ return uid, gid, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
new file mode 100644
index 000000000..0b2f17886
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
@@ -0,0 +1,76 @@
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__)
+
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+static int _containers_unshare_parse_envint(const char *envname) {
+ char *p, *q;
+ long l;
+
+ p = getenv(envname);
+ if (p == NULL) {
+ return -1;
+ }
+ q = NULL;
+ l = strtol(p, &q, 10);
+ if ((q == NULL) || (*q != '\0')) {
+ fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
+ _exit(1);
+ }
+ unsetenv(envname);
+ return l;
+}
+
+void _containers_unshare(void)
+{
+ int pidfd, continuefd, n, pgrp, sid, ctty;
+ char buf[2048];
+
+ pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
+ if (pidfd != -1) {
+ snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
+ size_t size = write(pidfd, buf, strlen(buf));
+ if (size != strlen(buf)) {
+ fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
+ _exit(1);
+ }
+ close(pidfd);
+ }
+ continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
+ if (continuefd != -1) {
+ n = read(continuefd, buf, sizeof(buf));
+ if (n > 0) {
+ fprintf(stderr, "Error: %.*s\n", n, buf);
+ _exit(1);
+ }
+ close(continuefd);
+ }
+ sid = _containers_unshare_parse_envint("_Containers-setsid");
+ if (sid == 1) {
+ if (setsid() == -1) {
+ fprintf(stderr, "Error during setsid: %m\n");
+ _exit(1);
+ }
+ }
+ pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
+ if (pgrp == 1) {
+ if (setpgrp(0, 0) == -1) {
+ fprintf(stderr, "Error during setpgrp: %m\n");
+ _exit(1);
+ }
+ }
+ ctty = _containers_unshare_parse_envint("_Containers-ctty");
+ if (ctty != -1) {
+ if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
+ fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
+ _exit(1);
+ }
+ }
+}
+
+#endif
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
new file mode 100644
index 000000000..aec416720
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
@@ -0,0 +1,179 @@
+//go:build freebsd
+// +build freebsd
+
+package unshare
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "syscall"
+
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Cmd wraps an exec.Cmd created by the reexec package in unshare(),
+// and one day might handle setting ID maps and other related settings
+// by triggering initialization code in the child.
+type Cmd struct {
+ *exec.Cmd
+ Setsid bool
+ Setpgrp bool
+ Ctty *os.File
+ Hook func(pid int) error
+}
+
+// Command creates a new Cmd which can be customized.
+func Command(args ...string) *Cmd {
+ cmd := reexec.Command(args...)
+ return &Cmd{
+ Cmd: cmd,
+ }
+}
+
+func (c *Cmd) Start() error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // Set environment variables to tell the child to synchronize its startup.
+ if c.Env == nil {
+ c.Env = os.Environ()
+ }
+
+ // Create the pipe for reading the child's PID.
+ pidRead, pidWrite, err := os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "error creating pid pipe")
+ }
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, pidWrite)
+
+ // Create the pipe for letting the child know to proceed.
+ continueRead, continueWrite, err := os.Pipe()
+ if err != nil {
+ pidRead.Close()
+ pidWrite.Close()
+ return errors.Wrapf(err, "error creating continue pipe")
+ }
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, continueRead)
+
+ // Pass along other instructions.
+ if c.Setsid {
+ c.Env = append(c.Env, "_Containers-setsid=1")
+ }
+ if c.Setpgrp {
+ c.Env = append(c.Env, "_Containers-setpgrp=1")
+ }
+ if c.Ctty != nil {
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
+ }
+
+ // Make sure we clean up our pipes.
+ defer func() {
+ if pidRead != nil {
+ pidRead.Close()
+ }
+ if pidWrite != nil {
+ pidWrite.Close()
+ }
+ if continueRead != nil {
+ continueRead.Close()
+ }
+ if continueWrite != nil {
+ continueWrite.Close()
+ }
+ }()
+
+ // Start the new process.
+ err = c.Cmd.Start()
+ if err != nil {
+ return err
+ }
+
+ // Close the ends of the pipes that the parent doesn't need.
+ continueRead.Close()
+ continueRead = nil
+ pidWrite.Close()
+ pidWrite = nil
+
+ // Read the child's PID from the pipe.
+ pidString := ""
+ b := new(bytes.Buffer)
+ if _, err := io.Copy(b, pidRead); err != nil {
+ return errors.Wrapf(err, "Reading child PID")
+ }
+ pidString = b.String()
+ pid, err := strconv.Atoi(pidString)
+ if err != nil {
+ fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
+ return errors.Wrapf(err, "error parsing PID %q", pidString)
+ }
+
+ // Run any additional setup that we want to do before the child starts running proper.
+ if c.Hook != nil {
+ if err = c.Hook(pid); err != nil {
+ fmt.Fprintf(continueWrite, "hook error: %v", err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *Cmd) Run() error {
+ if err := c.Start(); err != nil {
+ return err
+ }
+ return c.Wait()
+}
+
+func (c *Cmd) CombinedOutput() ([]byte, error) {
+ return nil, errors.New("unshare: CombinedOutput() not implemented")
+}
+
+func (c *Cmd) Output() ([]byte, error) {
+ return nil, errors.New("unshare: Output() not implemented")
+}
+
+type Runnable interface {
+ Run() error
+}
+
+// ExecRunnable runs the specified unshare command, captures its exit status,
+// and exits with the same status.
+func ExecRunnable(cmd Runnable, cleanup func()) {
+ exit := func(status int) {
+ if cleanup != nil {
+ cleanup()
+ }
+ os.Exit(status)
+ }
+ if err := cmd.Run(); err != nil {
+ if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+ if exitError.ProcessState.Exited() {
+ if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+ if waitStatus.Exited() {
+ logrus.Debugf("%v", exitError)
+ exit(waitStatus.ExitStatus())
+ }
+ if waitStatus.Signaled() {
+ logrus.Debugf("%v", exitError)
+ exit(int(waitStatus.Signal()) + 128)
+ }
+ }
+ }
+ }
+ logrus.Errorf("%v", err)
+ logrus.Errorf("(Unable to determine exit status)")
+ exit(1)
+ }
+ exit(0)
+}
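The new FreeBSD Cmd wrapper mirrors the Linux reexec flow: the parent passes pipe file descriptors through the environment, reads the child's PID back, runs an optional Hook, then lets the child continue, and ExecRunnable propagates the child's exit status. A minimal sketch of how a caller might drive it, assuming a reexec handler named "storage-child" has been registered elsewhere via reexec.Register (the handler name is a placeholder):

```go
package main

import (
	"os"

	"github.com/containers/storage/pkg/unshare"
)

func main() {
	// Assumes a reexec handler named "storage-child" is registered elsewhere.
	cmd := unshare.Command("storage-child", "arg1")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Hook runs in the parent after the child reports its PID and before
	// the child is allowed to continue.
	cmd.Hook = func(pid int) error {
		// e.g. record the PID or apply resource limits here.
		return nil
	}
	// ExecRunnable runs the command and exits with the child's status.
	unshare.ExecRunnable(cmd, nil)
}
```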
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index baeb8f1aa..16d14d2a9 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -78,7 +78,7 @@ func getRootlessGID() int {
}
// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the
-// matching file capabilitiy
+// matching file capability
func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) {
info, err := os.Stat(path)
if err != nil {
@@ -414,17 +414,6 @@ type Runnable interface {
Run() error
}
-func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
- if err != nil {
- if format != "" {
- logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
- } else {
- logrus.Errorf("%v", err)
- }
- os.Exit(1)
- }
-}
-
// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// If we've already been through this once, no need to try again.
@@ -674,3 +663,20 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap,
}
return uid, gid, nil
}
+
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
+func HasCapSysAdmin() (bool, error) {
+ hasCapSysAdminOnce.Do(func() {
+ currentCaps, err := capability.NewPid2(0)
+ if err != nil {
+ hasCapSysAdminErr = err
+ return
+ }
+ if err = currentCaps.Load(); err != nil {
+ hasCapSysAdminErr = err
+ return
+ }
+ hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
+ })
+ return hasCapSysAdminRet, hasCapSysAdminErr
+}
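HasCapSysAdmin memoizes a single capability probe via sync.Once. A hedged sketch of how a caller could combine it with the existing re-exec helper; the fallback logic here is illustrative, not taken from this patch:

```go
package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/unshare"
)

func main() {
	// Illustrative fallback logic, not part of this patch.
	hasCapSysAdmin, err := unshare.HasCapSysAdmin()
	if err != nil {
		log.Fatalf("checking CAP_SYS_ADMIN: %v", err)
	}
	if !hasCapSysAdmin && os.Geteuid() != 0 {
		// Re-exec inside a user namespace before touching storage state.
		unshare.MaybeReexecUsingUserNamespace(false)
	}
	log.Printf("CAP_SYS_ADMIN effective: %v", hasCapSysAdmin)
}
```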
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
index bf4d567b8..66dd54596 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
@@ -1,4 +1,5 @@
-// +build !linux
+//go:build !linux && !darwin
+// +build !linux,!darwin
package unshare
@@ -43,3 +44,8 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
return nil, nil, nil
}
+
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
+func HasCapSysAdmin() (bool, error) {
+ return os.Geteuid() == 0, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
index d5f2d22a8..a6b38eda8 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
@@ -1,4 +1,5 @@
-// +build !linux,cgo
+//go:build cgo && !(linux || freebsd)
+// +build cgo,!linux,!freebsd
package unshare
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index c17dd6d37..e075bce13 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -40,6 +40,28 @@ graphroot = "/var/lib/containers/storage"
additionalimagestores = [
]
+# Allows specification of how storage is populated when pulling images. This
+# option can speed up the pulling of images compressed with the
+# zstd:chunked format. Containers/storage looks for files within images that
+# are being pulled from a container registry that were previously pulled to
+# the host. It can copy or create a hard link to an existing file when it
+# finds one, eliminating the need to pull it from the container registry.
+# These options can deduplicate pulled content and its on-disk storage, and
+# can allow the kernel to use less memory when running containers.
+
+# containers/storage supports three keys
+#   * enable_partial_images="true" | "false"
+#     Tells containers/storage to look for files previously pulled in storage
+#     rather than always pulling them from the container registry.
+#   * use_hard_links = "false" | "true"
+#     Tells containers/storage to use hard links rather than create new files
+#     in the image, if an identical file already exists in storage.
+#   * ostree_repos = ""
+#     Tells containers/storage where an ostree repository exists that might
+#     have previously pulled content which can be reused to avoid pulling
+#     content from the container registry.
+pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
+
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 45912d0ca..4b074eea2 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -21,7 +21,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/parsers"
- "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
@@ -173,6 +172,7 @@ type Store interface {
GraphRoot() string
GraphDriverName() string
GraphOptions() []string
+ PullOptions() map[string]string
UIDMap() []idtools.IDMap
GIDMap() []idtools.IDMap
@@ -607,6 +607,7 @@ type store struct {
graphRoot string
graphDriverName string
graphOptions []string
+ pullOptions map[string]string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
autoUsernsUser string
@@ -726,6 +727,7 @@ func GetStore(options types.StoreOptions) (Store, error) {
additionalGIDs: nil,
usernsLock: usernsLock,
disableVolatile: options.DisableVolatile,
+ pullOptions: options.PullOptions,
}
if err := s.load(); err != nil {
return nil, err
@@ -776,6 +778,14 @@ func (s *store) GraphOptions() []string {
return s.graphOptions
}
+func (s *store) PullOptions() map[string]string {
+ cp := make(map[string]string, len(s.pullOptions))
+ for k, v := range s.pullOptions {
+ cp[k] = v
+ }
+ return cp
+}
+
func (s *store) UIDMap() []idtools.IDMap {
return copyIDMap(s.uidMap)
}
@@ -1005,9 +1015,6 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
if err := rcstore.ReloadIfChanged(); err != nil {
return nil, -1, err
}
- if id == "" {
- id = stringid.GenerateRandomID()
- }
if options == nil {
options = &LayerOptions{}
}
@@ -1086,10 +1093,6 @@ func (s *store) CreateLayer(id, parent string, names []string, mountLabel string
}
func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
- if id == "" {
- id = stringid.GenerateRandomID()
- }
-
if layer != "" {
lstore, err := s.LayerStore()
if err != nil {
@@ -1268,9 +1271,6 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if err != nil {
return nil, err
}
- if id == "" {
- id = stringid.GenerateRandomID()
- }
var imageTopLayer *Layer
imageID := ""
@@ -1326,14 +1326,14 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
}
if cimage == nil {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", image)
}
imageID = cimage.ID
}
if options.AutoUserNs {
var err error
- options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage)
+ options.UIDMap, options.GIDMap, err = s.getAutoUserNS(&options.AutoUserNsOpts, cimage)
if err != nil {
return nil, err
}
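The Store interface gains a PullOptions accessor that hands back a copy of the pull_options map loaded from storage.conf. A minimal sketch of reading it, assuming types.DefaultStoreOptions resolves a usable configuration for the current user:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// Resolve the default store options for a root user (illustrative).
	opts, err := types.DefaultStoreOptions(false, 0)
	if err != nil {
		log.Fatal(err)
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer store.Shutdown(false)

	// PullOptions returns a copy, so mutating it does not affect the store.
	for k, v := range store.PullOptions() {
		fmt.Printf("%s = %q\n", k, v)
	}
}
```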
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index d318421a4..a55bf62c3 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -26,15 +26,24 @@ type TomlConfig struct {
}
const (
- overlayDriver = "overlay"
- overlay2 = "overlay2"
+ overlayDriver = "overlay"
+ overlay2 = "overlay2"
+ storageConfEnv = "CONTAINERS_STORAGE_CONF"
)
-func init() {
+var (
+ defaultStoreOptionsOnce sync.Once
+)
+
+func loaddefaultStoreOptions() {
defaultStoreOptions.RunRoot = defaultRunRoot
defaultStoreOptions.GraphRoot = defaultGraphRoot
defaultStoreOptions.GraphDriverName = ""
+ if path, ok := os.LookupEnv(storageConfEnv); ok {
+ defaultOverrideConfigFile = path
+ }
+
if _, err := os.Stat(defaultOverrideConfigFile); err == nil {
// The DefaultConfigFile(rootless) function returns the path
// of the used storage.conf file, by returning defaultConfigFile
@@ -64,6 +73,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
defaultRootlessGraphRoot string
err error
)
+ defaultStoreOptionsOnce.Do(loaddefaultStoreOptions)
storageOpts := defaultStoreOptions
if rootless && rootlessUID != 0 {
storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
@@ -187,6 +197,7 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
return opts, err
}
opts.RunRoot = rootlessRuntime
+ opts.PullOptions = systemOpts.PullOptions
if systemOpts.RootlessStoragePath != "" {
opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID)
if err != nil {
@@ -203,7 +214,7 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
opts.GraphDriverName = driver
}
if opts.GraphDriverName == overlay2 {
- logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.")
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
opts.GraphDriverName = overlayDriver
}
@@ -280,7 +291,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if err == nil {
keys := meta.Undecoded()
if len(keys) > 0 {
- logrus.Warningf("Failed to decode the keys %q from %q.", keys, configFile)
+ logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile)
}
} else {
if !os.IsNotExist(err) {
@@ -299,11 +310,11 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
storeOptions.GraphDriverName = config.Storage.Driver
}
if storeOptions.GraphDriverName == overlay2 {
- logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.")
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
storeOptions.GraphDriverName = overlayDriver
}
if storeOptions.GraphDriverName == "" {
- logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile)
+ logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation", configFile)
}
if config.Storage.RunRoot != "" {
storeOptions.RunRoot = config.Storage.RunRoot
@@ -390,6 +401,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
}
func Options() StoreOptions {
+ defaultStoreOptionsOnce.Do(loaddefaultStoreOptions)
return defaultStoreOptions
}
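Default store options are now computed lazily through sync.Once, and the CONTAINERS_STORAGE_CONF override is honored during that first load. A small illustration of the ordering this implies; the path is a placeholder:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/types"
)

func main() {
	// Must be set before the first Options() call, because the defaults are
	// computed exactly once via sync.Once.
	os.Setenv("CONTAINERS_STORAGE_CONF", "/tmp/storage.conf")

	opts := types.Options()
	fmt.Println("graph root:", opts.GraphRoot)
	fmt.Println("run root:  ", opts.RunRoot)
}
```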
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index 4dd1a786e..c7f0d0fad 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -170,7 +170,7 @@ func DefaultConfigFile(rootless bool) (string, error) {
return defaultConfigFile, nil
}
- if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
+ if path, ok := os.LookupEnv(storageConfEnv); ok {
return path, nil
}
if !rootless {
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 523c92dc8..13ebd4021 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -124,7 +124,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
// getMaxSizeFromImage returns the maximum ID used by the specified image.
// The layer stores must be already locked.
-func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFile string) (uint32, error) {
+func (s *store) getMaxSizeFromImage(image *Image, passwdFile, groupFile string) (uint32, error) {
lstore, err := s.LayerStore()
if err != nil {
return 0, err
@@ -183,7 +183,7 @@ outer:
// We need to create a temporary layer so we can mount it and lookup the
// maximum IDs used.
- clayer, err := rlstore.Create(id, topLayer, nil, "", nil, layerOptions, false)
+ clayer, err := rlstore.Create("", topLayer, nil, "", nil, layerOptions, false)
if err != nil {
return 0, err
}
@@ -211,7 +211,7 @@ outer:
}
// getAutoUserNS creates an automatic user namespace
-func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) {
+func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) {
requestedSize := uint32(0)
initialSize := uint32(1)
if options.Size > 0 {
@@ -250,7 +250,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image
size = s.autoNsMinSize
}
if image != nil {
- sizeFromImage, err := s.getMaxSizeFromImage(id, image, options.PasswdFile, options.GroupFile)
+ sizeFromImage, err := s.getMaxSizeFromImage(image, options.PasswdFile, options.GroupFile)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index b6bca4cef..0bbe74700 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -3347,7 +3347,7 @@ definitions:
Limits:
description: "Define resources limits."
$ref: "#/definitions/Limit"
- Reservation:
+ Reservations:
description: "Define resources reservation."
$ref: "#/definitions/ResourceObject"
RestartPolicy:
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
index 5734d47d4..d27152c0f 100644
--- a/vendor/github.com/docker/docker/pkg/system/mknod.go
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -7,12 +7,6 @@ import (
"golang.org/x/sys/unix"
)
-// Mknod creates a filesystem node (file, device special file or named pipe) named path
-// with attributes specified by mode and dev.
-func Mknod(path string, mode uint32, dev int) error {
- return unix.Mknod(path, mode, dev)
-}
-
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go
new file mode 100644
index 000000000..c890be116
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go
@@ -0,0 +1,14 @@
+//go:build freebsd
+// +build freebsd
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, uint64(dev))
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go
new file mode 100644
index 000000000..4586aad19
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go
@@ -0,0 +1,14 @@
+//go:build !freebsd && !windows
+// +build !freebsd,!windows
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, dev)
+}
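Mknod moves behind per-platform files so the FreeBSD build can pass a 64-bit dev argument while other Unix platforms keep the int form. An illustrative call through the generic wrapper, using a placeholder path and a FIFO node that needs no device number:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func main() {
	// mode combines the node type (S_IFIFO) with the permission bits;
	// the path is a placeholder for this sketch.
	if err := system.Mknod("/tmp/example-fifo", unix.S_IFIFO|0o600, 0); err != nil {
		log.Fatal(err)
	}
}
```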
diff --git a/vendor/github.com/docker/libnetwork/resolvconf/README.md b/vendor/github.com/docker/libnetwork/resolvconf/README.md
deleted file mode 100644
index cdda554ba..000000000
--- a/vendor/github.com/docker/libnetwork/resolvconf/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf
diff --git a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
deleted file mode 100644
index e348bc57f..000000000
--- a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package dns
-
-import (
- "regexp"
-)
-
-// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
-const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`
-
-// IPv4Localhost is a regex pattern for IPv4 localhost address range.
-const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})`
-
-var localhostIPRegexp = regexp.MustCompile(IPLocalhost)
-var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost)
-
-// IsLocalhost returns true if ip matches the localhost IP regular expression.
-// Used for determining if nameserver settings are being passed which are
-// localhost addresses
-func IsLocalhost(ip string) bool {
- return localhostIPRegexp.MatchString(ip)
-}
-
-// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression.
-func IsIPv4Localhost(ip string) bool {
- return localhostIPv4Regexp.MatchString(ip)
-}
diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
deleted file mode 100644
index 946bb8712..000000000
--- a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf
-package resolvconf
-
-import (
- "bytes"
- "io/ioutil"
- "regexp"
- "strings"
- "sync"
-
- "github.com/docker/docker/pkg/ioutils"
- "github.com/docker/libnetwork/resolvconf/dns"
- "github.com/docker/libnetwork/types"
- "github.com/sirupsen/logrus"
-)
-
-const (
- // defaultPath is the default path to the resolv.conf that contains information to resolve DNS. See Path().
- defaultPath = "/etc/resolv.conf"
- // alternatePath is a path different from defaultPath, that may be used to resolve DNS. See Path().
- alternatePath = "/run/systemd/resolve/resolv.conf"
-)
-
-var (
- detectSystemdResolvConfOnce sync.Once
- pathAfterSystemdDetection = defaultPath
-)
-
-// Path returns the path to the resolv.conf file that libnetwork should use.
-//
-// When /etc/resolv.conf contains 127.0.0.53 as the only nameserver, then
-// it is assumed systemd-resolved manages DNS. Because inside the container 127.0.0.53
-// is not a valid DNS server, Path() returns /run/systemd/resolve/resolv.conf
-// which is the resolv.conf that systemd-resolved generates and manages.
-// Otherwise Path() returns /etc/resolv.conf.
-//
-// Errors are silenced as they will inevitably resurface at future open/read calls.
-//
-// More information at https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html#/etc/resolv.conf
-func Path() string {
- detectSystemdResolvConfOnce.Do(func() {
- candidateResolvConf, err := ioutil.ReadFile(defaultPath)
- if err != nil {
- // silencing error as it will resurface at next calls trying to read defaultPath
- return
- }
- ns := GetNameservers(candidateResolvConf, types.IP)
- if len(ns) == 1 && ns[0] == "127.0.0.53" {
- pathAfterSystemdDetection = alternatePath
- logrus.Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath)
- }
- })
- return pathAfterSystemdDetection
-}
-
-var (
- // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS
- defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"}
- defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"}
- ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
- ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock
- // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also
- // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants
- // -- e.g. other link-local types -- either won't work in containers or are unnecessary.
- // For readability and sufficiency for Docker purposes this seemed more reasonable than a
- // 1000+ character regexp with exact and complete IPv6 validation
- ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?`
-
- localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`)
- nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`)
- nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
- nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`)
- nsIPv4Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `))\s*$`)
- searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`)
- optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`)
-)
-
-var lastModified struct {
- sync.Mutex
- sha256 string
- contents []byte
-}
-
-// File contains the resolv.conf content and its hash
-type File struct {
- Content []byte
- Hash string
-}
-
-// Get returns the contents of /etc/resolv.conf and its hash
-func Get() (*File, error) {
- return GetSpecific(Path())
-}
-
-// GetSpecific returns the contents of the user specified resolv.conf file and its hash
-func GetSpecific(path string) (*File, error) {
- resolv, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- hash, err := ioutils.HashData(bytes.NewReader(resolv))
- if err != nil {
- return nil, err
- }
- return &File{Content: resolv, Hash: hash}, nil
-}
-
-// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash
-// and, if modified since last check, returns the bytes and new hash.
-// This feature is used by the resolv.conf updater for containers
-func GetIfChanged() (*File, error) {
- lastModified.Lock()
- defer lastModified.Unlock()
-
- resolv, err := ioutil.ReadFile(Path())
- if err != nil {
- return nil, err
- }
- newHash, err := ioutils.HashData(bytes.NewReader(resolv))
- if err != nil {
- return nil, err
- }
- if lastModified.sha256 != newHash {
- lastModified.sha256 = newHash
- lastModified.contents = resolv
- return &File{Content: resolv, Hash: newHash}, nil
- }
- // nothing changed, so return no data
- return nil, nil
-}
-
-// GetLastModified retrieves the last used contents and hash of the host resolv.conf.
-// Used by containers updating on restart
-func GetLastModified() *File {
- lastModified.Lock()
- defer lastModified.Unlock()
-
- return &File{Content: lastModified.contents, Hash: lastModified.sha256}
-}
-
-// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs:
-// 1. It looks for localhost (127.*|::1) entries in the provided
-// resolv.conf, removing local nameserver entries, and, if the resulting
-// cleaned config has no defined nameservers left, adds default DNS entries
-// 2. Given the caller provides the enable/disable state of IPv6, the filter
-// code will remove all IPv6 nameservers if it is not enabled for containers
-//
-func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) {
- cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})
- // if IPv6 is not enabled, also clean out any IPv6 address nameserver
- if !ipv6Enabled {
- cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{})
- }
- // if the resulting resolvConf has no more nameservers defined, add appropriate
- // default DNS servers for IPv4 and (optionally) IPv6
- if len(GetNameservers(cleanedResolvConf, types.IP)) == 0 {
- logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns)
- dns := defaultIPv4Dns
- if ipv6Enabled {
- logrus.Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns)
- dns = append(dns, defaultIPv6Dns...)
- }
- cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...)
- }
- hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf))
- if err != nil {
- return nil, err
- }
- return &File{Content: cleanedResolvConf, Hash: hash}, nil
-}
-
-// getLines parses input into lines and strips away comments.
-func getLines(input []byte, commentMarker []byte) [][]byte {
- lines := bytes.Split(input, []byte("\n"))
- var output [][]byte
- for _, currentLine := range lines {
- var commentIndex = bytes.Index(currentLine, commentMarker)
- if commentIndex == -1 {
- output = append(output, currentLine)
- } else {
- output = append(output, currentLine[:commentIndex])
- }
- }
- return output
-}
-
-// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
-func GetNameservers(resolvConf []byte, kind int) []string {
- nameservers := []string{}
- for _, line := range getLines(resolvConf, []byte("#")) {
- var ns [][]byte
- if kind == types.IP {
- ns = nsRegexp.FindSubmatch(line)
- } else if kind == types.IPv4 {
- ns = nsIPv4Regexpmatch.FindSubmatch(line)
- } else if kind == types.IPv6 {
- ns = nsIPv6Regexpmatch.FindSubmatch(line)
- }
- if len(ns) > 0 {
- nameservers = append(nameservers, string(ns[1]))
- }
- }
- return nameservers
-}
-
-// GetNameserversAsCIDR returns nameservers (if any) listed in
-// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
-// This function's output is intended for net.ParseCIDR
-func GetNameserversAsCIDR(resolvConf []byte) []string {
- nameservers := []string{}
- for _, nameserver := range GetNameservers(resolvConf, types.IP) {
- var address string
- // If IPv6, strip zone if present
- if strings.Contains(nameserver, ":") {
- address = strings.Split(nameserver, "%")[0] + "/128"
- } else {
- address = nameserver + "/32"
- }
- nameservers = append(nameservers, address)
- }
- return nameservers
-}
-
-// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf
-// If more than one search line is encountered, only the contents of the last
-// one is returned.
-func GetSearchDomains(resolvConf []byte) []string {
- domains := []string{}
- for _, line := range getLines(resolvConf, []byte("#")) {
- match := searchRegexp.FindSubmatch(line)
- if match == nil {
- continue
- }
- domains = strings.Fields(string(match[1]))
- }
- return domains
-}
-
-// GetOptions returns options (if any) listed in /etc/resolv.conf
-// If more than one options line is encountered, only the contents of the last
-// one is returned.
-func GetOptions(resolvConf []byte) []string {
- options := []string{}
- for _, line := range getLines(resolvConf, []byte("#")) {
- match := optionsRegexp.FindSubmatch(line)
- if match == nil {
- continue
- }
- options = strings.Fields(string(match[1]))
- }
- return options
-}
-
-// Build writes a configuration file to path containing a "nameserver" entry
-// for every element in dns, a "search" entry for every element in
-// dnsSearch, and an "options" entry for every element in dnsOptions.
-func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) {
- content := bytes.NewBuffer(nil)
- if len(dnsSearch) > 0 {
- if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." {
- if _, err := content.WriteString("search " + searchString + "\n"); err != nil {
- return nil, err
- }
- }
- }
- for _, dns := range dns {
- if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil {
- return nil, err
- }
- }
- if len(dnsOptions) > 0 {
- if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" {
- if _, err := content.WriteString("options " + optsString + "\n"); err != nil {
- return nil, err
- }
- }
- }
-
- hash, err := ioutils.HashData(bytes.NewReader(content.Bytes()))
- if err != nil {
- return nil, err
- }
-
- return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644)
-}
diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go
deleted file mode 100644
index db1960c10..000000000
--- a/vendor/github.com/docker/libnetwork/types/types.go
+++ /dev/null
@@ -1,653 +0,0 @@
-// Package types contains types that are common across libnetwork project
-package types
-
-import (
- "bytes"
- "fmt"
- "net"
- "strconv"
- "strings"
-
- "github.com/ishidawataru/sctp"
-)
-
-// constants for the IP address type
-const (
- IP = iota // IPv4 and IPv6
- IPv4
- IPv6
-)
-
-// EncryptionKey is the libnetwork representation of the key distributed by the lead
-// manager.
-type EncryptionKey struct {
- Subsystem string
- Algorithm int32
- Key []byte
- LamportTime uint64
-}
-
-// UUID represents a globally unique ID of various resources like network and endpoint
-type UUID string
-
-// QosPolicy represents a quality of service policy on an endpoint
-type QosPolicy struct {
- MaxEgressBandwidth uint64
-}
-
-// TransportPort represents a local Layer 4 endpoint
-type TransportPort struct {
- Proto Protocol
- Port uint16
-}
-
-// Equal checks if this instance of Transportport is equal to the passed one
-func (t *TransportPort) Equal(o *TransportPort) bool {
- if t == o {
- return true
- }
-
- if o == nil {
- return false
- }
-
- if t.Proto != o.Proto || t.Port != o.Port {
- return false
- }
-
- return true
-}
-
-// GetCopy returns a copy of this TransportPort structure instance
-func (t *TransportPort) GetCopy() TransportPort {
- return TransportPort{Proto: t.Proto, Port: t.Port}
-}
-
-// String returns the TransportPort structure in string form
-func (t *TransportPort) String() string {
- return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port)
-}
-
-// FromString reads the TransportPort structure from string
-func (t *TransportPort) FromString(s string) error {
- ps := strings.Split(s, "/")
- if len(ps) == 2 {
- t.Proto = ParseProtocol(ps[0])
- if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil {
- t.Port = uint16(p)
- return nil
- }
- }
- return BadRequestErrorf("invalid format for transport port: %s", s)
-}
-
-// PortBinding represents a port binding between the container and the host
-type PortBinding struct {
- Proto Protocol
- IP net.IP
- Port uint16
- HostIP net.IP
- HostPort uint16
- HostPortEnd uint16
-}
-
-// HostAddr returns the host side transport address
-func (p PortBinding) HostAddr() (net.Addr, error) {
- switch p.Proto {
- case UDP:
- return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
- case TCP:
- return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
- case SCTP:
- return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.HostIP}}, Port: int(p.HostPort)}, nil
- default:
- return nil, ErrInvalidProtocolBinding(p.Proto.String())
- }
-}
-
-// ContainerAddr returns the container side transport address
-func (p PortBinding) ContainerAddr() (net.Addr, error) {
- switch p.Proto {
- case UDP:
- return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil
- case TCP:
- return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil
- case SCTP:
- return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.IP}}, Port: int(p.Port)}, nil
- default:
- return nil, ErrInvalidProtocolBinding(p.Proto.String())
- }
-}
-
-// GetCopy returns a copy of this PortBinding structure instance
-func (p *PortBinding) GetCopy() PortBinding {
- return PortBinding{
- Proto: p.Proto,
- IP: GetIPCopy(p.IP),
- Port: p.Port,
- HostIP: GetIPCopy(p.HostIP),
- HostPort: p.HostPort,
- HostPortEnd: p.HostPortEnd,
- }
-}
-
-// String returns the PortBinding structure in string form
-func (p *PortBinding) String() string {
- ret := fmt.Sprintf("%s/", p.Proto)
- if p.IP != nil {
- ret += p.IP.String()
- }
- ret = fmt.Sprintf("%s:%d/", ret, p.Port)
- if p.HostIP != nil {
- ret += p.HostIP.String()
- }
- ret = fmt.Sprintf("%s:%d", ret, p.HostPort)
- return ret
-}
-
-// FromString reads the PortBinding structure from string s.
-// String s is a triple of "protocol/containerIP:port/hostIP:port"
-// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form.
-// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported.
-// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString
-// returns an error.
-func (p *PortBinding) FromString(s string) error {
- ps := strings.Split(s, "/")
- if len(ps) != 3 {
- return BadRequestErrorf("invalid format for port binding: %s", s)
- }
-
- p.Proto = ParseProtocol(ps[0])
-
- var err error
- if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil {
- return BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error())
- }
-
- if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil {
- return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error())
- }
-
- return nil
-}
-
-func parseIPPort(s string) (net.IP, uint16, error) {
- hoststr, portstr, err := net.SplitHostPort(s)
- if err != nil {
- return nil, 0, err
- }
-
- ip := net.ParseIP(hoststr)
- if ip == nil {
- return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr)
- }
-
- port, err := strconv.ParseUint(portstr, 10, 16)
- if err != nil {
- return nil, 0, BadRequestErrorf("invalid port: %s", portstr)
- }
-
- return ip, uint16(port), nil
-}
-
-// Equal checks if this instance of PortBinding is equal to the passed one
-func (p *PortBinding) Equal(o *PortBinding) bool {
- if p == o {
- return true
- }
-
- if o == nil {
- return false
- }
-
- if p.Proto != o.Proto || p.Port != o.Port ||
- p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd {
- return false
- }
-
- if p.IP != nil {
- if !p.IP.Equal(o.IP) {
- return false
- }
- } else {
- if o.IP != nil {
- return false
- }
- }
-
- if p.HostIP != nil {
- if !p.HostIP.Equal(o.HostIP) {
- return false
- }
- } else {
- if o.HostIP != nil {
- return false
- }
- }
-
- return true
-}
-
-// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid.
-type ErrInvalidProtocolBinding string
-
-func (ipb ErrInvalidProtocolBinding) Error() string {
- return fmt.Sprintf("invalid transport protocol: %s", string(ipb))
-}
-
-const (
- // ICMP is for the ICMP ip protocol
- ICMP = 1
- // TCP is for the TCP ip protocol
- TCP = 6
- // UDP is for the UDP ip protocol
- UDP = 17
- // SCTP is for the SCTP ip protocol
- SCTP = 132
-)
-
-// Protocol represents an IP protocol number
-type Protocol uint8
-
-func (p Protocol) String() string {
- switch p {
- case ICMP:
- return "icmp"
- case TCP:
- return "tcp"
- case UDP:
- return "udp"
- case SCTP:
- return "sctp"
- default:
- return fmt.Sprintf("%d", p)
- }
-}
-
-// ParseProtocol returns the respective Protocol type for the passed string
-func ParseProtocol(s string) Protocol {
- switch strings.ToLower(s) {
- case "icmp":
- return ICMP
- case "udp":
- return UDP
- case "tcp":
- return TCP
- case "sctp":
- return SCTP
- default:
- return 0
- }
-}
-
-// GetMacCopy returns a copy of the passed MAC address
-func GetMacCopy(from net.HardwareAddr) net.HardwareAddr {
- if from == nil {
- return nil
- }
- to := make(net.HardwareAddr, len(from))
- copy(to, from)
- return to
-}
-
-// GetIPCopy returns a copy of the passed IP address
-func GetIPCopy(from net.IP) net.IP {
- if from == nil {
- return nil
- }
- to := make(net.IP, len(from))
- copy(to, from)
- return to
-}
-
-// GetIPNetCopy returns a copy of the passed IP Network
-func GetIPNetCopy(from *net.IPNet) *net.IPNet {
- if from == nil {
- return nil
- }
- bm := make(net.IPMask, len(from.Mask))
- copy(bm, from.Mask)
- return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm}
-}
-
-// GetIPNetCanonical returns the canonical form for the passed network
-func GetIPNetCanonical(nw *net.IPNet) *net.IPNet {
- if nw == nil {
- return nil
- }
- c := GetIPNetCopy(nw)
- c.IP = c.IP.Mask(nw.Mask)
- return c
-}
-
-// CompareIPNet returns equal if the two IP Networks are equal
-func CompareIPNet(a, b *net.IPNet) bool {
- if a == b {
- return true
- }
- if a == nil || b == nil {
- return false
- }
- return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask)
-}
-
-// GetMinimalIP returns the address in its shortest form
-// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned.
-// Otherwise ip is returned unchanged.
-func GetMinimalIP(ip net.IP) net.IP {
- if ip != nil && ip.To4() != nil {
- return ip.To4()
- }
- return ip
-}
-
-// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation
-func GetMinimalIPNet(nw *net.IPNet) *net.IPNet {
- if nw == nil {
- return nil
- }
- if len(nw.IP) == 16 && nw.IP.To4() != nil {
- m := nw.Mask
- if len(m) == 16 {
- m = m[12:16]
- }
- return &net.IPNet{IP: nw.IP.To4(), Mask: m}
- }
- return nw
-}
-
-// IsIPNetValid returns true if the ipnet is a valid network/mask
-// combination. Otherwise returns false.
-func IsIPNetValid(nw *net.IPNet) bool {
- return nw.String() != "0.0.0.0/0"
-}
-
-var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-
-// compareIPMask checks if the passed ip and mask are semantically compatible.
-// It returns the byte indexes for the address and mask so that caller can
-// do bitwise operations without modifying address representation.
-func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) {
- // Find the effective starting of address and mask
- if len(ip) == net.IPv6len && ip.To4() != nil {
- is = 12
- }
- if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) {
- ms = 12
- }
- // Check if address and mask are semantically compatible
- if len(ip[is:]) != len(mask[ms:]) {
- err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask)
- }
- return
-}
-
-// GetHostPartIP returns the host portion of the ip address identified by the mask.
-// IP address representation is not modified. If address and mask are not compatible
-// an error is returned.
-func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) {
- // Find the effective starting of address and mask
- is, ms, err := compareIPMask(ip, mask)
- if err != nil {
- return nil, fmt.Errorf("cannot compute host portion ip address because %s", err)
- }
-
- // Compute host portion
- out := GetIPCopy(ip)
- for i := 0; i < len(mask[ms:]); i++ {
- out[is+i] &= ^mask[ms+i]
- }
-
- return out, nil
-}
-
-// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask).
-// IP address representation is not modified. If address and mask are not compatible
-// an error is returned.
-func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) {
- // Find the effective starting of address and mask
- is, ms, err := compareIPMask(ip, mask)
- if err != nil {
- return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err)
- }
-
- // Compute broadcast address
- out := GetIPCopy(ip)
- for i := 0; i < len(mask[ms:]); i++ {
- out[is+i] |= ^mask[ms+i]
- }
-
- return out, nil
-}
-
-// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation
-func ParseCIDR(cidr string) (n *net.IPNet, e error) {
- var i net.IP
- if i, n, e = net.ParseCIDR(cidr); e == nil {
- n.IP = i
- }
- return
-}
-
-const (
- // NEXTHOP indicates a StaticRoute with an IP next hop.
- NEXTHOP = iota
-
- // CONNECTED indicates a StaticRoute with an interface for directly connected peers.
- CONNECTED
-)
-
-// StaticRoute is a statically-provisioned IP route.
-type StaticRoute struct {
- Destination *net.IPNet
-
- RouteType int // NEXT_HOP or CONNECTED
-
- // NextHop will be resolved by the kernel (i.e. as a loose hop).
- NextHop net.IP
-}
-
-// GetCopy returns a copy of this StaticRoute structure
-func (r *StaticRoute) GetCopy() *StaticRoute {
- d := GetIPNetCopy(r.Destination)
- nh := GetIPCopy(r.NextHop)
- return &StaticRoute{Destination: d,
- RouteType: r.RouteType,
- NextHop: nh,
- }
-}
-
-// InterfaceStatistics represents the interface's statistics
-type InterfaceStatistics struct {
- RxBytes uint64
- RxPackets uint64
- RxErrors uint64
- RxDropped uint64
- TxBytes uint64
- TxPackets uint64
- TxErrors uint64
- TxDropped uint64
-}
-
-func (is *InterfaceStatistics) String() string {
- return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d",
- is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped)
-}
-
-/******************************
- * Well-known Error Interfaces
- ******************************/
-
-// MaskableError is an interface for errors which can be ignored by caller
-type MaskableError interface {
- // Maskable makes implementer into MaskableError type
- Maskable()
-}
-
-// RetryError is an interface for errors which might get resolved through retry
-type RetryError interface {
- // Retry makes implementer into RetryError type
- Retry()
-}
-
-// BadRequestError is an interface for errors originated by a bad request
-type BadRequestError interface {
- // BadRequest makes implementer into BadRequestError type
- BadRequest()
-}
-
-// NotFoundError is an interface for errors raised because a needed resource is not available
-type NotFoundError interface {
- // NotFound makes implementer into NotFoundError type
- NotFound()
-}
-
-// ForbiddenError is an interface for errors which denote a valid request that cannot be honored
-type ForbiddenError interface {
- // Forbidden makes implementer into ForbiddenError type
- Forbidden()
-}
-
-// NoServiceError is an interface for errors returned when the required service is not available
-type NoServiceError interface {
- // NoService makes implementer into NoServiceError type
- NoService()
-}
-
-// TimeoutError is an interface for errors raised because of timeout
-type TimeoutError interface {
- // Timeout makes implementer into TimeoutError type
- Timeout()
-}
-
-// NotImplementedError is an interface for errors raised because of requested functionality is not yet implemented
-type NotImplementedError interface {
- // NotImplemented makes implementer into NotImplementedError type
- NotImplemented()
-}
-
-// InternalError is an interface for errors raised because of an internal error
-type InternalError interface {
- // Internal makes implementer into InternalError type
- Internal()
-}
-
-/******************************
- * Well-known Error Formatters
- ******************************/
-
-// BadRequestErrorf creates an instance of BadRequestError
-func BadRequestErrorf(format string, params ...interface{}) error {
- return badRequest(fmt.Sprintf(format, params...))
-}
-
-// NotFoundErrorf creates an instance of NotFoundError
-func NotFoundErrorf(format string, params ...interface{}) error {
- return notFound(fmt.Sprintf(format, params...))
-}
-
-// ForbiddenErrorf creates an instance of ForbiddenError
-func ForbiddenErrorf(format string, params ...interface{}) error {
- return forbidden(fmt.Sprintf(format, params...))
-}
-
-// NoServiceErrorf creates an instance of NoServiceError
-func NoServiceErrorf(format string, params ...interface{}) error {
- return noService(fmt.Sprintf(format, params...))
-}
-
-// NotImplementedErrorf creates an instance of NotImplementedError
-func NotImplementedErrorf(format string, params ...interface{}) error {
- return notImpl(fmt.Sprintf(format, params...))
-}
-
-// TimeoutErrorf creates an instance of TimeoutError
-func TimeoutErrorf(format string, params ...interface{}) error {
- return timeout(fmt.Sprintf(format, params...))
-}
-
-// InternalErrorf creates an instance of InternalError
-func InternalErrorf(format string, params ...interface{}) error {
- return internal(fmt.Sprintf(format, params...))
-}
-
-// InternalMaskableErrorf creates an instance of InternalError and MaskableError
-func InternalMaskableErrorf(format string, params ...interface{}) error {
- return maskInternal(fmt.Sprintf(format, params...))
-}
-
-// RetryErrorf creates an instance of RetryError
-func RetryErrorf(format string, params ...interface{}) error {
- return retry(fmt.Sprintf(format, params...))
-}
-
-/***********************
- * Internal Error Types
- ***********************/
-type badRequest string
-
-func (br badRequest) Error() string {
- return string(br)
-}
-func (br badRequest) BadRequest() {}
-
-type maskBadRequest string
-
-type notFound string
-
-func (nf notFound) Error() string {
- return string(nf)
-}
-func (nf notFound) NotFound() {}
-
-type forbidden string
-
-func (frb forbidden) Error() string {
- return string(frb)
-}
-func (frb forbidden) Forbidden() {}
-
-type noService string
-
-func (ns noService) Error() string {
- return string(ns)
-}
-func (ns noService) NoService() {}
-
-type maskNoService string
-
-type timeout string
-
-func (to timeout) Error() string {
- return string(to)
-}
-func (to timeout) Timeout() {}
-
-type notImpl string
-
-func (ni notImpl) Error() string {
- return string(ni)
-}
-func (ni notImpl) NotImplemented() {}
-
-type internal string
-
-func (nt internal) Error() string {
- return string(nt)
-}
-func (nt internal) Internal() {}
-
-type maskInternal string
-
-func (mnt maskInternal) Error() string {
- return string(mnt)
-}
-func (mnt maskInternal) Internal() {}
-func (mnt maskInternal) Maskable() {}
-
-type retry string
-
-func (r retry) Error() string {
- return string(r)
-}
-func (r retry) Retry() {}
diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
index 12daa3461..ec3562c97 100644
--- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS
+++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
@@ -1,209 +1 @@
-# This is the official list of go-dockerclient authors for copyright purposes.
-
-Abhishek Chanda
-Adam Bell-Hanssen
-Adnan Khan
-Adrien Kohlbecker
-Aithal
-Aldrin Leal
-Alex Dadgar
-Alfonso Acosta
-André Carvalho
-Andreas Jaekle
-Andrew Snodgrass
-Andrews Medina
-Andrey Sibiryov
-Andy Goldstein
-Anirudh Aithal
-Antoine Brechon
-Antonio Murdaca
-Artem Sidorenko
-Arthur Rodrigues
-Ben Marini
-Ben McCann
-Ben Parees
-Benno van den Berg
-Bradley Cicenas
-Brendan Fosberry
-Brett Buddin
-Brian Lalor
-Brian P. Hamachek
-Brian Palmer
-Bryan Boreham
-Burke Libbey
-Carlos Diaz-Padron
-Carson A
-Cássio Botaro
-Cesar Wong
-Cezar Sa Espinola
-Changping Chen
-Charles Teinturier
-Cheah Chu Yeow
-cheneydeng
-Chris Bednarski
-Chris Stavropoulos
-Christian Stewart
-Christophe Mourette
-Clayton Coleman
-Clint Armstrong
-CMGS
-Colin Hebert
-Craig Jellick
-Damien Lespiau
-Damon Wang
-Dan Williams
-Daniel, Dao Quang Minh
-Daniel Black
-Daniel Garcia
-Daniel Hess
-Daniel Hiltgen
-Daniel Nephin
-Daniel Tsui
-Darren Shepherd
-Dave Choi
-David Huie
-Dawn Chen
-Denis Makogon
-Derek Petersen
-Dinesh Subhraveti
-Drew Wells
-Ed
-Elias G. Schneevoigt
-Erez Horev
-Eric Anderson
-Eric Fode
-Eric J. Holmes
-Eric Mountain
-Erwin van Eyk
-Ethan Mosbaugh
-Ewout Prangsma
-Fabio Rehm
-Fatih Arslan
-Faye Salwin
-Felipe Oliveira
-Flavia Missi
-Florent Aide
-Francisco Souza
-Frank Groeneveld
-George MacRorie
-George Moura
-Grégoire Delattre
-Guilherme Rezende
-Guillermo Álvarez Fernández
-Harry Zhang
-He Simei
-Isaac Schnitzer
-Ivan Mikushin
-James Bardin
-James Nugent
-Jamie Snell
-Januar Wayong
-Jari Kolehmainen
-Jason Wilder
-Jawher Moussa
-Jean-Baptiste Dalido
-Jeff Mitchell
-Jeffrey Hulten
-Jen Andre
-Jérôme Laurens
-Jim Minter
-Johan Euphrosine
-Johannes Scheuermann
-John Hughes
-Jorge Marey
-Julian Einwag
-Kamil Domanski
-Karan Misra
-Ken Herner
-Kevin Lin
-Kevin Xu
-Kim, Hirokuni
-Kostas Lekkas
-Kyle Allan
-Kyle Quest
-Yunhee Lee
-Liron Levin
-Lior Yankovich
-Liu Peng
-Lorenz Leutgeb
-Lucas Clemente
-Lucas Weiblen
-Lyon Hill
-Mantas Matelis
-Manuel Vogel
-Marguerite des Trois Maisons
-Mariusz Borsa
-Martin Sweeney
-Máximo Cuadros Ortiz
-Michael Schmatz
-Michal Fojtik
-Mike Dillon
-Mrunal Patel
-Nate Jones
-Nathan Pemberton
-Nguyen Sy Thanh Son
-Nicholas Van Wiggeren
-Nick Ethier
-niko83
-Omeid Matten
-Orivej Desh
-Paul Bellamy
-Paul Morie
-Paul Weil
-Peng Yin
-Peter Edge
-Peter Jihoon Kim
-Peter Teich
-Phil Lu
-Philippe Lafoucrière
-Radek Simko
-Rafe Colton
-Randy Fay
-Raphaël Pinson
-Reed Allman
-RJ Catalano
-Rob Miller
-Robbert Klarenbeek
-Robert Williamson
-Roman Khlystik
-Russell Haering
-Salvador Gironès
-Sam Rijs
-Sami Wagiaalla
-Samuel Archambault
-Samuel Karp
-Sebastian Borza
-Sergey Ponomarev
-Seth Jennings
-Shane Xie
-Silas Sewell
-Simon Eskildsen
-Simon Menke
-Skolos
-Soulou
-Sridhar Ratnakumar
-Steven Jack
-Summer Mousa
-Sunjin Lee
-Sunny
-Swaroop Ramachandra
-Tarsis Azevedo
-Tim Schindler
-Timothy St. Clair
-Tobi Knaup
-Tom Wilkie
-Tomas Knappek
-Tonic
-ttyh061
-Umut Çömlekçioğlu
-upccup
-Victor Marmol
-Vijay Krishnan
-Vincenzo Prignano
-Vlad Alexandru Ionescu
-Weitao Zhou
-Wiliam Souza
-Ye Yin
-Yosuke Otosu
-Yu, Zou
-Yuriy Bogdanov
+# The official list of authors for copyright purposes can be found on GitHub: https://github.com/fsouza/go-dockerclient/graphs/contributors
diff --git a/vendor/github.com/fsouza/go-dockerclient/README.md b/vendor/github.com/fsouza/go-dockerclient/README.md
index a9a74fbc4..2323d89b1 100644
--- a/vendor/github.com/fsouza/go-dockerclient/README.md
+++ b/vendor/github.com/fsouza/go-dockerclient/README.md
@@ -25,12 +25,6 @@ implemented/merged.
For new projects, using the official SDK is probably more appropriate as
go-dockerclient lags behind the official SDK.
-When using the official SDK, keep in mind that because of how the its
-dependencies are organized, you may need some extra steps in order to be able
-to import it in your projects (see
-[#784](https://github.com/fsouza/go-dockerclient/issues/784) and
-[moby/moby#28269](https://github.com/moby/moby/issues/28269)).
-
## Example
```go
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_stats.go b/vendor/github.com/fsouza/go-dockerclient/container_stats.go
index ee2499a52..99d9faa3d 100644
--- a/vendor/github.com/fsouza/go-dockerclient/container_stats.go
+++ b/vendor/github.com/fsouza/go-dockerclient/container_stats.go
@@ -55,6 +55,30 @@ type Stats struct {
TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"`
HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"`
Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"`
+ Anon uint64 `json:"anon,omitempty" yaml:"anon,omitempty" toml:"anon,omitempty"`
+ AnonThp uint64 `json:"anon_thp,omitempty" yaml:"anon_thp,omitempty" toml:"anon_thp,omitempty"`
+ File uint64 `json:"file,omitempty" yaml:"file,omitempty" toml:"file,omitempty"`
+ FileDirty uint64 `json:"file_dirty,omitempty" yaml:"file_dirty,omitempty" toml:"file_dirty,omitempty"`
+ FileMapped uint64 `json:"file_mapped,omitempty" yaml:"file_mapped,omitempty" toml:"file_mapped,omitempty"`
+ FileWriteback uint64 `json:"file_writeback,omitempty" yaml:"file_writeback,omitempty" toml:"file_writeback,omitempty"`
+ KernelStack uint64 `json:"kernel_stack,omitempty" yaml:"kernel_stack,omitempty" toml:"kernel_stack,omitempty"`
+ Pgactivate uint64 `json:"pgactivate,omitempty" yaml:"pgactivate,omitempty" toml:"pgactivate,omitempty"`
+ Pgdeactivate uint64 `json:"pgdeactivate,omitempty" yaml:"pgdeactivate,omitempty" toml:"pgdeactivate,omitempty"`
+ Pglazyfree uint64 `json:"pglazyfree,omitempty" yaml:"pglazyfree,omitempty" toml:"pglazyfree,omitempty"`
+ Pglazyfreed uint64 `json:"pglazyfreed,omitempty" yaml:"pglazyfreed,omitempty" toml:"pglazyfreed,omitempty"`
+ Pgrefill uint64 `json:"pgrefill,omitempty" yaml:"pgrefill,omitempty" toml:"pgrefill,omitempty"`
+ Pgscan uint64 `json:"pgscan,omitempty" yaml:"pgscan,omitempty" toml:"pgscan,omitempty"`
+ Pgsteal uint64 `json:"pgsteal,omitempty" yaml:"pgsteal,omitempty" toml:"pgsteal,omitempty"`
+ Shmem uint64 `json:"shmem,omitempty" yaml:"shmem,omitempty" toml:"shmem,omitempty"`
+ Slab uint64 `json:"slab,omitempty" yaml:"slab,omitempty" toml:"slab,omitempty"`
+ SlabReclaimable uint64 `json:"slab_reclaimable,omitempty" yaml:"slab_reclaimable,omitempty" toml:"slab_reclaimable,omitempty"`
+ SlabUnreclaimable uint64 `json:"slab_unreclaimable,omitempty" yaml:"slab_unreclaimable,omitempty" toml:"slab_unreclaimable,omitempty"`
+ Sock uint64 `json:"sock,omitempty" yaml:"sock,omitempty" toml:"sock,omitempty"`
+ ThpCollapseAlloc uint64 `json:"thp_collapse_alloc,omitempty" yaml:"thp_collapse_alloc,omitempty" toml:"thp_collapse_alloc,omitempty"`
+ ThpFaultAlloc uint64 `json:"thp_fault_alloc,omitempty" yaml:"thp_fault_alloc,omitempty" toml:"thp_fault_alloc,omitempty"`
+ WorkingsetActivate uint64 `json:"workingset_activate,omitempty" yaml:"workingset_activate,omitempty" toml:"workingset_activate,omitempty"`
+ WorkingsetNodereclaim uint64 `json:"workingset_nodereclaim,omitempty" yaml:"workingset_nodereclaim,omitempty" toml:"workingset_nodereclaim,omitempty"`
+ WorkingsetRefault uint64 `json:"workingset_refault,omitempty" yaml:"workingset_refault,omitempty" toml:"workingset_refault,omitempty"`
} `json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"`
MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"`
Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"`
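The Stats struct picks up the cgroup v2 memory counters (anon, slab, the workingset_* family, and friends). A hedged sketch of reading a single sample through go-dockerclient; the container ID is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}

	statsCh := make(chan *docker.Stats)
	go func() {
		// Stream: false delivers one sample; the client closes the channel.
		if err := client.Stats(docker.StatsOptions{
			ID:     "my-container", // placeholder ID
			Stats:  statsCh,
			Stream: false,
		}); err != nil {
			log.Println(err)
		}
	}()

	for s := range statsCh {
		fmt.Println("anon:", s.MemoryStats.Stats.Anon)
		fmt.Println("slab:", s.MemoryStats.Stats.Slab)
	}
}
```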
diff --git a/vendor/github.com/fsouza/go-dockerclient/go.mod b/vendor/github.com/fsouza/go-dockerclient/go.mod
index 1e06cdeed..06fe42430 100644
--- a/vendor/github.com/fsouza/go-dockerclient/go.mod
+++ b/vendor/github.com/fsouza/go-dockerclient/go.mod
@@ -4,27 +4,31 @@ go 1.17
require (
github.com/Microsoft/go-winio v0.5.2
- github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible
+ github.com/docker/docker v20.10.17+incompatible
github.com/docker/go-units v0.4.0
github.com/google/go-cmp v0.5.8
github.com/gorilla/mux v1.8.0
- golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
+ golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
)
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
- github.com/containerd/containerd v1.6.1 // indirect
+ github.com/Microsoft/hcsshim v0.9.3 // indirect
+ github.com/containerd/cgroups v1.0.3 // indirect
+ github.com/containerd/containerd v1.6.6 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/klauspost/compress v1.11.13 // indirect
- github.com/moby/sys/mount v0.2.0 // indirect
- github.com/moby/sys/mountinfo v0.5.0 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/moby/sys/mount v0.3.3 // indirect
+ github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.0.2 // indirect
- github.com/opencontainers/runc v1.1.0 // indirect
+ github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
+ github.com/opencontainers/runc v1.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
- golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+ go.opencensus.io v0.23.0 // indirect
+ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/vendor/github.com/fsouza/go-dockerclient/go.sum b/vendor/github.com/fsouza/go-dockerclient/go.sum
index 13b0d194e..ad52e1c17 100644
--- a/vendor/github.com/fsouza/go-dockerclient/go.sum
+++ b/vendor/github.com/fsouza/go-dockerclient/go.sum
@@ -81,8 +81,9 @@ github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+V
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo=
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -172,6 +173,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
@@ -195,8 +197,9 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
-github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o=
github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0=
+github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -216,6 +219,7 @@ github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZH
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -226,6 +230,7 @@ github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6T
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
@@ -249,13 +254,16 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -298,8 +306,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible h1:DPMrerxYRbdZnOnlPPwt9QGf207ETn7FebEmxUQI3bE=
-github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
@@ -395,6 +403,7 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -457,6 +466,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -542,7 +552,6 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -576,6 +585,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -588,12 +598,13 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
-github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
+github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
+github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
@@ -615,6 +626,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -632,6 +644,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -641,6 +654,7 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -650,16 +664,18 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw=
+github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -671,6 +687,7 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -693,6 +710,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -833,6 +851,7 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
@@ -1090,13 +1109,16 @@ golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1177,6 +1199,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1333,8 +1356,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
index aa8cbd7ce..7e6f7aeee 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -8,8 +8,7 @@
[![Coverage Status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]
-
-[![GoCenter Kudos][15]][16]
+[![Become my sponsor][15]][16]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
@@ -25,8 +24,8 @@
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
-[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
-[16]: https://search.gocenter.io/github.com/imdario/mergo
+[15]: https://img.shields.io/github/sponsors/imdario
+[16]: https://github.com/sponsors/imdario
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
@@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note
-Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
@@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
-[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
-[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
+<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
### Mergo in the wild
+- [cli/cli](https://github.com/cli/cli)
- [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch)
@@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
+- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+- [tjpnz/structbot](https://github.com/tjpnz/structbot)
## Install
@@ -168,7 +169,7 @@ func main() {
Note: if test are failing due missing package, please execute:
- go get gopkg.in/yaml.v2
+ go get gopkg.in/yaml.v3
### Transformers
@@ -218,7 +219,6 @@ func main() {
}
```
-
## Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don
Written by [Dario Castañé](http://dario.im).
-## Top Contributors
-
-[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
-[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
-[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
-[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
-[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
-[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
-[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
-[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
-
-
## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
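The README above describes mergo as a helper for merging structs and maps, typically to apply configuration defaults. A minimal usage sketch, illustrative only and not part of the vendored files (the `Config` type is invented for the example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/imdario/mergo"
)

type Config struct {
	Host string
	Port int
}

func main() {
	defaults := Config{Host: "localhost", Port: 8080}
	cfg := Config{Port: 9090} // the user only set the port

	// Zero-valued fields in cfg are filled from defaults; set fields are kept.
	if err := mergo.Merge(&cfg, defaults); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Host:localhost Port:9090}
}
```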
diff --git a/vendor/github.com/imdario/mergo/go.mod b/vendor/github.com/imdario/mergo/go.mod
index 3d689d93e..6f476d6ff 100644
--- a/vendor/github.com/imdario/mergo/go.mod
+++ b/vendor/github.com/imdario/mergo/go.mod
@@ -2,4 +2,4 @@ module github.com/imdario/mergo
go 1.13
-require gopkg.in/yaml.v2 v2.3.0
+require gopkg.in/yaml.v3 v3.0.0
diff --git a/vendor/github.com/imdario/mergo/go.sum b/vendor/github.com/imdario/mergo/go.sum
index 168980da5..223abcb42 100644
--- a/vendor/github.com/imdario/mergo/go.sum
+++ b/vendor/github.com/imdario/mergo/go.sum
@@ -1,4 +1,4 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
index 8c2a8fcd9..8b4e2f47a 100644
--- a/vendor/github.com/imdario/mergo/merge.go
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
visited[h] = &visit{addr, typ, seen}
}
- if config.Transformers != nil && !isEmptyValue(dst) {
+ if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return
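The merge.go hunk above changes when a user-supplied transformer fires: it now runs whenever the destination value is valid and not a nil reflect value, rather than only when it is non-empty. A sketch of the transformer API that this guard feeds into; the `time.Time` transformer is the canonical example from mergo's own docs, not code from this patch, and the `Snapshot` type is invented for illustration:

```go
package main

import (
	"fmt"
	"log"
	"reflect"
	"time"

	"github.com/imdario/mergo"
)

// timeTransformer fills a zero time.Time from src but keeps an already-set one.
type timeTransformer struct{}

func (timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(time.Time{}) {
		return nil // fall back to mergo's default behaviour for other types
	}
	return func(dst, src reflect.Value) error {
		if dst.CanSet() && dst.Interface().(time.Time).IsZero() {
			dst.Set(src)
		}
		return nil
	}
}

type Snapshot struct {
	Taken time.Time
	Note  string
}

func main() {
	dst := Snapshot{Note: "keep me"}
	src := Snapshot{Taken: time.Now(), Note: "ignored"}

	if err := mergo.Merge(&dst, src, mergo.WithTransformers(timeTransformer{})); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst.Taken.IsZero(), dst.Note) // false keep me
}
```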
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
index 3cc926c7f..9fe362d47 100644
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -17,7 +17,7 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
- ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
@@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
- if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}
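The mergo.go hunk above widens resolveValues (and the ErrNotSupported message) so that a slice can also be a merge destination. A rough sketch of what that enables, assuming the vendored version bundled by this patch; the exact default merge behaviour for a non-empty destination slice may differ, so the example sticks to an empty destination:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/imdario/mergo"
)

func main() {
	// A slice is now an accepted destination kind (previously only structs and maps).
	var dst []string
	src := []string{"a", "b"}
	if err := mergo.Merge(&dst, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst) // [a b] -- an empty destination is filled from src

	// Anything else still fails with the (reworded) sentinel error.
	var n int
	err := mergo.Merge(&n, 42)
	fmt.Println(errors.Is(err, mergo.ErrNotSupported)) // true
}
```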
diff --git a/vendor/github.com/ishidawataru/sctp/.gitignore b/vendor/github.com/ishidawataru/sctp/.gitignore
deleted file mode 100644
index cf2d826c1..000000000
--- a/vendor/github.com/ishidawataru/sctp/.gitignore
+++ /dev/null
@@ -1,16 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
-
-example/example
diff --git a/vendor/github.com/ishidawataru/sctp/.travis.yml b/vendor/github.com/ishidawataru/sctp/.travis.yml
deleted file mode 100644
index a1c693c01..000000000
--- a/vendor/github.com/ishidawataru/sctp/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-language: go
-arch:
- - amd64
- - ppc64le
-go:
- - 1.9.x
- - 1.10.x
- - 1.11.x
- - 1.12.x
- - 1.13.x
-# allowing test cases to fail for the versions were not suppotred by ppc64le
-matrix:
- allow_failures:
- - go: 1.9.x
- - go: 1.10.x
- - go: 1.13.x
-
-
-script:
- - go test -v -race ./...
- - GOOS=linux GOARCH=amd64 go build .
- - GOOS=linux GOARCH=arm go build .
- - GOOS=linux GOARCH=arm64 go build .
- - GOOS=linux GOARCH=ppc64le go build .
- - GOOS=linux GOARCH=mips64le go build .
- - (go version | grep go1.6 > /dev/null) || GOOS=linux GOARCH=s390x go build .
-# can be compiled but not functional:
- - GOOS=linux GOARCH=386 go build .
- - GOOS=windows GOARCH=amd64 go build .
diff --git a/vendor/github.com/ishidawataru/sctp/GO_LICENSE b/vendor/github.com/ishidawataru/sctp/GO_LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/vendor/github.com/ishidawataru/sctp/GO_LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ishidawataru/sctp/LICENSE b/vendor/github.com/ishidawataru/sctp/LICENSE
deleted file mode 100644
index 8dada3eda..000000000
--- a/vendor/github.com/ishidawataru/sctp/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/ishidawataru/sctp/NOTICE b/vendor/github.com/ishidawataru/sctp/NOTICE
deleted file mode 100644
index cfb675fd4..000000000
--- a/vendor/github.com/ishidawataru/sctp/NOTICE
+++ /dev/null
@@ -1,3 +0,0 @@
-This source code includes following third party code
-
-- ipsock_linux.go : licensed by the Go authors, see GO_LICENSE file for the license which applies to the code
diff --git a/vendor/github.com/ishidawataru/sctp/README.md b/vendor/github.com/ishidawataru/sctp/README.md
deleted file mode 100644
index 574ececa8..000000000
--- a/vendor/github.com/ishidawataru/sctp/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-Stream Control Transmission Protocol (SCTP)
-----
-
-[![Build Status](https://travis-ci.org/ishidawataru/sctp.svg?branch=master)](https://travis-ci.org/ishidawataru/sctp/builds)
-
-Examples
-----
-
-See `example/sctp.go`
-
-```go
-$ cd example
-$ go build
-$ # run example SCTP server
-$ ./example -server -port 1000 -ip 10.10.0.1,10.20.0.1
-$ # run example SCTP client
-$ ./example -port 1000 -ip 10.10.0.1,10.20.0.1
-```
diff --git a/vendor/github.com/ishidawataru/sctp/go.mod b/vendor/github.com/ishidawataru/sctp/go.mod
deleted file mode 100644
index 5adf982b0..000000000
--- a/vendor/github.com/ishidawataru/sctp/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/ishidawataru/sctp
-
-go 1.12
diff --git a/vendor/github.com/ishidawataru/sctp/ipsock_linux.go b/vendor/github.com/ishidawataru/sctp/ipsock_linux.go
deleted file mode 100644
index 3df30fa46..000000000
--- a/vendor/github.com/ishidawataru/sctp/ipsock_linux.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the GO_LICENSE file.
-
-package sctp
-
-import (
- "net"
- "os"
- "sync"
- "syscall"
-)
-
-//from https://github.com/golang/go
-// Boolean to int.
-func boolint(b bool) int {
- if b {
- return 1
- }
- return 0
-}
-
-//from https://github.com/golang/go
-func ipToSockaddr(family int, ip net.IP, port int, zone string) (syscall.Sockaddr, error) {
- switch family {
- case syscall.AF_INET:
- if len(ip) == 0 {
- ip = net.IPv4zero
- }
- ip4 := ip.To4()
- if ip4 == nil {
- return nil, &net.AddrError{Err: "non-IPv4 address", Addr: ip.String()}
- }
- sa := &syscall.SockaddrInet4{Port: port}
- copy(sa.Addr[:], ip4)
- return sa, nil
- case syscall.AF_INET6:
- // In general, an IP wildcard address, which is either
- // "0.0.0.0" or "::", means the entire IP addressing
- // space. For some historical reason, it is used to
- // specify "any available address" on some operations
- // of IP node.
- //
- // When the IP node supports IPv4-mapped IPv6 address,
- // we allow an listener to listen to the wildcard
- // address of both IP addressing spaces by specifying
- // IPv6 wildcard address.
- if len(ip) == 0 || ip.Equal(net.IPv4zero) {
- ip = net.IPv6zero
- }
- // We accept any IPv6 address including IPv4-mapped
- // IPv6 address.
- ip6 := ip.To16()
- if ip6 == nil {
- return nil, &net.AddrError{Err: "non-IPv6 address", Addr: ip.String()}
- }
- //we set ZoneId to 0, as currently we use this functon only to probe the IP capabilities of the host
- //if real Zone handling is required, the zone cache implementation in golang/net should be pulled here
- sa := &syscall.SockaddrInet6{Port: port, ZoneId: 0}
- copy(sa.Addr[:], ip6)
- return sa, nil
- }
- return nil, &net.AddrError{Err: "invalid address family", Addr: ip.String()}
-}
-
-//from https://github.com/golang/go
-func sockaddr(a *net.TCPAddr, family int) (syscall.Sockaddr, error) {
- if a == nil {
- return nil, nil
- }
- return ipToSockaddr(family, a.IP, a.Port, a.Zone)
-}
-
-//from https://github.com/golang/go
-type ipStackCapabilities struct {
- sync.Once // guards following
- ipv4Enabled bool
- ipv6Enabled bool
- ipv4MappedIPv6Enabled bool
-}
-
-//from https://github.com/golang/go
-var ipStackCaps ipStackCapabilities
-
-//from https://github.com/golang/go
-// supportsIPv4 reports whether the platform supports IPv4 networking
-// functionality.
-func supportsIPv4() bool {
- ipStackCaps.Once.Do(ipStackCaps.probe)
- return ipStackCaps.ipv4Enabled
-}
-
-//from https://github.com/golang/go
-// supportsIPv6 reports whether the platform supports IPv6 networking
-// functionality.
-func supportsIPv6() bool {
- ipStackCaps.Once.Do(ipStackCaps.probe)
- return ipStackCaps.ipv6Enabled
-}
-
-//from https://github.com/golang/go
-// supportsIPv4map reports whether the platform supports mapping an
-// IPv4 address inside an IPv6 address at transport layer
-// protocols. See RFC 4291, RFC 4038 and RFC 3493.
-func supportsIPv4map() bool {
- ipStackCaps.Once.Do(ipStackCaps.probe)
- return ipStackCaps.ipv4MappedIPv6Enabled
-}
-
-//from https://github.com/golang/go
-// Probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication
-// capabilities which are controlled by the IPV6_V6ONLY socket option
-// and kernel configuration.
-//
-// Should we try to use the IPv4 socket interface if we're only
-// dealing with IPv4 sockets? As long as the host system understands
-// IPv4-mapped IPv6, it's okay to pass IPv4-mapeed IPv6 addresses to
-// the IPv6 interface. That simplifies our code and is most
-// general. Unfortunately, we need to run on kernels built without
-// IPv6 support too. So probe the kernel to figure it out.
-func (p *ipStackCapabilities) probe() {
- s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
- switch err {
- case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT:
- case nil:
- syscall.Close(s)
- p.ipv4Enabled = true
- }
- var probes = []struct {
- laddr net.TCPAddr
- value int
- }{
- // IPv6 communication capability
- {laddr: net.TCPAddr{IP: net.IPv6loopback}, value: 1},
- // IPv4-mapped IPv6 address communication capability
- {laddr: net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)}, value: 0},
- }
-
- for i := range probes {
- s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
- if err != nil {
- continue
- }
- defer syscall.Close(s)
- syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value)
- sa, err := sockaddr(&(probes[i].laddr), syscall.AF_INET6)
- if err != nil {
- continue
- }
- if err := syscall.Bind(s, sa); err != nil {
- continue
- }
- if i == 0 {
- p.ipv6Enabled = true
- } else {
- p.ipv4MappedIPv6Enabled = true
- }
- }
-}
-
-//from https://github.com/golang/go
-//Change: we check the first IP address in the list of candidate SCTP IP addresses
-func (a *SCTPAddr) isWildcard() bool {
- if a == nil {
- return true
- }
- if 0 == len(a.IPAddrs) {
- return true
- }
-
- return a.IPAddrs[0].IP.IsUnspecified()
-}
-
-func (a *SCTPAddr) family() int {
- if a != nil {
- for _, ip := range a.IPAddrs {
- if ip.IP.To4() == nil {
- return syscall.AF_INET6
- }
- }
- }
- return syscall.AF_INET
-}
-
-//from https://github.com/golang/go
-func favoriteAddrFamily(network string, laddr *SCTPAddr, raddr *SCTPAddr, mode string) (family int, ipv6only bool) {
- switch network[len(network)-1] {
- case '4':
- return syscall.AF_INET, false
- case '6':
- return syscall.AF_INET6, true
- }
-
- if mode == "listen" && (laddr == nil || laddr.isWildcard()) {
- if supportsIPv4map() || !supportsIPv4() {
- return syscall.AF_INET6, false
- }
- if laddr == nil {
- return syscall.AF_INET, false
- }
- return laddr.family(), false
- }
-
- if (laddr == nil || laddr.family() == syscall.AF_INET) &&
- (raddr == nil || raddr.family() == syscall.AF_INET) {
- return syscall.AF_INET, false
- }
- return syscall.AF_INET6, false
-}
-
-//from https://github.com/golang/go
-//Changes: it is for SCTP only
-func setDefaultSockopts(s int, family int, ipv6only bool) error {
- if family == syscall.AF_INET6 {
- // Allow both IP versions even if the OS default
- // is otherwise. Note that some operating systems
- // never admit this option.
- syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only))
- }
- // Allow broadcast.
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1))
-}
diff --git a/vendor/github.com/ishidawataru/sctp/sctp.go b/vendor/github.com/ishidawataru/sctp/sctp.go
deleted file mode 100644
index 94842f427..000000000
--- a/vendor/github.com/ishidawataru/sctp/sctp.go
+++ /dev/null
@@ -1,729 +0,0 @@
-// Copyright 2019 Wataru Ishida. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sctp
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "net"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "syscall"
- "time"
- "unsafe"
-)
-
-const (
- SOL_SCTP = 132
-
- SCTP_BINDX_ADD_ADDR = 0x01
- SCTP_BINDX_REM_ADDR = 0x02
-
- MSG_NOTIFICATION = 0x8000
-)
-
-const (
- SCTP_RTOINFO = iota
- SCTP_ASSOCINFO
- SCTP_INITMSG
- SCTP_NODELAY
- SCTP_AUTOCLOSE
- SCTP_SET_PEER_PRIMARY_ADDR
- SCTP_PRIMARY_ADDR
- SCTP_ADAPTATION_LAYER
- SCTP_DISABLE_FRAGMENTS
- SCTP_PEER_ADDR_PARAMS
- SCTP_DEFAULT_SENT_PARAM
- SCTP_EVENTS
- SCTP_I_WANT_MAPPED_V4_ADDR
- SCTP_MAXSEG
- SCTP_STATUS
- SCTP_GET_PEER_ADDR_INFO
- SCTP_DELAYED_ACK_TIME
- SCTP_DELAYED_ACK = SCTP_DELAYED_ACK_TIME
- SCTP_DELAYED_SACK = SCTP_DELAYED_ACK_TIME
-
- SCTP_SOCKOPT_BINDX_ADD = 100
- SCTP_SOCKOPT_BINDX_REM = 101
- SCTP_SOCKOPT_PEELOFF = 102
- SCTP_GET_PEER_ADDRS = 108
- SCTP_GET_LOCAL_ADDRS = 109
- SCTP_SOCKOPT_CONNECTX = 110
- SCTP_SOCKOPT_CONNECTX3 = 111
-)
-
-const (
- SCTP_EVENT_DATA_IO = 1 << iota
- SCTP_EVENT_ASSOCIATION
- SCTP_EVENT_ADDRESS
- SCTP_EVENT_SEND_FAILURE
- SCTP_EVENT_PEER_ERROR
- SCTP_EVENT_SHUTDOWN
- SCTP_EVENT_PARTIAL_DELIVERY
- SCTP_EVENT_ADAPTATION_LAYER
- SCTP_EVENT_AUTHENTICATION
- SCTP_EVENT_SENDER_DRY
-
- SCTP_EVENT_ALL = SCTP_EVENT_DATA_IO | SCTP_EVENT_ASSOCIATION | SCTP_EVENT_ADDRESS | SCTP_EVENT_SEND_FAILURE | SCTP_EVENT_PEER_ERROR | SCTP_EVENT_SHUTDOWN | SCTP_EVENT_PARTIAL_DELIVERY | SCTP_EVENT_ADAPTATION_LAYER | SCTP_EVENT_AUTHENTICATION | SCTP_EVENT_SENDER_DRY
-)
-
-type SCTPNotificationType int
-
-const (
- SCTP_SN_TYPE_BASE = SCTPNotificationType(iota + (1 << 15))
- SCTP_ASSOC_CHANGE
- SCTP_PEER_ADDR_CHANGE
- SCTP_SEND_FAILED
- SCTP_REMOTE_ERROR
- SCTP_SHUTDOWN_EVENT
- SCTP_PARTIAL_DELIVERY_EVENT
- SCTP_ADAPTATION_INDICATION
- SCTP_AUTHENTICATION_INDICATION
- SCTP_SENDER_DRY_EVENT
-)
-
-type NotificationHandler func([]byte) error
-
-type EventSubscribe struct {
- DataIO uint8
- Association uint8
- Address uint8
- SendFailure uint8
- PeerError uint8
- Shutdown uint8
- PartialDelivery uint8
- AdaptationLayer uint8
- Authentication uint8
- SenderDry uint8
-}
-
-const (
- SCTP_CMSG_INIT = iota
- SCTP_CMSG_SNDRCV
- SCTP_CMSG_SNDINFO
- SCTP_CMSG_RCVINFO
- SCTP_CMSG_NXTINFO
-)
-
-const (
- SCTP_UNORDERED = 1 << iota
- SCTP_ADDR_OVER
- SCTP_ABORT
- SCTP_SACK_IMMEDIATELY
- SCTP_EOF
-)
-
-const (
- SCTP_MAX_STREAM = 0xffff
-)
-
-type InitMsg struct {
- NumOstreams uint16
- MaxInstreams uint16
- MaxAttempts uint16
- MaxInitTimeout uint16
-}
-
-type SndRcvInfo struct {
- Stream uint16
- SSN uint16
- Flags uint16
- _ uint16
- PPID uint32
- Context uint32
- TTL uint32
- TSN uint32
- CumTSN uint32
- AssocID int32
-}
-
-type SndInfo struct {
- SID uint16
- Flags uint16
- PPID uint32
- Context uint32
- AssocID int32
-}
-
-type GetAddrsOld struct {
- AssocID int32
- AddrNum int32
- Addrs uintptr
-}
-
-type NotificationHeader struct {
- Type uint16
- Flags uint16
- Length uint32
-}
-
-type SCTPState uint16
-
-const (
- SCTP_COMM_UP = SCTPState(iota)
- SCTP_COMM_LOST
- SCTP_RESTART
- SCTP_SHUTDOWN_COMP
- SCTP_CANT_STR_ASSOC
-)
-
-var nativeEndian binary.ByteOrder
-var sndRcvInfoSize uintptr
-
-func init() {
- i := uint16(1)
- if *(*byte)(unsafe.Pointer(&i)) == 0 {
- nativeEndian = binary.BigEndian
- } else {
- nativeEndian = binary.LittleEndian
- }
- info := SndRcvInfo{}
- sndRcvInfoSize = unsafe.Sizeof(info)
-}
-
-func toBuf(v interface{}) []byte {
- var buf bytes.Buffer
- binary.Write(&buf, nativeEndian, v)
- return buf.Bytes()
-}
-
-func htons(h uint16) uint16 {
- if nativeEndian == binary.LittleEndian {
- return (h << 8 & 0xff00) | (h >> 8 & 0xff)
- }
- return h
-}
-
-var ntohs = htons
-
-// setInitOpts sets options for an SCTP association initialization
-// see https://tools.ietf.org/html/rfc4960#page-25
-func setInitOpts(fd int, options InitMsg) error {
- optlen := unsafe.Sizeof(options)
- _, _, err := setsockopt(fd, SCTP_INITMSG, uintptr(unsafe.Pointer(&options)), uintptr(optlen))
- return err
-}
-
-func setNumOstreams(fd, num int) error {
- return setInitOpts(fd, InitMsg{NumOstreams: uint16(num)})
-}
-
-type SCTPAddr struct {
- IPAddrs []net.IPAddr
- Port int
-}
-
-func (a *SCTPAddr) ToRawSockAddrBuf() []byte {
- p := htons(uint16(a.Port))
- if len(a.IPAddrs) == 0 { // if a.IPAddrs list is empty - fall back to IPv4 zero addr
- s := syscall.RawSockaddrInet4{
- Family: syscall.AF_INET,
- Port: p,
- }
- copy(s.Addr[:], net.IPv4zero)
- return toBuf(s)
- }
- buf := []byte{}
- for _, ip := range a.IPAddrs {
- ipBytes := ip.IP
- if len(ipBytes) == 0 {
- ipBytes = net.IPv4zero
- }
- if ip4 := ipBytes.To4(); ip4 != nil {
- s := syscall.RawSockaddrInet4{
- Family: syscall.AF_INET,
- Port: p,
- }
- copy(s.Addr[:], ip4)
- buf = append(buf, toBuf(s)...)
- } else {
- var scopeid uint32
- ifi, err := net.InterfaceByName(ip.Zone)
- if err == nil {
- scopeid = uint32(ifi.Index)
- }
- s := syscall.RawSockaddrInet6{
- Family: syscall.AF_INET6,
- Port: p,
- Scope_id: scopeid,
- }
- copy(s.Addr[:], ipBytes)
- buf = append(buf, toBuf(s)...)
- }
- }
- return buf
-}
-
-func (a *SCTPAddr) String() string {
- var b bytes.Buffer
-
- for n, i := range a.IPAddrs {
- if i.IP.To4() != nil {
- b.WriteString(i.String())
- } else if i.IP.To16() != nil {
- b.WriteRune('[')
- b.WriteString(i.String())
- b.WriteRune(']')
- }
- if n < len(a.IPAddrs)-1 {
- b.WriteRune('/')
- }
- }
- b.WriteRune(':')
- b.WriteString(strconv.Itoa(a.Port))
- return b.String()
-}
-
-func (a *SCTPAddr) Network() string { return "sctp" }
-
-func ResolveSCTPAddr(network, addrs string) (*SCTPAddr, error) {
- tcpnet := ""
- switch network {
- case "", "sctp":
- tcpnet = "tcp"
- case "sctp4":
- tcpnet = "tcp4"
- case "sctp6":
- tcpnet = "tcp6"
- default:
- return nil, fmt.Errorf("invalid net: %s", network)
- }
- elems := strings.Split(addrs, "/")
- if len(elems) == 0 {
- return nil, fmt.Errorf("invalid input: %s", addrs)
- }
- ipaddrs := make([]net.IPAddr, 0, len(elems))
- for _, e := range elems[:len(elems)-1] {
- tcpa, err := net.ResolveTCPAddr(tcpnet, e+":")
- if err != nil {
- return nil, err
- }
- ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone})
- }
- tcpa, err := net.ResolveTCPAddr(tcpnet, elems[len(elems)-1])
- if err != nil {
- return nil, err
- }
- if tcpa.IP != nil {
- ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone})
- } else {
- ipaddrs = nil
- }
- return &SCTPAddr{
- IPAddrs: ipaddrs,
- Port: tcpa.Port,
- }, nil
-}
-
-func SCTPConnect(fd int, addr *SCTPAddr) (int, error) {
- buf := addr.ToRawSockAddrBuf()
- param := GetAddrsOld{
- AddrNum: int32(len(buf)),
- Addrs: uintptr(uintptr(unsafe.Pointer(&buf[0]))),
- }
- optlen := unsafe.Sizeof(param)
- _, _, err := getsockopt(fd, SCTP_SOCKOPT_CONNECTX3, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
- if err == nil {
- return int(param.AssocID), nil
- } else if err != syscall.ENOPROTOOPT {
- return 0, err
- }
- r0, _, err := setsockopt(fd, SCTP_SOCKOPT_CONNECTX, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
- return int(r0), err
-}
-
-func SCTPBind(fd int, addr *SCTPAddr, flags int) error {
- var option uintptr
- switch flags {
- case SCTP_BINDX_ADD_ADDR:
- option = SCTP_SOCKOPT_BINDX_ADD
- case SCTP_BINDX_REM_ADDR:
- option = SCTP_SOCKOPT_BINDX_REM
- default:
- return syscall.EINVAL
- }
-
- buf := addr.ToRawSockAddrBuf()
- _, _, err := setsockopt(fd, option, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
- return err
-}
-
-type SCTPConn struct {
- _fd int32
- notificationHandler NotificationHandler
-}
-
-func (c *SCTPConn) fd() int {
- return int(atomic.LoadInt32(&c._fd))
-}
-
-func NewSCTPConn(fd int, handler NotificationHandler) *SCTPConn {
- conn := &SCTPConn{
- _fd: int32(fd),
- notificationHandler: handler,
- }
- return conn
-}
-
-func (c *SCTPConn) Write(b []byte) (int, error) {
- return c.SCTPWrite(b, nil)
-}
-
-func (c *SCTPConn) Read(b []byte) (int, error) {
- n, _, err := c.SCTPRead(b)
- if n < 0 {
- n = 0
- }
- return n, err
-}
-
-func (c *SCTPConn) SetInitMsg(numOstreams, maxInstreams, maxAttempts, maxInitTimeout int) error {
- return setInitOpts(c.fd(), InitMsg{
- NumOstreams: uint16(numOstreams),
- MaxInstreams: uint16(maxInstreams),
- MaxAttempts: uint16(maxAttempts),
- MaxInitTimeout: uint16(maxInitTimeout),
- })
-}
-
-func (c *SCTPConn) SubscribeEvents(flags int) error {
- var d, a, ad, sf, p, sh, pa, ada, au, se uint8
- if flags&SCTP_EVENT_DATA_IO > 0 {
- d = 1
- }
- if flags&SCTP_EVENT_ASSOCIATION > 0 {
- a = 1
- }
- if flags&SCTP_EVENT_ADDRESS > 0 {
- ad = 1
- }
- if flags&SCTP_EVENT_SEND_FAILURE > 0 {
- sf = 1
- }
- if flags&SCTP_EVENT_PEER_ERROR > 0 {
- p = 1
- }
- if flags&SCTP_EVENT_SHUTDOWN > 0 {
- sh = 1
- }
- if flags&SCTP_EVENT_PARTIAL_DELIVERY > 0 {
- pa = 1
- }
- if flags&SCTP_EVENT_ADAPTATION_LAYER > 0 {
- ada = 1
- }
- if flags&SCTP_EVENT_AUTHENTICATION > 0 {
- au = 1
- }
- if flags&SCTP_EVENT_SENDER_DRY > 0 {
- se = 1
- }
- param := EventSubscribe{
- DataIO: d,
- Association: a,
- Address: ad,
- SendFailure: sf,
- PeerError: p,
- Shutdown: sh,
- PartialDelivery: pa,
- AdaptationLayer: ada,
- Authentication: au,
- SenderDry: se,
- }
- optlen := unsafe.Sizeof(param)
- _, _, err := setsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(&param)), uintptr(optlen))
- return err
-}
-
-func (c *SCTPConn) SubscribedEvents() (int, error) {
- param := EventSubscribe{}
- optlen := unsafe.Sizeof(param)
- _, _, err := getsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
- if err != nil {
- return 0, err
- }
- var flags int
- if param.DataIO > 0 {
- flags |= SCTP_EVENT_DATA_IO
- }
- if param.Association > 0 {
- flags |= SCTP_EVENT_ASSOCIATION
- }
- if param.Address > 0 {
- flags |= SCTP_EVENT_ADDRESS
- }
- if param.SendFailure > 0 {
- flags |= SCTP_EVENT_SEND_FAILURE
- }
- if param.PeerError > 0 {
- flags |= SCTP_EVENT_PEER_ERROR
- }
- if param.Shutdown > 0 {
- flags |= SCTP_EVENT_SHUTDOWN
- }
- if param.PartialDelivery > 0 {
- flags |= SCTP_EVENT_PARTIAL_DELIVERY
- }
- if param.AdaptationLayer > 0 {
- flags |= SCTP_EVENT_ADAPTATION_LAYER
- }
- if param.Authentication > 0 {
- flags |= SCTP_EVENT_AUTHENTICATION
- }
- if param.SenderDry > 0 {
- flags |= SCTP_EVENT_SENDER_DRY
- }
- return flags, nil
-}
-
-func (c *SCTPConn) SetDefaultSentParam(info *SndRcvInfo) error {
- optlen := unsafe.Sizeof(*info)
- _, _, err := setsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(optlen))
- return err
-}
-
-func (c *SCTPConn) GetDefaultSentParam() (*SndRcvInfo, error) {
- info := &SndRcvInfo{}
- optlen := unsafe.Sizeof(*info)
- _, _, err := getsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(&optlen)))
- return info, err
-}
-
-func resolveFromRawAddr(ptr unsafe.Pointer, n int) (*SCTPAddr, error) {
- addr := &SCTPAddr{
- IPAddrs: make([]net.IPAddr, n),
- }
-
- switch family := (*(*syscall.RawSockaddrAny)(ptr)).Addr.Family; family {
- case syscall.AF_INET:
- addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port)))
- tmp := syscall.RawSockaddrInet4{}
- size := unsafe.Sizeof(tmp)
- for i := 0; i < n; i++ {
- a := *(*syscall.RawSockaddrInet4)(unsafe.Pointer(
- uintptr(ptr) + size*uintptr(i)))
- addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:]}
- }
- case syscall.AF_INET6:
- addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port)))
- tmp := syscall.RawSockaddrInet6{}
- size := unsafe.Sizeof(tmp)
- for i := 0; i < n; i++ {
- a := *(*syscall.RawSockaddrInet6)(unsafe.Pointer(
- uintptr(ptr) + size*uintptr(i)))
- var zone string
- ifi, err := net.InterfaceByIndex(int(a.Scope_id))
- if err == nil {
- zone = ifi.Name
- }
- addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:], Zone: zone}
- }
- default:
- return nil, fmt.Errorf("unknown address family: %d", family)
- }
- return addr, nil
-}
-
-func sctpGetAddrs(fd, id, optname int) (*SCTPAddr, error) {
-
- type getaddrs struct {
- assocId int32
- addrNum uint32
- addrs [4096]byte
- }
- param := getaddrs{
- assocId: int32(id),
- }
- optlen := unsafe.Sizeof(param)
- _, _, err := getsockopt(fd, uintptr(optname), uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
- if err != nil {
- return nil, err
- }
- return resolveFromRawAddr(unsafe.Pointer(&param.addrs), int(param.addrNum))
-}
-
-func (c *SCTPConn) SCTPGetPrimaryPeerAddr() (*SCTPAddr, error) {
-
- type sctpGetSetPrim struct {
- assocId int32
- addrs [128]byte
- }
- param := sctpGetSetPrim{
- assocId: int32(0),
- }
- optlen := unsafe.Sizeof(param)
- _, _, err := getsockopt(c.fd(), SCTP_PRIMARY_ADDR, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
- if err != nil {
- return nil, err
- }
- return resolveFromRawAddr(unsafe.Pointer(&param.addrs), 1)
-}
-
-func (c *SCTPConn) SCTPLocalAddr(id int) (*SCTPAddr, error) {
- return sctpGetAddrs(c.fd(), id, SCTP_GET_LOCAL_ADDRS)
-}
-
-func (c *SCTPConn) SCTPRemoteAddr(id int) (*SCTPAddr, error) {
- return sctpGetAddrs(c.fd(), id, SCTP_GET_PEER_ADDRS)
-}
-
-func (c *SCTPConn) LocalAddr() net.Addr {
- addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_LOCAL_ADDRS)
- if err != nil {
- return nil
- }
- return addr
-}
-
-func (c *SCTPConn) RemoteAddr() net.Addr {
- addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_PEER_ADDRS)
- if err != nil {
- return nil
- }
- return addr
-}
-
-func (c *SCTPConn) PeelOff(id int) (*SCTPConn, error) {
- type peeloffArg struct {
- assocId int32
- sd int
- }
- param := peeloffArg{
- assocId: int32(id),
- }
- optlen := unsafe.Sizeof(param)
- _, _, err := getsockopt(c.fd(), SCTP_SOCKOPT_PEELOFF, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
- if err != nil {
- return nil, err
- }
- return &SCTPConn{_fd: int32(param.sd)}, nil
-}
-
-func (c *SCTPConn) SetDeadline(t time.Time) error {
- return syscall.EOPNOTSUPP
-}
-
-func (c *SCTPConn) SetReadDeadline(t time.Time) error {
- return syscall.EOPNOTSUPP
-}
-
-func (c *SCTPConn) SetWriteDeadline(t time.Time) error {
- return syscall.EOPNOTSUPP
-}
-
-type SCTPListener struct {
- fd int
- m sync.Mutex
-}
-
-func (ln *SCTPListener) Addr() net.Addr {
- laddr, err := sctpGetAddrs(ln.fd, 0, SCTP_GET_LOCAL_ADDRS)
- if err != nil {
- return nil
- }
- return laddr
-}
-
-type SCTPSndRcvInfoWrappedConn struct {
- conn *SCTPConn
-}
-
-func NewSCTPSndRcvInfoWrappedConn(conn *SCTPConn) *SCTPSndRcvInfoWrappedConn {
- conn.SubscribeEvents(SCTP_EVENT_DATA_IO)
- return &SCTPSndRcvInfoWrappedConn{conn}
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) Write(b []byte) (int, error) {
- if len(b) < int(sndRcvInfoSize) {
- return 0, syscall.EINVAL
- }
- info := (*SndRcvInfo)(unsafe.Pointer(&b[0]))
- n, err := c.conn.SCTPWrite(b[sndRcvInfoSize:], info)
- return n + int(sndRcvInfoSize), err
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) Read(b []byte) (int, error) {
- if len(b) < int(sndRcvInfoSize) {
- return 0, syscall.EINVAL
- }
- n, info, err := c.conn.SCTPRead(b[sndRcvInfoSize:])
- if err != nil {
- return n, err
- }
- copy(b, toBuf(info))
- return n + int(sndRcvInfoSize), err
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) Close() error {
- return c.conn.Close()
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) SetDeadline(t time.Time) error {
- return c.conn.SetDeadline(t)
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) SetWriteDeadline(t time.Time) error {
- return c.conn.SetWriteDeadline(t)
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) SetWriteBuffer(bytes int) error {
- return c.conn.SetWriteBuffer(bytes)
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) GetWriteBuffer() (int, error) {
- return c.conn.GetWriteBuffer()
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) SetReadBuffer(bytes int) error {
- return c.conn.SetReadBuffer(bytes)
-}
-
-func (c *SCTPSndRcvInfoWrappedConn) GetReadBuffer() (int, error) {
- return c.conn.GetReadBuffer()
-}
-
-// SocketConfig contains options for the SCTP socket.
-type SocketConfig struct {
- // If Control is not nil it is called after the socket is created but before
- // it is bound or connected.
- Control func(network, address string, c syscall.RawConn) error
-
- // InitMsg is the options to send in the initial SCTP message
- InitMsg InitMsg
-}
-
-func (cfg *SocketConfig) Listen(net string, laddr *SCTPAddr) (*SCTPListener, error) {
- return listenSCTPExtConfig(net, laddr, cfg.InitMsg, cfg.Control)
-}
-
-func (cfg *SocketConfig) Dial(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) {
- return dialSCTPExtConfig(net, laddr, raddr, cfg.InitMsg, cfg.Control)
-}
diff --git a/vendor/github.com/ishidawataru/sctp/sctp_linux.go b/vendor/github.com/ishidawataru/sctp/sctp_linux.go
deleted file mode 100644
index d96d09e5c..000000000
--- a/vendor/github.com/ishidawataru/sctp/sctp_linux.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// +build linux,!386
-// Copyright 2019 Wataru Ishida. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sctp
-
-import (
- "io"
- "net"
- "sync/atomic"
- "syscall"
- "unsafe"
-)
-
-func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
- // FIXME: syscall.SYS_SETSOCKOPT is undefined on 386
- r0, r1, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT,
- uintptr(fd),
- SOL_SCTP,
- optname,
- optval,
- optlen,
- 0)
- if errno != 0 {
- return r0, r1, errno
- }
- return r0, r1, nil
-}
-
-func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
- // FIXME: syscall.SYS_GETSOCKOPT is undefined on 386
- r0, r1, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT,
- uintptr(fd),
- SOL_SCTP,
- optname,
- optval,
- optlen,
- 0)
- if errno != 0 {
- return r0, r1, errno
- }
- return r0, r1, nil
-}
-
-type rawConn struct {
- sockfd int
-}
-
-func (r rawConn) Control(f func(fd uintptr)) error {
- f(uintptr(r.sockfd))
- return nil
-}
-
-func (r rawConn) Read(f func(fd uintptr) (done bool)) error {
- panic("not implemented")
-}
-
-func (r rawConn) Write(f func(fd uintptr) (done bool)) error {
- panic("not implemented")
-}
-
-func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) {
- var cbuf []byte
- if info != nil {
- cmsgBuf := toBuf(info)
- hdr := &syscall.Cmsghdr{
- Level: syscall.IPPROTO_SCTP,
- Type: SCTP_CMSG_SNDRCV,
- }
-
- // bitwidth of hdr.Len is platform-specific,
- // so we use hdr.SetLen() rather than directly setting hdr.Len
- hdr.SetLen(syscall.CmsgSpace(len(cmsgBuf)))
- cbuf = append(toBuf(hdr), cmsgBuf...)
- }
- return syscall.SendmsgN(c.fd(), b, cbuf, nil, 0)
-}
-
-func parseSndRcvInfo(b []byte) (*SndRcvInfo, error) {
- msgs, err := syscall.ParseSocketControlMessage(b)
- if err != nil {
- return nil, err
- }
- for _, m := range msgs {
- if m.Header.Level == syscall.IPPROTO_SCTP {
- switch m.Header.Type {
- case SCTP_CMSG_SNDRCV:
- return (*SndRcvInfo)(unsafe.Pointer(&m.Data[0])), nil
- }
- }
- }
- return nil, nil
-}
-
-func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) {
- oob := make([]byte, 254)
- for {
- n, oobn, recvflags, _, err := syscall.Recvmsg(c.fd(), b, oob, 0)
- if err != nil {
- return n, nil, err
- }
-
- if n == 0 && oobn == 0 {
- return 0, nil, io.EOF
- }
-
- if recvflags&MSG_NOTIFICATION > 0 && c.notificationHandler != nil {
- if err := c.notificationHandler(b[:n]); err != nil {
- return 0, nil, err
- }
- } else {
- var info *SndRcvInfo
- if oobn > 0 {
- info, err = parseSndRcvInfo(oob[:oobn])
- }
- return n, info, err
- }
- }
-}
-
-func (c *SCTPConn) Close() error {
- if c != nil {
- fd := atomic.SwapInt32(&c._fd, -1)
- if fd > 0 {
- info := &SndRcvInfo{
- Flags: SCTP_EOF,
- }
- c.SCTPWrite(nil, info)
- syscall.Shutdown(int(fd), syscall.SHUT_RDWR)
- return syscall.Close(int(fd))
- }
- }
- return syscall.EBADF
-}
-
-func (c *SCTPConn) SetWriteBuffer(bytes int) error {
- return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes)
-}
-
-func (c *SCTPConn) GetWriteBuffer() (int, error) {
- return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF)
-}
-
-func (c *SCTPConn) SetReadBuffer(bytes int) error {
- return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes)
-}
-
-func (c *SCTPConn) GetReadBuffer() (int, error) {
- return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF)
-}
-
-// ListenSCTP - start listener on specified address/port
-func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) {
- return ListenSCTPExt(net, laddr, InitMsg{NumOstreams: SCTP_MAX_STREAM})
-}
-
-// ListenSCTPExt - start listener on specified address/port with given SCTP options
-func ListenSCTPExt(network string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) {
- return listenSCTPExtConfig(network, laddr, options, nil)
-}
-
-// listenSCTPExtConfig - start listener on specified address/port with given SCTP options and socket configuration
-func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPListener, error) {
- af, ipv6only := favoriteAddrFamily(network, laddr, nil, "listen")
- sock, err := syscall.Socket(
- af,
- syscall.SOCK_STREAM,
- syscall.IPPROTO_SCTP,
- )
- if err != nil {
- return nil, err
- }
-
- // close socket on error
- defer func() {
- if err != nil {
- syscall.Close(sock)
- }
- }()
- if err = setDefaultSockopts(sock, af, ipv6only); err != nil {
- return nil, err
- }
- if control != nil {
- rc := rawConn{sockfd: sock}
- if err = control(network, laddr.String(), rc); err != nil {
- return nil, err
- }
- }
- err = setInitOpts(sock, options)
- if err != nil {
- return nil, err
- }
-
- if laddr != nil {
- // If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address
- if len(laddr.IPAddrs) == 0 {
- if af == syscall.AF_INET {
- laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero})
- } else if af == syscall.AF_INET6 {
- laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero})
- }
- }
- err = SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR)
- if err != nil {
- return nil, err
- }
- }
- err = syscall.Listen(sock, syscall.SOMAXCONN)
- if err != nil {
- return nil, err
- }
- return &SCTPListener{
- fd: sock,
- }, nil
-}
-
-// AcceptSCTP waits for and returns the next SCTP connection to the listener.
-func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) {
- fd, _, err := syscall.Accept4(ln.fd, 0)
- return NewSCTPConn(fd, nil), err
-}
-
-// Accept waits for and returns the next connection to the listener.
-func (ln *SCTPListener) Accept() (net.Conn, error) {
- return ln.AcceptSCTP()
-}
-
-func (ln *SCTPListener) Close() error {
- syscall.Shutdown(ln.fd, syscall.SHUT_RDWR)
- return syscall.Close(ln.fd)
-}
-
-// DialSCTP - bind socket to laddr (if given) and connect to raddr
-func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) {
- return DialSCTPExt(net, laddr, raddr, InitMsg{NumOstreams: SCTP_MAX_STREAM})
-}
-
-// DialSCTPExt - same as DialSCTP but with given SCTP options
-func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) {
- return dialSCTPExtConfig(network, laddr, raddr, options, nil)
-}
-
-// dialSCTPExtConfig - same as DialSCTP but with given SCTP options and socket configuration
-func dialSCTPExtConfig(network string, laddr, raddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPConn, error) {
- af, ipv6only := favoriteAddrFamily(network, laddr, raddr, "dial")
- sock, err := syscall.Socket(
- af,
- syscall.SOCK_STREAM,
- syscall.IPPROTO_SCTP,
- )
- if err != nil {
- return nil, err
- }
-
- // close socket on error
- defer func() {
- if err != nil {
- syscall.Close(sock)
- }
- }()
- if err = setDefaultSockopts(sock, af, ipv6only); err != nil {
- return nil, err
- }
- if control != nil {
- rc := rawConn{sockfd: sock}
- if err = control(network, laddr.String(), rc); err != nil {
- return nil, err
- }
- }
- err = setInitOpts(sock, options)
- if err != nil {
- return nil, err
- }
- if laddr != nil {
- // If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address
- if len(laddr.IPAddrs) == 0 {
- if af == syscall.AF_INET {
- laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero})
- } else if af == syscall.AF_INET6 {
- laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero})
- }
- }
- err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR)
- if err != nil {
- return nil, err
- }
- }
- _, err = SCTPConnect(sock, raddr)
- if err != nil {
- return nil, err
- }
- return NewSCTPConn(sock, nil), nil
-}
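
For reference, the entry points of the removed ishidawataru/sctp package are visible above: ResolveSCTPAddr parses "addr1/addr2:port" style addresses, ListenSCTP and DialSCTP open IPPROTO_SCTP stream sockets, and the returned listener/conn types behave like net.Listener and net.Conn. A minimal usage sketch, assuming the vendored import path and a placeholder loopback address/port:

package main

import (
	"log"

	"github.com/ishidawataru/sctp"
)

func main() {
	// Placeholder loopback address; ResolveSCTPAddr also accepts the
	// multi-homed "addr1/addr2:port" form parsed by the deleted code above.
	laddr, err := sctp.ResolveSCTPAddr("sctp", "127.0.0.1:9999")
	if err != nil {
		log.Fatal(err)
	}

	// ListenSCTP uses the default init options (SCTP_MAX_STREAM outbound streams).
	ln, err := sctp.ListenSCTP("sctp", laddr)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	done := make(chan struct{})
	go func() {
		defer close(done)
		conn, err := ln.Accept() // net.Conn backed by *sctp.SCTPConn
		if err != nil {
			return
		}
		defer conn.Close()
		buf := make([]byte, 64)
		n, _ := conn.Read(buf)
		log.Printf("server received %q", buf[:n])
	}()

	// Dial with a nil local address; the kernel picks the source address.
	conn, err := sctp.DialSCTP("sctp", nil, laddr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if _, err := conn.Write([]byte("ping")); err != nil {
		log.Fatal(err)
	}
	<-done
}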
diff --git a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go
deleted file mode 100644
index 118fe159e..000000000
--- a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// +build !linux linux,386
-// Copyright 2019 Wataru Ishida. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sctp
-
-import (
- "errors"
- "net"
- "runtime"
- "syscall"
-)
-
-var ErrUnsupported = errors.New("SCTP is unsupported on " + runtime.GOOS + "/" + runtime.GOARCH)
-
-func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
- return 0, 0, ErrUnsupported
-}
-
-func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
- return 0, 0, ErrUnsupported
-}
-
-func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) {
- return 0, ErrUnsupported
-}
-
-func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) {
- return 0, nil, ErrUnsupported
-}
-
-func (c *SCTPConn) Close() error {
- return ErrUnsupported
-}
-
-func (c *SCTPConn) SetWriteBuffer(bytes int) error {
- return ErrUnsupported
-}
-
-func (c *SCTPConn) GetWriteBuffer() (int, error) {
- return 0, ErrUnsupported
-}
-
-func (c *SCTPConn) SetReadBuffer(bytes int) error {
- return ErrUnsupported
-}
-
-func (c *SCTPConn) GetReadBuffer() (int, error) {
- return 0, ErrUnsupported
-}
-
-func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) {
- return nil, ErrUnsupported
-}
-
-func ListenSCTPExt(net string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) {
- return nil, ErrUnsupported
-}
-
-func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPListener, error) {
- return nil, ErrUnsupported
-}
-
-func (ln *SCTPListener) Accept() (net.Conn, error) {
- return nil, ErrUnsupported
-}
-
-func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) {
- return nil, ErrUnsupported
-}
-
-func (ln *SCTPListener) Close() error {
- return ErrUnsupported
-}
-
-func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) {
- return nil, ErrUnsupported
-}
-
-func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) {
- return nil, ErrUnsupported
-}
-
-func dialSCTPExtConfig(network string, laddr, raddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPConn, error) {
- return nil, ErrUnsupported
-}
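
The two deleted files above pair a real implementation (built with "+build linux,!386") with a stub (built with "+build !linux linux,386") that returns ErrUnsupported, so the package compiles on every platform. A sketch of the same build-tag pattern, with hypothetical file and function names and the modern //go:build syntax:

// feature_linux.go: real implementation, built only on linux and not 386
//go:build linux && !386

package feature

func Do() error {
	return nil // platform-specific work would go here
}

// feature_other.go: stub for every other platform
//go:build !linux || 386

package feature

import (
	"errors"
	"runtime"
)

var ErrUnsupported = errors.New("feature is unsupported on " + runtime.GOOS + "/" + runtime.GOARCH)

func Do() error {
	return ErrUnsupported // keeps the package buildable everywhere
}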
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index c3ec9d8a7..5c3c2a258 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,24 @@ This package provides various compression algorithms.
# changelog
+* May 25, 2022 (v1.15.5)
+ * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
+ * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
+ * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
+ * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
+ * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
+ * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
+ * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
+ * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+ * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+ * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+ * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+ * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+ * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
* May 5, 2022 (v1.15.3)
* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
@@ -77,6 +95,9 @@ While the release has been extensively tested, it is recommended to testing when
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+<details>
+ <summary>See changes to v1.13.x</summary>
+
* Aug 30, 2021 (v1.13.5)
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -105,6 +126,8 @@ While the release has been extensively tested, it is recommended to testing when
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+</details>
+
<details>
<summary>See changes to v1.12.x</summary>
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index bffa2f332..f8435998e 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -84,24 +84,23 @@ type advancedState struct {
length int
offset int
maxInsertIndex int
+ chainHead int
+ hashOffset int
- // Input hash chains
- // hashHead[hashValue] contains the largest inputIndex with the specified hash value
- // If hashHead[hashValue] is within the current window, then
- // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
- // with the same hash value.
- chainHead int
- hashHead [hashSize]uint32
- hashPrev [windowSize]uint32
- hashOffset int
+ ii uint16 // position of last match, intended to overflow to reset.
// input window: unprocessed data is window[index:windowEnd]
index int
estBitsPerByte int
hashMatch [maxMatchLength + minMatchLength]uint32
- hash uint32
- ii uint16 // position of last match, intended to overflow to reset.
+ // Input hash chains
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value
+ // If hashHead[hashValue] is within the current window, then
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+ // with the same hash value.
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
}
type compressor struct {
@@ -259,7 +258,6 @@ func (d *compressor) fillWindow(b []byte) {
// Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset)
}
- s.hash = newH
}
// Update window information.
d.windowEnd += n
@@ -403,7 +401,6 @@ func (d *compressor) initDeflate() {
s.hashOffset = 1
s.length = minMatchLength - 1
s.offset = 0
- s.hash = 0
s.chainHead = -1
}
@@ -432,9 +429,6 @@ func (d *compressor) deflateLazy() {
}
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if s.index < s.maxInsertIndex {
- s.hash = hash4(d.window[s.index:])
- }
for {
if sanity && s.index > d.windowEnd {
@@ -466,11 +460,11 @@ func (d *compressor) deflateLazy() {
}
if s.index < s.maxInsertIndex {
// Update the hash
- s.hash = hash4(d.window[s.index:])
- ch := s.hashHead[s.hash&hashMask]
+ hash := hash4(d.window[s.index:])
+ ch := s.hashHead[hash]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
- s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
+ s.hashHead[hash] = uint32(s.index + s.hashOffset)
}
prevLength := s.length
prevOffset := s.offset
@@ -503,7 +497,7 @@ func (d *compressor) deflateLazy() {
end += prevIndex
idx := prevIndex + prevLength - (4 - checkOff)
h := hash4(d.window[idx:])
- ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff)
+ ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff)
if ch2 > minIndex {
length := matchLen(d.window[prevIndex:end], d.window[ch2:])
// It seems like a pure length metric is best.
@@ -547,7 +541,6 @@ func (d *compressor) deflateLazy() {
// Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset)
}
- s.hash = newH
}
s.index = newIndex
@@ -793,7 +786,6 @@ func (d *compressor) reset(w io.Writer) {
d.tokens.Reset()
s.length = minMatchLength - 1
s.offset = 0
- s.hash = 0
s.ii = 0
s.maxInsertIndex = 0
}
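
The comment block relocated above documents the lazy matcher's hash-chain layout: hashHead maps a hash value to the most recent input index (biased by hashOffset so a zero entry means "never seen"), and hashPrev, indexed by position & windowMask, links back to the previous position with the same hash. A simplified, self-contained sketch of that structure; sizes are illustrative, not the package's real constants:

package main

import "fmt"

const (
	hashSize   = 1 << 17 // illustrative only
	windowSize = 1 << 15 // illustrative only
	windowMask = windowSize - 1
)

type chains struct {
	hashHead   [hashSize]uint32   // hash -> most recent index + hashOffset (0 = empty)
	hashPrev   [windowSize]uint32 // index & windowMask -> previous index + hashOffset
	hashOffset int
}

// insert records that position index hashes to h, pushing the old head onto the chain.
func (c *chains) insert(index int, h uint32) {
	c.hashPrev[index&windowMask] = c.hashHead[h]
	c.hashHead[h] = uint32(index + c.hashOffset)
}

// walk visits earlier positions with the same hash, most recent first,
// stopping once the chain falls out of the window (pos <= minIndex).
func (c *chains) walk(h uint32, minIndex int, visit func(pos int) bool) {
	for pos := int(c.hashHead[h]) - c.hashOffset; pos > minIndex; pos = int(c.hashPrev[pos&windowMask]) - c.hashOffset {
		if !visit(pos) {
			return
		}
	}
}

func main() {
	c := &chains{hashOffset: 1} // offset of 1 so a zero entry means empty
	for _, pos := range []int{10, 42, 77} {
		c.insert(pos, 123) // pretend all three positions share hash 123
	}
	c.walk(123, 0, func(pos int) bool { fmt.Println("candidate match at", pos); return true })
}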
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index d55ea2a77..f781aaa62 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -117,7 +117,7 @@ func (e *fastGen) addBlock(src []byte) int32 {
// hash4 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
- return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32)
+ return (u * prime4bytes) >> (32 - h)
}
type tableEntryPrev struct {
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 451160edd..504a7be9d 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}
-// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
-func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
- return uint16(b.value >> n)
-}
-
func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63
@@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() {
}
}
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReaderShifted) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
-}
-
func (b *bitReaderShifted) remaining() uint {
return b.off*8 + uint(64-b.bitsRead)
}
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index 6bce4e87d..ec71f7a34 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -5,8 +5,6 @@
package huff0
-import "fmt"
-
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF} /* up to 16 bits */
-// addBits16NC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
- b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
- b.nBits += bits
-}
-
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
-// addBits16ZeroNC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-// This is fastest if bits can be zero.
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
- if bits == 0 {
- return
- }
- value <<= (16 - bits) & 15
- value >>= (16 - bits) & 15
- b.bitContainer |= uint64(value) << (b.nBits & 63)
- b.nBits += bits
-}
-
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
- v := b.nBits >> 3
- switch v {
- case 0:
- return
- case 1:
- b.out = append(b.out,
- byte(b.bitContainer),
- )
- b.bitContainer >>= 1 << 3
- case 2:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- )
- b.bitContainer >>= 2 << 3
- case 3:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- )
- b.bitContainer >>= 3 << 3
- case 4:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- )
- b.bitContainer >>= 4 << 3
- case 5:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- )
- b.bitContainer >>= 5 << 3
- case 6:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- )
- b.bitContainer >>= 6 << 3
- case 7:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- )
- b.bitContainer >>= 7 << 3
- case 8:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- byte(b.bitContainer>>56),
- )
- b.bitContainer = 0
- b.nBits = 0
- return
- default:
- panic(fmt.Errorf("bits (%d) > 64", b.nBits))
- }
- b.nBits &= 7
-}
-
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
@@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
b.flushAlign()
return nil
}
-
-// reset and continue writing by appending to out.
-func (b *bitWriter) reset(out []byte) {
- b.bitContainer = 0
- b.nBits = 0
- b.out = out
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
index 50bcdf6ea..4dcab8d23 100644
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
b.off = 0
}
-// advance the stream b n bytes.
-func (b *byteReader) advance(n uint) {
- b.off += int(n)
-}
-
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
v3 := int32(b.b[b.off+3])
@@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}
-// unread returns the unread portion of the input.
-func (b byteReader) unread() []byte {
- return b.b[b.off:]
-}
-
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
return len(b.b) - b.off
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index bc95ac623..4d14542fa 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true
}
+//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool {
if len(c) < int(s.symbolLen) {
return false
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 04f652995..c0c48bd70 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -11,7 +11,6 @@ import (
type dTable struct {
single []dEntrySingle
- double []dEntryDouble
}
// single-symbols decoding
@@ -19,13 +18,6 @@ type dEntrySingle struct {
entry uint16
}
-// double-symbols decoding
-type dEntryDouble struct {
- seq [4]byte
- nBits uint8
- len uint8
-}
-
// Uses special code for all tables that are < 8 bits.
const use8BitTables = true
@@ -35,7 +27,7 @@ const use8BitTables = true
// If no Scratch is provided a new one is allocated.
// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
- s, err = s.prepare(in)
+ s, err = s.prepare(nil)
if err != nil {
return s, nil, err
}
@@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte {
return &[4][256]byte{}
}
-// Decompress1X will decompress a 1X encoded stream.
-// The cap of the output buffer will be the maximum decompressed size.
-// The length of the supplied input must match the end of a block exactly.
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress1X8Bit(dst, src)
- }
- var br bitReaderShifted
- err := br.init(src)
- if err != nil {
- return dst, err
- }
- maxDecodedSize := cap(dst)
- dst = dst[:0]
-
- // Avoid bounds check by always having full sized table.
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- dt := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- bufs := d.buffer()
- buf := &bufs[0]
- var off uint8
-
- for br.off >= 8 {
- br.fillFast()
- v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+0] = uint8(v.entry >> 8)
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+1] = uint8(v.entry >> 8)
-
- // Refill
- br.fillFast()
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+2] = uint8(v.entry >> 8)
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+3] = uint8(v.entry >> 8)
-
- off += 4
- if off == 0 {
- if len(dst)+256 > maxDecodedSize {
- br.close()
- d.bufs.Put(bufs)
- return nil, ErrMaxDecodedSizeExceeded
- }
- dst = append(dst, buf[:]...)
- }
- }
-
- if len(dst)+int(off) > maxDecodedSize {
- d.bufs.Put(bufs)
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- dst = append(dst, buf[:off]...)
-
- // br < 8, so uint8 is fine
- bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
- for bitsLeft > 0 {
- br.fill()
- if false && br.bitsRead >= 32 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value = (br.value << 32) | uint64(low)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value = (br.value << 8) | uint64(br.in[br.off-1])
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- if len(dst) >= maxDecodedSize {
- d.bufs.Put(bufs)
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
- nBits := uint8(v.entry)
- br.advance(nBits)
- bitsLeft -= nBits
- dst = append(dst, uint8(v.entry>>8))
- }
- d.bufs.Put(bufs)
- return dst, br.close()
-}
-
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
@@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
const shift = 56
const tlSize = 1 << 8
- const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index 3415e5da2..671e630a8 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -2,12 +2,14 @@
// +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X
-// that uses an asm implementation of its main loop.
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
package huff0
import (
"errors"
"fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
)
// decompress4x_main_loop_x86 is an x86 assembler implementation
@@ -146,3 +148,81 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
}
return dst, nil
}
+
+// decompress1x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
+// of Decompress1X when tablelog > 8.
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+ pbr *bitReaderShifted
+ peekBits uint8
+ out *byte
+ outCap int
+ tbl *dEntrySingle
+ decoded int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exeeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:maxDecodedSize]
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+
+ if maxDecodedSize >= 4 {
+ ctx := decompress1xContext{
+ pbr: &br,
+ out: &dst[0],
+ outCap: maxDecodedSize,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ tbl: &d.dt.single[0],
+ }
+
+ if cpuinfo.HasBMI2() {
+ decompress1x_main_loop_bmi2(&ctx)
+ } else {
+ decompress1x_main_loop_amd64(&ctx)
+ }
+ if ctx.decoded == error_max_decoded_size_exeeded {
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+
+ dst = dst[:ctx.decoded]
+ }
+
+ // br < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ return dst, br.close()
+}
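
Both the assembler wrapper above and the generic fallback later in this diff treat cap(dst) as the decode limit and return ErrMaxDecodedSizeExceeded beyond it. A hedged usage sketch of that contract, reusing the ReadTable/Decoder flow that appears elsewhere in this diff; the input bytes and size limit are placeholders:

package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// decodeOneStream decompresses a single-stream huff0 block whose table is
// prefixed to the payload, refusing to emit more than maxSize bytes.
func decodeOneStream(block []byte, maxSize int) ([]byte, error) {
	s, payload, err := huff0.ReadTable(block, nil) // nil Scratch: a fresh one is allocated
	if err != nil {
		return nil, err
	}
	// The capacity of dst is the maximum decompressed size; exceeding it
	// yields huff0.ErrMaxDecodedSizeExceeded.
	dst := make([]byte, 0, maxSize)
	return s.Decoder().Decompress1X(dst, payload)
}

func main() {
	out, err := decodeOneStream(compressedBlock, 1<<20)
	fmt.Println(len(out), err)
}

// compressedBlock stands in for real huff0-compressed data.
var compressedBlock []byte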
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
index 06287f568..6c65c6e2b 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -660,3 +660,206 @@ skip_fill1003:
SHLQ $0x02, DX
MOVQ DX, 64(AX)
RET
+
+// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+TEXT ·decompress1x_main_loop_amd64(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exeeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
+
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exeeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_1_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_2_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, CX
+ MOVQ 16(AX), DX
+ SUBQ DX, CX
+ MOVQ CX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
+
+ // Report error
+error_max_decoded_size_exeeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
+
+// func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+// Requires: BMI2
+TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exeeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
+
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exeeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_1_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_2_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, CX
+ MOVQ 16(AX), DX
+ SUBQ DX, CX
+ MOVQ CX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
+
+ // Report error
+error_max_decoded_size_exeeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
index 126b4d68a..4f6f37cb2 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
}
return dst, nil
}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress1X8Bit(dst, src)
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ dt := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ bufs := d.buffer()
+ buf := &bufs[0]
+ var off uint8
+
+ for br.off >= 8 {
+ br.fillFast()
+ v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ // Refill
+ br.fillFast()
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+ // br < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if false && br.bitsRead >= 32 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value = (br.value << 32) | uint64(low)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value = (br.value << 8) | uint64(br.in[br.off-1])
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ d.bufs.Put(bufs)
+ return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index d7cd15ba2..97299d499 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
return v
}
-func (b *bitReader) get16BitsFast(n uint8) uint16 {
- const regMask = 64 - 1
- v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
- b.bitsRead += n
- return v
-}
-
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index b36618285..78b3c61be 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -5,8 +5,6 @@
package zstd
-import "fmt"
-
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits
}
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
- v := b.nBits >> 3
- switch v {
- case 0:
- case 1:
- b.out = append(b.out,
- byte(b.bitContainer),
- )
- case 2:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- )
- case 3:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- )
- case 4:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- )
- case 5:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- )
- case 6:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- )
- case 7:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- )
- case 8:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- byte(b.bitContainer>>56),
- )
- default:
- panic(fmt.Errorf("bits (%d) > 64", b.nBits))
- }
- b.bitContainer >>= v << 3
- b.nBits &= 7
-}
-
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index b2bca3301..7eed729be 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -49,11 +49,8 @@ const (
// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
- maxCompressedLiteralSize = 1 << 18
- maxRLELiteralSize = 1 << 20
- maxMatchLen = 131074
- maxSequences = 0x7f00 + 0xffff
+ maxMatchLen = 131074
+ maxSequences = 0x7f00 + 0xffff
// We support slightly less than the reference decoder to be able to
// use ints on 32 bit archs.
@@ -105,7 +102,6 @@ type blockDec struct {
// Block is RLE, this is the size.
RLESize uint32
- tmp [4]byte
Type blockType
@@ -368,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
}
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, litRegenSize)
+ b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
} else {
- if litRegenSize > maxCompressedLiteralSize {
- // Exceptional
- b.literalBuf = make([]byte, litRegenSize)
- } else {
- b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
- }
+ b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
literals = b.literalBuf[:litRegenSize]
@@ -405,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
var err error
// Use our out buffer.
- huff.MaxDecodedSize = maxCompressedBlockSize
+ huff.MaxDecodedSize = litRegenSize
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
@@ -437,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
- b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
huff := hist.huffTree
@@ -456,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
return in, err
}
hist.huffTree = huff
- huff.MaxDecodedSize = maxCompressedBlockSize
+ huff.MaxDecodedSize = litRegenSize
// Use our out buffer.
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -471,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
if len(literals) != litRegenSize {
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
+ // Re-cap to get extra size.
+ literals = b.literalBuf[:len(literals)]
if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index b80191e4b..4493baa75 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
return r, nil
}
-func (b *byteBuf) remain() []byte {
- return *b
-}
-
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
index 2c4fca17f..0e59a242d 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytereader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -13,12 +13,6 @@ type byteReader struct {
off int
}
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
- b.b = in
- b.off = 0
-}
-
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 36119f385..286c8f9d7 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -637,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error {
// Create Decoder:
// ASYNC:
-// Spawn 4 go routines.
-// 0: Read frames and decode blocks.
-// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
-// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
-// 3: Wait for stream history, execute sequences, send stream history.
+// Spawn 3 go routines.
+// 0: Read frames and decode block literals.
+// 1: Decode sequences.
+// 2: Execute sequences, send to output.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done()
br := readerWrapper{r: r}
- var seqPrepare = make(chan *blockDec, d.o.concurrent)
var seqDecode = make(chan *blockDec, d.o.concurrent)
var seqExecute = make(chan *blockDec, d.o.concurrent)
- // Async 1: Prepare blocks...
- go func() {
- var hist history
- var hasErr bool
- for block := range seqPrepare {
- if hasErr {
- if block != nil {
- seqDecode <- block
- }
- continue
- }
- if block.async.newHist != nil {
- if debugDecoder {
- println("Async 1: new history")
- }
- hist.reset()
- if block.async.newHist.dict != nil {
- hist.setDict(block.async.newHist.dict)
- }
- }
- if block.err != nil || block.Type != blockTypeCompressed {
- hasErr = block.err != nil
- seqDecode <- block
- continue
- }
-
- remain, err := block.decodeLiterals(block.data, &hist)
- block.err = err
- hasErr = block.err != nil
- if err == nil {
- block.async.literals = hist.decoders.literals
- block.async.seqData = remain
- } else if debugDecoder {
- println("decodeLiterals error:", err)
- }
- seqDecode <- block
- }
- close(seqDecode)
- }()
-
- // Async 2: Decode sequences...
+ // Async 1: Decode sequences...
go func() {
var hist history
var hasErr bool
@@ -704,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
- println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
+ println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
}
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
@@ -758,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
- println("Async 3: new history")
+ println("Async 2: new history")
}
hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
@@ -845,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
decodeStream:
for {
+ var hist history
+ var hasErr bool
+
+ decodeBlock := func(block *blockDec) {
+ if hasErr {
+ if block != nil {
+ seqDecode <- block
+ }
+ return
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqDecode <- block
+ return
+ }
+
+ remain, err := block.decodeLiterals(block.data, &hist)
+ block.err = err
+ hasErr = block.err != nil
+ if err == nil {
+ block.async.literals = hist.decoders.literals
+ block.async.seqData = remain
+ } else if debugDecoder {
+ println("decodeLiterals error:", err)
+ }
+ seqDecode <- block
+ }
frame := d.frame
if debugDecoder {
println("New frame...")
@@ -871,7 +856,7 @@ decodeStream:
case <-ctx.Done():
case dec := <-d.decoders:
dec.sendErr(err)
- seqPrepare <- dec
+ decodeBlock(dec)
}
break decodeStream
}
@@ -891,6 +876,10 @@ decodeStream:
if debugDecoder {
println("Alloc History:", h.allocFrameBuffer)
}
+ hist.reset()
+ if h.dict != nil {
+ hist.setDict(h.dict)
+ }
dec.async.newHist = &h
dec.async.fcs = frame.FrameContentSize
historySent = true
@@ -917,7 +906,7 @@ decodeStream:
}
err = dec.err
last := dec.Last
- seqPrepare <- dec
+ decodeBlock(dec)
if err != nil {
break decodeStream
}
@@ -926,7 +915,7 @@ decodeStream:
}
}
}
- close(seqPrepare)
+ close(seqDecode)
wg.Wait()
d.frame.history.b = frameHistCache
}
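
The decoder.go hunk above removes the separate literal-preparation goroutine and folds that work into the frame-reading loop via the new decodeBlock closure, leaving a three-stage pipeline: read frames and decode literals, decode sequences, execute sequences. Below is a minimal sketch of that pipeline shape, assuming invented names (block, stage bodies); it is an illustration of the channel wiring, not code from the patch.

    package main

    import (
        "fmt"
        "sync"
    )

    // block stands in for the patch's *blockDec; the fields are invented.
    type block struct{ id int }

    func main() {
        seqDecode := make(chan *block, 4)  // fed by the frame-reading loop
        seqExecute := make(chan *block, 4) // fed by the sequence decoder
        var wg sync.WaitGroup

        // Async 1: decode sequences, then hand off for execution.
        wg.Add(1)
        go func() {
            defer wg.Done()
            for b := range seqDecode {
                // ...decode sequences for b here...
                seqExecute <- b
            }
            close(seqExecute)
        }()

        // Async 2: execute sequences and emit decoded output.
        wg.Add(1)
        go func() {
            defer wg.Done()
            for b := range seqExecute {
                fmt.Println("output for block", b.id)
            }
        }()

        // Async 0 (the calling goroutine): read frames and decode literals
        // inline, as the new decodeBlock closure does, then feed stage 1.
        for i := 0; i < 3; i++ {
            seqDecode <- &block{id: i}
        }
        close(seqDecode)
        wg.Wait()
    }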
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 602c05ee0..c769f6941 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -156,8 +156,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -518,8 +518,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -674,8 +674,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -1047,8 +1047,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index d6b310424..7ff0c64fa 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -127,8 +127,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -439,8 +439,8 @@ encodeLoop:
var t int32
for {
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -785,8 +785,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -969,7 +969,7 @@ encodeLoop:
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
- longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+ longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@@ -1002,8 +1002,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index dcc987a7c..e6b1d01cf 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
// If we can do everything in one block, prefer that.
- if len(src) <= maxCompressedBlockSize {
+ if len(src) <= e.o.blockSize {
enc.Reset(e.o.dict, true)
// Slightly faster with no history and everything in one block.
if e.o.crc {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 3ff109cce..fa0a633f3 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
- if d.o.lowMem && d.history.windowSize < maxBlockSize {
+ if !d.o.lowMem || d.history.windowSize < maxBlockSize {
+ // Alloc 2x window size if not low-mem, or very small window size.
d.history.allocFrameBuffer = d.history.windowSize * 2
- // TODO: Maybe use FrameContent size
} else {
+ // Alloc with one additional block
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}
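
The framedec.go change flips the allocation policy: the larger 2x-window frame buffer is now the default unless lowMem is set and the window is at least one maximum-size block. A minimal sketch of the two branches, using the maxBlockSize constant shown in the blockdec.go hunk; the helper name is invented.

    package main

    import "fmt"

    // allocSize mirrors the branch added in framedec.go: allocate 2x the window
    // unless lowMem is set and the window is at least one max-size block.
    func allocSize(windowSize, maxBlockSize int, lowMem bool) int {
        if !lowMem || windowSize < maxBlockSize {
            return windowSize * 2
        }
        return windowSize + maxBlockSize
    }

    func main() {
        const maxBlockSize = (1 << 21) - 1                 // value from blockdec.go above
        fmt.Println(allocSize(1<<22, maxBlockSize, false)) // default: 2x window
        fmt.Println(allocSize(1<<22, maxBlockSize, true))  // lowMem: window + one block
    }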
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index fde4e6b60..23333b969 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -229,18 +229,10 @@ func (d decSymbol) newState() uint16 {
return uint16(d >> 16)
}
-func (d decSymbol) baseline() uint32 {
- return uint32(d >> 32)
-}
-
func (d decSymbol) baselineInt() int {
return int(d >> 32)
}
-func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
- *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
-}
-
func (d *decSymbol) setNBits(nBits uint8) {
const mask = 0xffffffffffffff00
*d = (*d & mask) | decSymbol(nBits)
@@ -256,11 +248,6 @@ func (d *decSymbol) setNewState(state uint16) {
*d = (*d & mask) | decSymbol(state)<<16
}
-func (d *decSymbol) setBaseline(baseline uint32) {
- const mask = 0xffffffff
- *d = (*d & mask) | decSymbol(baseline)<<32
-}
-
func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
const mask = 0xffff00ff
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@@ -377,34 +364,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
s.state = dt[br.getBits(tableLog)]
}
-// next returns the current symbol and sets the next state.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) next(br *bitReader) {
- lowBits := uint16(br.getBits(s.state.nbBits()))
- s.state = s.dt[s.state.newState()+lowBits]
-}
-
-// finished returns true if all bits have been read from the bitstream
-// and the next state would require reading bits from the input.
-func (s *fseState) finished(br *bitReader) bool {
- return br.finished() && s.state.nbBits() > 0
-}
-
-// final returns the current state symbol without decoding the next.
-func (s *fseState) final() (int, uint8) {
- return s.state.baselineInt(), s.state.addBits()
-}
-
// final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) {
return s.baselineInt(), s.addBits()
}
-
-// nextFast returns the next symbol and sets the next state.
-// This can only be used if no symbols are 0 bits.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
- lowBits := br.get16BitsFast(s.state.nbBits())
- s.state = s.dt[s.state.newState()+lowBits]
- return s.state.baseline(), s.state.addBits()
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index 5442061b1..ab26326a8 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
s.clearCount = maxCount != 0
}
-// prepare will prepare and allocate scratch tables used for both compression and decompression.
-func (s *fseEncoder) prepare() (*fseEncoder, error) {
- if s == nil {
- s = &fseEncoder{}
- }
- s.useRLE = false
- if s.clearCount && s.maxCount == 0 {
- for i := range s.count {
- s.count[i] = 0
- }
- s.clearCount = false
- }
- return s, nil
-}
-
// allocCtable will allocate tables needed for compression.
// If existing tables a re big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() {
@@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
c.state = c.stateTable[lu]
}
-// encode the output symbol provided and write it to the bitstream.
-func (c *cState) encode(symbolTT symbolTransform) {
- nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
- dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
- c.bw.addBits16NC(c.state, uint8(nbBitsOut))
- c.state = c.stateTable[dstState]
-}
-
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
c.bw.flush32()
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
index cf33f29a1..5d73c21eb 100644
--- a/vendor/github.com/klauspost/compress/zstd/hash.go
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
-
-// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash3(u uint32, h uint8) uint32 {
- return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index e80139dd9..df0447203 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -188,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
}
}
+
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
@@ -203,12 +204,11 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(hist []byte) error {
- if true {
- supported, err := s.decodeSyncSimple(hist)
- if supported {
- return err
- }
+ supported, err := s.decodeSyncSimple(hist)
+ if supported {
+ return err
}
+
br := s.br
seqs := s.nSeqs
startSize := len(s.out)
@@ -396,6 +396,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
+
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@@ -418,16 +419,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return br.close()
}
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) update(br *bitReader) {
- // Max 8 bits
- s.litLengths.state.next(br)
- // Max 9 bits
- s.matchLengths.state.next(br)
- // Max 8 bits
- s.offsets.state.next(br)
-}
-
var bitMask [16]uint16
func init() {
@@ -436,87 +427,6 @@ func init() {
}
}
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) updateAlt(br *bitReader) {
- // Update all 3 states at once. Approx 20% faster.
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-
- nBits := a.nbBits() + b.nbBits() + c.nbBits()
- if nBits == 0 {
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
- s.offsets.state.state = s.offsets.state.dt[c.newState()]
- return
- }
- bits := br.get32BitsFast(nBits)
- lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
-
- lowBits = uint16(bits >> (c.nbBits() & 31))
- lowBits &= bitMask[b.nbBits()&15]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
-
- lowBits = uint16(bits) & bitMask[c.nbBits()&15]
- s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
-}
-
-// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
-func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
- // Final will not read from stream.
- ll, llB := llState.final()
- ml, mlB := mlState.final()
- mo, moB := ofState.final()
-
- // extra bits are stored in reverse order.
- br.fillFast()
- mo += br.getBits(moB)
- if s.maxBits > 32 {
- br.fillFast()
- }
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
-
- if moB > 1 {
- s.prevOffset[2] = s.prevOffset[1]
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = mo
- return
- }
- // mo = s.adjustOffset(mo, ll, moB)
- // Inlined for rather big speedup
- if ll == 0 {
- // There is an exception though, when current sequence's literals_length = 0.
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
- // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
- mo++
- }
-
- if mo == 0 {
- mo = s.prevOffset[0]
- return
- }
- var temp int
- if mo == 3 {
- temp = s.prevOffset[0] - 1
- } else {
- temp = s.prevOffset[mo]
- }
-
- if temp == 0 {
- // 0 is not valid; input is corrupted; force offset to 1
- println("temp was 0")
- temp = 1
- }
-
- if mo != 1 {
- s.prevOffset[2] = s.prevOffset[1]
- }
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = temp
- mo = temp
- return
-}
-
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 4676b09cc..847b322ae 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -62,6 +62,10 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true
}
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ useSafe = true
+ }
+
br := s.br
maxBlockSize := maxCompressedBlockSize
@@ -301,6 +305,10 @@ type executeAsmContext struct {
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+// Same as above, but with safe memcopies
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
// executeSimple handles cases when dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
@@ -327,8 +335,12 @@ func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
literals: s.literals,
windowSize: s.windowSize,
}
-
- ok := sequenceDecs_executeSimple_amd64(&ctx)
+ var ok bool
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
+ } else {
+ ok = sequenceDecs_executeSimple_amd64(&ctx)
+ }
if !ok {
return fmt.Errorf("match offset (%d) bigger than current history (%d)",
seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
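
The seqdec_amd64.go hunk gates the wide-copy execute path on the literals buffer having spare capacity for over-allocation, falling back to the new safe routine otherwise. A minimal sketch of that dispatch, assuming a made-up overAlloc value (the real compressedBlockOverAlloc constant is not shown in this hunk):

    package main

    import "fmt"

    // overAlloc stands in for compressedBlockOverAlloc; the value is illustrative.
    const overAlloc = 32

    // chooseExecute mirrors the dispatch added in executeSimple: fall back to the
    // byte-exact "safe" routine when the literals buffer has no spare capacity
    // for the wide 16-byte copies used by the fast path.
    func chooseExecute(literals []byte) string {
        if cap(literals) < len(literals)+overAlloc {
            return "sequenceDecs_executeSimple_safe_amd64"
        }
        return "sequenceDecs_executeSimple_amd64"
    }

    func main() {
        tight := make([]byte, 100)          // no headroom past len
        padded := make([]byte, 100, 100+64) // over-allocated tail
        fmt.Println(chooseExecute(tight))
        fmt.Println(chooseExecute(padded))
    }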
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 2585b2e98..212c6cac3 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -705,60 +705,55 @@ sequenceDecs_decode_bmi2_fill_2_end:
MOVQ CX, (R9)
// Fill bitreader for state updates
- MOVQ R13, (SP)
- MOVQ $0x00000808, CX
- BEXTRQ CX, R8, R13
- MOVQ ctx+16(FP), CX
- CMPQ 96(CX), $0x00
- JZ sequenceDecs_decode_bmi2_skip_update
-
- // Update Literal Length State
- MOVBQZX SI, R14
- MOVQ $0x00001010, CX
- BEXTRQ CX, SI, SI
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
LEAQ (DX)(R14*1), CX
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
- ADDQ R15, SI
- // Load ctx.llTable
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
MOVQ ctx+16(FP), CX
- MOVQ (CX), CX
- MOVQ (CX)(SI*8), SI
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
// Update Match Length State
- MOVBQZX DI, R14
- MOVQ $0x00001010, CX
- BEXTRQ CX, DI, DI
- LEAQ (DX)(R14*1), CX
- MOVQ AX, R15
- MOVQ CX, DX
- ROLQ CL, R15
- BZHIQ R14, R15, R15
- ADDQ R15, DI
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, DI, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI
- // Update Offset State
- MOVBQZX R8, R14
- MOVQ $0x00001010, CX
- BEXTRQ CX, R8, R8
- LEAQ (DX)(R14*1), CX
- MOVQ AX, R15
- MOVQ CX, DX
- ROLQ CL, R15
- BZHIQ R14, R15, R15
- ADDQ R15, R8
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, SI, SI
+ ADDQ CX, SI
- // Load ctx.ofTable
+ // Load ctx.llTable
MOVQ ctx+16(FP), CX
- MOVQ 48(CX), CX
- MOVQ (CX)(R8*8), R8
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
sequenceDecs_decode_bmi2_skip_update:
// Adjust offset
@@ -965,60 +960,55 @@ sequenceDecs_decode_56_bmi2_fill_end:
MOVQ CX, (R9)
// Fill bitreader for state updates
- MOVQ R13, (SP)
- MOVQ $0x00000808, CX
- BEXTRQ CX, R8, R13
- MOVQ ctx+16(FP), CX
- CMPQ 96(CX), $0x00
- JZ sequenceDecs_decode_56_bmi2_skip_update
-
- // Update Literal Length State
- MOVBQZX SI, R14
- MOVQ $0x00001010, CX
- BEXTRQ CX, SI, SI
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
LEAQ (DX)(R14*1), CX
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
- ADDQ R15, SI
- // Load ctx.llTable
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
MOVQ ctx+16(FP), CX
- MOVQ (CX), CX
- MOVQ (CX)(SI*8), SI
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
// Update Match Length State
- MOVBQZX DI, R14
- MOVQ $0x00001010, CX
- BEXTRQ CX, DI, DI
- LEAQ (DX)(R14*1), CX
- MOVQ AX, R15
- MOVQ CX, DX
- ROLQ CL, R15
- BZHIQ R14, R15, R15
- ADDQ R15, DI
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, DI, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI
- // Update Offset State
- MOVBQZX R8, R14
- MOVQ $0x00001010, CX
- BEXTRQ CX, R8, R8
- LEAQ (DX)(R14*1), CX
- MOVQ AX, R15
- MOVQ CX, DX
- ROLQ CL, R15
- BZHIQ R14, R15, R15
- ADDQ R15, R8
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, SI, SI
+ ADDQ CX, SI
- // Load ctx.ofTable
+ // Load ctx.llTable
MOVQ ctx+16(FP), CX
- MOVQ 48(CX), CX
- MOVQ (CX)(R8*8), R8
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
sequenceDecs_decode_56_bmi2_skip_update:
// Adjust offset
@@ -1171,6 +1161,228 @@ main_loop:
TESTQ R11, R11
JZ check_offset
XORQ R14, R14
+
+copy_1:
+ MOVUPS (SI)(R14*1), X0
+ MOVUPS X0, (BX)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, R11
+ JB copy_1
+ ADDQ R11, SI
+ ADDQ R11, BX
+ ADDQ R11, DI
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JGE copy_all_from_history
+ XORQ R11, R11
+ TESTQ $0x00000001, R13
+ JZ copy_4_word
+ MOVB (R14)(R11*1), R12
+ MOVB R12, (BX)(R11*1)
+ ADDQ $0x01, R11
+
+copy_4_word:
+ TESTQ $0x00000002, R13
+ JZ copy_4_dword
+ MOVW (R14)(R11*1), R12
+ MOVW R12, (BX)(R11*1)
+ ADDQ $0x02, R11
+
+copy_4_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_4_qword
+ MOVL (R14)(R11*1), R12
+ MOVL R12, (BX)(R11*1)
+ ADDQ $0x04, R11
+
+copy_4_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_4_test
+ MOVQ (R14)(R11*1), R12
+ MOVQ R12, (BX)(R11*1)
+ ADDQ $0x08, R11
+ JMP copy_4_test
+
+copy_4:
+ MOVUPS (R14)(R11*1), X0
+ MOVUPS X0, (BX)(R11*1)
+ ADDQ $0x10, R11
+
+copy_4_test:
+ CMPQ R11, R13
+ JB copy_4
+ ADDQ R13, DI
+ ADDQ R13, BX
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ XORQ R15, R15
+ TESTQ $0x00000001, R11
+ JZ copy_5_word
+ MOVB (R14)(R15*1), BP
+ MOVB BP, (BX)(R15*1)
+ ADDQ $0x01, R15
+
+copy_5_word:
+ TESTQ $0x00000002, R11
+ JZ copy_5_dword
+ MOVW (R14)(R15*1), BP
+ MOVW BP, (BX)(R15*1)
+ ADDQ $0x02, R15
+
+copy_5_dword:
+ TESTQ $0x00000004, R11
+ JZ copy_5_qword
+ MOVL (R14)(R15*1), BP
+ MOVL BP, (BX)(R15*1)
+ ADDQ $0x04, R15
+
+copy_5_qword:
+ TESTQ $0x00000008, R11
+ JZ copy_5_test
+ MOVQ (R14)(R15*1), BP
+ MOVQ BP, (BX)(R15*1)
+ ADDQ $0x08, R15
+ JMP copy_5_test
+
+copy_5:
+ MOVUPS (R14)(R15*1), X0
+ MOVUPS X0, (BX)(R15*1)
+ ADDQ $0x10, R15
+
+copy_5_test:
+ CMPQ R15, R11
+ JB copy_5
+ ADDQ R11, BX
+ ADDQ R11, DI
+ SUBQ R11, R13
+
+ // Copy match from the current buffer
+copy_match:
+ TESTQ R13, R13
+ JZ handle_loop
+ MOVQ BX, R11
+ SUBQ R12, R11
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ BX, R12
+ ADDQ R13, BX
+
+copy_2:
+ MOVUPS (R11), X0
+ MOVUPS X0, (R12)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R12
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3:
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10
+ MOVQ 8(R10), CX
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX
+ MOVQ 24(R10), DX
+ MOVQ 32(R10), BX
+ MOVQ 80(R10), SI
+ MOVQ 104(R10), DI
+ MOVQ 120(R10), R8
+ MOVQ 56(R10), R9
+ MOVQ 64(R10), R10
+ ADDQ R10, R9
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop:
+ MOVQ (AX), R11
+ MOVQ 16(AX), R12
+ MOVQ 8(AX), R13
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ XORQ R14, R14
TESTQ $0x00000001, R11
JZ copy_1_word
MOVB (SI)(R14*1), R15
@@ -1326,18 +1538,46 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
- ADDQ R13, DI
- MOVQ BX, R12
- ADDQ R13, BX
+ ADDQ R13, DI
+ XORQ R12, R12
+ TESTQ $0x00000001, R13
+ JZ copy_2_word
+ MOVB (R11)(R12*1), R14
+ MOVB R14, (BX)(R12*1)
+ ADDQ $0x01, R12
+
+copy_2_word:
+ TESTQ $0x00000002, R13
+ JZ copy_2_dword
+ MOVW (R11)(R12*1), R14
+ MOVW R14, (BX)(R12*1)
+ ADDQ $0x02, R12
+
+copy_2_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_2_qword
+ MOVL (R11)(R12*1), R14
+ MOVL R14, (BX)(R12*1)
+ ADDQ $0x04, R12
+
+copy_2_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_2_test
+ MOVQ (R11)(R12*1), R14
+ MOVQ R14, (BX)(R12*1)
+ ADDQ $0x08, R12
+ JMP copy_2_test
copy_2:
- MOVUPS (R11), X0
- MOVUPS X0, (R12)
- ADDQ $0x10, R11
+ MOVUPS (R11)(R12*1), X0
+ MOVUPS X0, (BX)(R12*1)
ADDQ $0x10, R12
- SUBQ $0x10, R13
- JHI copy_2
- JMP handle_loop
+
+copy_2_test:
+ CMPQ R12, R13
+ JB copy_2
+ ADDQ R13, BX
+ JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
@@ -1673,45 +1913,16 @@ sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
TESTQ AX, AX
JZ check_offset
XORQ R14, R14
- TESTQ $0x00000001, AX
- JZ copy_1_word
- MOVB (R11)(R14*1), R15
- MOVB R15, (R10)(R14*1)
- ADDQ $0x01, R14
-
-copy_1_word:
- TESTQ $0x00000002, AX
- JZ copy_1_dword
- MOVW (R11)(R14*1), R15
- MOVW R15, (R10)(R14*1)
- ADDQ $0x02, R14
-
-copy_1_dword:
- TESTQ $0x00000004, AX
- JZ copy_1_qword
- MOVL (R11)(R14*1), R15
- MOVL R15, (R10)(R14*1)
- ADDQ $0x04, R14
-
-copy_1_qword:
- TESTQ $0x00000008, AX
- JZ copy_1_test
- MOVQ (R11)(R14*1), R15
- MOVQ R15, (R10)(R14*1)
- ADDQ $0x08, R14
- JMP copy_1_test
copy_1:
MOVUPS (R11)(R14*1), X0
MOVUPS X0, (R10)(R14*1)
ADDQ $0x10, R14
-
-copy_1_test:
- CMPQ R14, AX
- JB copy_1
- ADDQ AX, R11
- ADDQ AX, R10
- ADDQ AX, R12
+ CMPQ R14, AX
+ JB copy_1
+ ADDQ AX, R11
+ ADDQ AX, R10
+ ADDQ AX, R12
// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
check_offset:
@@ -2044,60 +2255,55 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
MOVQ CX, 24(SP)
// Fill bitreader for state updates
- MOVQ R12, (SP)
- MOVQ $0x00000808, CX
- BEXTRQ CX, R8, R12
- MOVQ ctx+16(FP), CX
- CMPQ 96(CX), $0x00
- JZ sequenceDecs_decodeSync_bmi2_skip_update
-
- // Update Literal Length State
- MOVBQZX SI, R13
- MOVQ $0x00001010, CX
- BEXTRQ CX, SI, SI
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
LEAQ (DX)(R13*1), CX
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
- ADDQ R14, SI
- // Load ctx.llTable
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
MOVQ ctx+16(FP), CX
- MOVQ (CX), CX
- MOVQ (CX)(SI*8), SI
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
// Update Match Length State
- MOVBQZX DI, R13
- MOVQ $0x00001010, CX
- BEXTRQ CX, DI, DI
- LEAQ (DX)(R13*1), CX
- MOVQ AX, R14
- MOVQ CX, DX
- ROLQ CL, R14
- BZHIQ R13, R14, R14
- ADDQ R14, DI
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, DI, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI
- // Update Offset State
- MOVBQZX R8, R13
- MOVQ $0x00001010, CX
- BEXTRQ CX, R8, R8
- LEAQ (DX)(R13*1), CX
- MOVQ AX, R14
- MOVQ CX, DX
- ROLQ CL, R14
- BZHIQ R13, R14, R14
- ADDQ R14, R8
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, SI, SI
+ ADDQ CX, SI
- // Load ctx.ofTable
+ // Load ctx.llTable
MOVQ ctx+16(FP), CX
- MOVQ 48(CX), CX
- MOVQ (CX)(R8*8), R8
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
sequenceDecs_decodeSync_bmi2_skip_update:
// Adjust offset
@@ -2180,45 +2386,16 @@ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
TESTQ CX, CX
JZ check_offset
XORQ R14, R14
- TESTQ $0x00000001, CX
- JZ copy_1_word
- MOVB (R10)(R14*1), R15
- MOVB R15, (R9)(R14*1)
- ADDQ $0x01, R14
-
-copy_1_word:
- TESTQ $0x00000002, CX
- JZ copy_1_dword
- MOVW (R10)(R14*1), R15
- MOVW R15, (R9)(R14*1)
- ADDQ $0x02, R14
-
-copy_1_dword:
- TESTQ $0x00000004, CX
- JZ copy_1_qword
- MOVL (R10)(R14*1), R15
- MOVL R15, (R9)(R14*1)
- ADDQ $0x04, R14
-
-copy_1_qword:
- TESTQ $0x00000008, CX
- JZ copy_1_test
- MOVQ (R10)(R14*1), R15
- MOVQ R15, (R9)(R14*1)
- ADDQ $0x08, R14
- JMP copy_1_test
copy_1:
MOVUPS (R10)(R14*1), X0
MOVUPS X0, (R9)(R14*1)
ADDQ $0x10, R14
-
-copy_1_test:
- CMPQ R14, CX
- JB copy_1
- ADDQ CX, R10
- ADDQ CX, R9
- ADDQ CX, R11
+ CMPQ R14, CX
+ JB copy_1
+ ADDQ CX, R10
+ ADDQ CX, R9
+ ADDQ CX, R11
// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
check_offset:
@@ -3108,60 +3285,55 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
MOVQ CX, 24(SP)
// Fill bitreader for state updates
- MOVQ R12, (SP)
- MOVQ $0x00000808, CX
- BEXTRQ CX, R8, R12
- MOVQ ctx+16(FP), CX
- CMPQ 96(CX), $0x00
- JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
-
- // Update Literal Length State
- MOVBQZX SI, R13
- MOVQ $0x00001010, CX
- BEXTRQ CX, SI, SI
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
LEAQ (DX)(R13*1), CX
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
- ADDQ R14, SI
- // Load ctx.llTable
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
MOVQ ctx+16(FP), CX
- MOVQ (CX), CX
- MOVQ (CX)(SI*8), SI
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
// Update Match Length State
- MOVBQZX DI, R13
- MOVQ $0x00001010, CX
- BEXTRQ CX, DI, DI
- LEAQ (DX)(R13*1), CX
- MOVQ AX, R14
- MOVQ CX, DX
- ROLQ CL, R14
- BZHIQ R13, R14, R14
- ADDQ R14, DI
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, DI, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI
- // Update Offset State
- MOVBQZX R8, R13
- MOVQ $0x00001010, CX
- BEXTRQ CX, R8, R8
- LEAQ (DX)(R13*1), CX
- MOVQ AX, R14
- MOVQ CX, DX
- ROLQ CL, R14
- BZHIQ R13, R14, R14
- ADDQ R14, R8
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, SI, SI
+ ADDQ CX, SI
- // Load ctx.ofTable
+ // Load ctx.llTable
MOVQ ctx+16(FP), CX
- MOVQ 48(CX), CX
- MOVQ (CX)(R8*8), R8
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
sequenceDecs_decodeSync_safe_bmi2_skip_update:
// Adjust offset
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index b53f606a1..29c15c8c4 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -18,7 +18,14 @@ const ZipMethodWinZip = 93
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20
-var zipReaderPool sync.Pool
+// zipReaderPool is the default reader pool.
+var zipReaderPool = sync.Pool{New: func() interface{} {
+ z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
+ if err != nil {
+ panic(err)
+ }
+ return z
+}}
// newZipReader creates a pooled zip decompressor.
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index c1c90b4a0..3eb3f1c82 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) {
}
}
-// matchLenFast does matching, but will not match the last up to 7 bytes.
-func matchLenFast(a, b []byte) int {
- endI := len(a) & (math.MaxInt32 - 7)
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- return i + bits.TrailingZeros64(diff)>>3
- }
- }
- return endI
-}
-
// matchLen returns the maximum length.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.
diff --git a/vendor/github.com/moby/sys/mount/flags_bsd.go b/vendor/github.com/moby/sys/mount/flags_bsd.go
index 27d8440aa..a7f8a7195 100644
--- a/vendor/github.com/moby/sys/mount/flags_bsd.go
+++ b/vendor/github.com/moby/sys/mount/flags_bsd.go
@@ -1,3 +1,4 @@
+//go:build freebsd || openbsd
// +build freebsd openbsd
package mount
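
Several of the moby/sys hunks in this patch add the newer //go:build constraint alongside the legacy // +build comment. A file-header sketch of the dual form (not a runnable program, just the constraint syntax the patch standardizes on):

    //go:build freebsd || openbsd
    // +build freebsd openbsd

    // Both lines above express the same constraint: the //go:build form
    // (Go 1.17+) uses || and &&, while the legacy // +build form uses spaces
    // for OR and commas for AND. Keeping both lets older toolchains build.
    package mount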
diff --git a/vendor/github.com/moby/sys/mount/flags_unix.go b/vendor/github.com/moby/sys/mount/flags_unix.go
index 995d72807..19fa61fcc 100644
--- a/vendor/github.com/moby/sys/mount/flags_unix.go
+++ b/vendor/github.com/moby/sys/mount/flags_unix.go
@@ -1,3 +1,4 @@
+//go:build !darwin && !windows
// +build !darwin,!windows
package mount
@@ -101,7 +102,7 @@ func MergeTmpfsOptions(options []string) ([]string, error) {
}
opt := strings.SplitN(option, "=", 2)
if len(opt) != 2 || !validFlags[opt[0]] {
- return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ return nil, fmt.Errorf("invalid tmpfs option %q", opt)
}
if !dataCollisions[opt[0]] {
// We prepend the option and add to collision map
diff --git a/vendor/github.com/moby/sys/mount/go.mod b/vendor/github.com/moby/sys/mount/go.mod
index e1e03a376..f02197926 100644
--- a/vendor/github.com/moby/sys/mount/go.mod
+++ b/vendor/github.com/moby/sys/mount/go.mod
@@ -1,8 +1,8 @@
module github.com/moby/sys/mount
-go 1.14
+go 1.16
require (
- github.com/moby/sys/mountinfo v0.4.0
- golang.org/x/sys v0.0.0-20200922070232-aee5d888a860
+ github.com/moby/sys/mountinfo v0.6.2
+ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
)
diff --git a/vendor/github.com/moby/sys/mount/go.sum b/vendor/github.com/moby/sys/mount/go.sum
index 7c39d597b..a2d6c47a1 100644
--- a/vendor/github.com/moby/sys/mount/go.sum
+++ b/vendor/github.com/moby/sys/mount/go.sum
@@ -1,5 +1,4 @@
-github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860 h1:YEu4SMq7D0cmT7CBbXfcH0NZeuChAXwsHe/9XueUO6o=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/moby/sys/mount/mount_errors.go b/vendor/github.com/moby/sys/mount/mount_errors.go
index 936a26373..b0d8582e8 100644
--- a/vendor/github.com/moby/sys/mount/mount_errors.go
+++ b/vendor/github.com/moby/sys/mount/mount_errors.go
@@ -1,4 +1,5 @@
-// +build !windows
+//go:build !darwin && !windows
+// +build !darwin,!windows
package mount
diff --git a/vendor/github.com/moby/sys/mount/mount_unix.go b/vendor/github.com/moby/sys/mount/mount_unix.go
index a250bfc80..4053fbbeb 100644
--- a/vendor/github.com/moby/sys/mount/mount_unix.go
+++ b/vendor/github.com/moby/sys/mount/mount_unix.go
@@ -1,3 +1,4 @@
+//go:build !darwin && !windows
// +build !darwin,!windows
package mount
@@ -22,7 +23,7 @@ func Mount(device, target, mType, options string) error {
// a normal unmount. If target is not a mount point, no error is returned.
func Unmount(target string) error {
err := unix.Unmount(target, mntDetach)
- if err == nil || err == unix.EINVAL {
+ if err == nil || err == unix.EINVAL { //nolint:errorlint // unix errors are bare
// Ignore "not mounted" error here. Note the same error
// can be returned if flags are invalid, so this code
// assumes that the flags value is always correct.
diff --git a/vendor/github.com/moby/sys/mount/mounter_bsd.go b/vendor/github.com/moby/sys/mount/mounter_freebsd.go
index 656b762fe..1fffb6901 100644
--- a/vendor/github.com/moby/sys/mount/mounter_bsd.go
+++ b/vendor/github.com/moby/sys/mount/mounter_freebsd.go
@@ -1,4 +1,5 @@
-// +build freebsd,cgo openbsd,cgo
+//go:build freebsd && cgo
+// +build freebsd,cgo
package mount
diff --git a/vendor/github.com/moby/sys/mount/mounter_linux.go b/vendor/github.com/moby/sys/mount/mounter_linux.go
index 0c477cc3d..4e18f4b67 100644
--- a/vendor/github.com/moby/sys/mount/mounter_linux.go
+++ b/vendor/github.com/moby/sys/mount/mounter_linux.go
@@ -65,7 +65,6 @@ func mount(device, target, mType string, flags uintptr, data string) error {
flags: oflags | unix.MS_REMOUNT,
err: err,
}
-
}
}
diff --git a/vendor/github.com/moby/sys/mount/mounter_openbsd.go b/vendor/github.com/moby/sys/mount/mounter_openbsd.go
new file mode 100644
index 000000000..3c0718b90
--- /dev/null
+++ b/vendor/github.com/moby/sys/mount/mounter_openbsd.go
@@ -0,0 +1,78 @@
+//go:build openbsd && cgo
+// +build openbsd,cgo
+
+/*
+ Due to how OpenBSD mount(2) works, filesystem types need to be
+ supported explicitly since it uses separate structs to pass
+ filesystem-specific arguments.
+
+ For now only UFS/FFS is supported as it's the default fs
+ on OpenBSD systems.
+
+ See: https://man.openbsd.org/mount.2
+*/
+
+package mount
+
+/*
+#include <sys/types.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+func createExportInfo(readOnly bool) C.struct_export_args {
+ exportFlags := C.int(0)
+ if readOnly {
+ exportFlags = C.MNT_EXRDONLY
+ }
+ out := C.struct_export_args{
+ ex_root: 0,
+ ex_flags: exportFlags,
+ }
+ return out
+}
+
+func createUfsArgs(device string, readOnly bool) unsafe.Pointer {
+ out := &C.struct_ufs_args{
+ fspec: C.CString(device),
+ export_info: createExportInfo(readOnly),
+ }
+ return unsafe.Pointer(out)
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ readOnly := flag&RDONLY != 0
+
+ var fsArgs unsafe.Pointer
+
+ switch mType {
+ case "ffs":
+ fsArgs = createUfsArgs(device, readOnly)
+ default:
+ return &mountError{
+ op: "mount",
+ source: device,
+ target: target,
+ flags: flag,
+ err: fmt.Errorf("unsupported file system type: %s", mType),
+ }
+ }
+
+ if errno := C.mount(C.CString(mType), C.CString(target), C.int(flag), fsArgs); errno != 0 {
+ return &mountError{
+ op: "mount",
+ source: device,
+ target: target,
+ flags: flag,
+ err: syscall.Errno(errno),
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/moby/sys/mount/mounter_unsupported.go b/vendor/github.com/moby/sys/mount/mounter_unsupported.go
index e7ff5bd9f..b69d62bd6 100644
--- a/vendor/github.com/moby/sys/mount/mounter_unsupported.go
+++ b/vendor/github.com/moby/sys/mount/mounter_unsupported.go
@@ -1,4 +1,5 @@
-// +build !linux,!freebsd,!openbsd,!windows freebsd,!cgo openbsd,!cgo
+//go:build (!linux && !freebsd && !openbsd && !windows && !darwin) || (freebsd && !cgo) || (openbsd && !cgo)
+// +build !linux,!freebsd,!openbsd,!windows,!darwin freebsd,!cgo openbsd,!cgo
package mount
diff --git a/vendor/github.com/moby/sys/mountinfo/go.mod b/vendor/github.com/moby/sys/mountinfo/go.mod
index 1cc3efcf7..e1bcdfe79 100644
--- a/vendor/github.com/moby/sys/mountinfo/go.mod
+++ b/vendor/github.com/moby/sys/mountinfo/go.mod
@@ -2,4 +2,4 @@ module github.com/moby/sys/mountinfo
go 1.16
-require golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359
+require golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
diff --git a/vendor/github.com/moby/sys/mountinfo/go.sum b/vendor/github.com/moby/sys/mountinfo/go.sum
index c257a6a29..af14a66ec 100644
--- a/vendor/github.com/moby/sys/mountinfo/go.sum
+++ b/vendor/github.com/moby/sys/mountinfo/go.sum
@@ -1,2 +1,2 @@
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go
index bf221e687..e78e72619 100644
--- a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go
+++ b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go
@@ -15,7 +15,7 @@ import (
//
// If a non-existent path is specified, an appropriate error is returned.
// In case the caller is not interested in this particular error, it should
-// be handled separately using e.g. errors.Is(err, os.ErrNotExist).
+// be handled separately using e.g. errors.Is(err, fs.ErrNotExist).
//
// This function is only available on Linux. When available (since kernel
// v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise,
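
The doc-comment fix above points callers at fs.ErrNotExist rather than os.ErrNotExist; the two are the same sentinel, but io/fs is its canonical home since Go 1.16. A minimal sketch of the suggested check:

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    func main() {
        // os.Stat wraps failures in *fs.PathError; errors.Is unwraps it and
        // matches the fs.ErrNotExist sentinel (identical to os.ErrNotExist).
        _, err := os.Stat("/no/such/path")
        if errors.Is(err, fs.ErrNotExist) {
            fmt.Println("path does not exist; handle separately from other errors")
        }
    }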
diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go
index 242f82cc7..c7b7678f9 100644
--- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go
+++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go
@@ -1,5 +1,5 @@
-//go:build linux || (freebsd && cgo) || (openbsd && cgo) || (darwin && cgo)
-// +build linux freebsd,cgo openbsd,cgo darwin,cgo
+//go:build linux || freebsd || openbsd || darwin
+// +build linux freebsd openbsd darwin
package mountinfo
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go
index c7e5cb42a..574aeb876 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo.go
@@ -15,7 +15,7 @@ func GetMounts(f FilterFunc) ([]*Info, error) {
//
// If a non-existent path is specified, an appropriate error is returned.
// In case the caller is not interested in this particular error, it should
-// be handled separately using e.g. errors.Is(err, os.ErrNotExist).
+// be handled separately using e.g. errors.Is(err, fs.ErrNotExist).
func Mounted(path string) (bool, error) {
// root is always mounted
if path == string(os.PathSeparator) {
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
index d5513a26d..8420f58c7 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
@@ -1,53 +1,37 @@
-//go:build (freebsd && cgo) || (openbsd && cgo) || (darwin && cgo)
-// +build freebsd,cgo openbsd,cgo darwin,cgo
+//go:build freebsd || openbsd || darwin
+// +build freebsd openbsd darwin
package mountinfo
-/*
-#include <sys/param.h>
-#include <sys/ucred.h>
-#include <sys/mount.h>
-*/
-import "C"
-
-import (
- "fmt"
- "reflect"
- "unsafe"
-)
+import "golang.org/x/sys/unix"
// parseMountTable returns information about mounted filesystems
func parseMountTable(filter FilterFunc) ([]*Info, error) {
- var rawEntries *C.struct_statfs
-
- count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
- if count == 0 {
- return nil, fmt.Errorf("failed to call getmntinfo")
+ count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
+ if err != nil {
+ return nil, err
}
- var entries []C.struct_statfs
- header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
- header.Cap = count
- header.Len = count
- header.Data = uintptr(unsafe.Pointer(rawEntries))
+ entries := make([]unix.Statfs_t, count)
+ _, err = unix.Getfsstat(entries, unix.MNT_WAIT)
+ if err != nil {
+ return nil, err
+ }
var out []*Info
for _, entry := range entries {
- var mountinfo Info
var skip, stop bool
- mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
- mountinfo.FSType = C.GoString(&entry.f_fstypename[0])
- mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo := getMountinfo(&entry)
if filter != nil {
// filter out entries we're not interested in
- skip, stop = filter(&mountinfo)
+ skip, stop = filter(mountinfo)
if skip {
continue
}
}
- out = append(out, &mountinfo)
+ out = append(out, mountinfo)
if stop {
break
}
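
The mountinfo_bsd.go hunk drops the cgo getmntinfo call in favour of golang.org/x/sys/unix.Getfsstat, called once with a nil slice to learn the entry count and once to fill the slice. A minimal sketch of that two-call pattern, restricted to the freebsd/darwin field names used by the new getMountinfo helper below (openbsd uses F_mntonname and friends):

    //go:build darwin || freebsd
    // +build darwin freebsd

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // First call with a nil slice only reports how many filesystems are mounted.
        n, err := unix.Getfsstat(nil, unix.MNT_WAIT)
        if err != nil {
            panic(err)
        }
        // Second call fills a slice sized from that count.
        entries := make([]unix.Statfs_t, n)
        if _, err := unix.Getfsstat(entries, unix.MNT_WAIT); err != nil {
            panic(err)
        }
        for _, e := range entries {
            fmt.Println(unix.ByteSliceToString(e.Mntonname[:]),
                unix.ByteSliceToString(e.Fstypename[:]))
        }
    }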
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go
new file mode 100644
index 000000000..ecaaa7a9c
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go
@@ -0,0 +1,14 @@
+//go:build freebsd || darwin
+// +build freebsd darwin
+
+package mountinfo
+
+import "golang.org/x/sys/unix"
+
+func getMountinfo(entry *unix.Statfs_t) *Info {
+ return &Info{
+ Mountpoint: unix.ByteSliceToString(entry.Mntonname[:]),
+ FSType: unix.ByteSliceToString(entry.Fstypename[:]),
+ Source: unix.ByteSliceToString(entry.Mntfromname[:]),
+ }
+}
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go
new file mode 100644
index 000000000..f682c2d3b
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go
@@ -0,0 +1,11 @@
+package mountinfo
+
+import "golang.org/x/sys/unix"
+
+func getMountinfo(entry *unix.Statfs_t) *Info {
+ return &Info{
+ Mountpoint: unix.ByteSliceToString(entry.F_mntonname[:]),
+ FSType: unix.ByteSliceToString(entry.F_fstypename[:]),
+ Source: unix.ByteSliceToString(entry.F_mntfromname[:]),
+ }
+}
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
index 95769a76d..c2e64bc81 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
@@ -1,5 +1,5 @@
-//go:build (!windows && !linux && !freebsd && !openbsd && !darwin) || (freebsd && !cgo) || (openbsd && !cgo) || (darwin && !cgo)
-// +build !windows,!linux,!freebsd,!openbsd,!darwin freebsd,!cgo openbsd,!cgo darwin,!cgo
+//go:build !windows && !linux && !freebsd && !openbsd && !darwin
+// +build !windows,!linux,!freebsd,!openbsd,!darwin
package mountinfo
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
index ba2b2266c..b9ba889b7 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
@@ -1,9 +1,24 @@
package cgroups
import (
+ "errors"
+
"github.com/opencontainers/runc/libcontainer/configs"
)
+var (
+ // ErrDevicesUnsupported is an error returned when a cgroup manager
+ // is not configured to set device rules.
+ ErrDevicesUnsupported = errors.New("cgroup manager is not configured to set device rules")
+
+ // DevicesSetV1 and DevicesSetV2 are functions to set devices for
+ // cgroup v1 and v2, respectively. Unless libcontainer/cgroups/devices
+ // package is imported, it is set to nil, so cgroup managers can't
+ // manage devices.
+ DevicesSetV1 func(path string, r *configs.Resources) error
+ DevicesSetV2 func(path string, r *configs.Resources) error
+)
+
type Manager interface {
// Apply creates a cgroup, if not yet created, and adds a process
// with the specified pid into that cgroup. A special value of -1
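
The cgroups.go hunk introduces package-level function variables (DevicesSetV1, DevicesSetV2) that remain nil unless a separate package assigns them, so device-rule handling is only linked in when that package is imported. A minimal sketch of that injection pattern, with invented names standing in for the runc symbols:

    package main

    import (
        "errors"
        "fmt"
    )

    // errUnsupported plays the role of ErrDevicesUnsupported.
    var errUnsupported = errors.New("manager is not configured to set device rules")

    // setDevices stays nil until another package assigns it, typically from an
    // init function, mirroring how DevicesSetV1/DevicesSetV2 are wired up only
    // when the devices package is imported.
    var setDevices func(path string) error

    func applyDevices(path string) error {
        if setDevices == nil {
            return errUnsupported
        }
        return setDevices(path)
    }

    func main() {
        fmt.Println(applyDevices("/sys/fs/cgroup/demo")) // not wired: returns the error
        setDevices = func(path string) error {           // what the importing package would do
            fmt.Println("writing device rules under", path)
            return nil
        }
        fmt.Println(applyDevices("/sys/fs/cgroup/demo")) // wired: succeeds
    }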
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
new file mode 100644
index 000000000..c81b6562a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
@@ -0,0 +1,311 @@
+package fs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type BlkioGroup struct {
+ weightFilename string
+ weightDeviceFilename string
+}
+
+func (s *BlkioGroup) Name() string {
+ return "blkio"
+}
+
+func (s *BlkioGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *BlkioGroup) Set(path string, r *configs.Resources) error {
+ s.detectWeightFilenames(path)
+ if r.BlkioWeight != 0 {
+ if err := cgroups.WriteFile(path, s.weightFilename, strconv.FormatUint(uint64(r.BlkioWeight), 10)); err != nil {
+ return err
+ }
+ }
+
+ if r.BlkioLeafWeight != 0 {
+ if err := cgroups.WriteFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(r.BlkioLeafWeight), 10)); err != nil {
+ return err
+ }
+ }
+ for _, wd := range r.BlkioWeightDevice {
+ if wd.Weight != 0 {
+ if err := cgroups.WriteFile(path, s.weightDeviceFilename, wd.WeightString()); err != nil {
+ return err
+ }
+ }
+ if wd.LeafWeight != 0 {
+ if err := cgroups.WriteFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
+ return err
+ }
+ }
+ }
+ for _, td := range r.BlkioThrottleReadBpsDevice {
+ if err := cgroups.WriteFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range r.BlkioThrottleWriteBpsDevice {
+ if err := cgroups.WriteFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range r.BlkioThrottleReadIOPSDevice {
+ if err := cgroups.WriteFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range r.BlkioThrottleWriteIOPSDevice {
+ if err := cgroups.WriteFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+/*
+examples:
+
+ blkio.sectors
+ 8:0 6792
+
+ blkio.io_service_bytes
+ 8:0 Read 1282048
+ 8:0 Write 2195456
+ 8:0 Sync 2195456
+ 8:0 Async 1282048
+ 8:0 Total 3477504
+ Total 3477504
+
+ blkio.io_serviced
+ 8:0 Read 124
+ 8:0 Write 104
+ 8:0 Sync 104
+ 8:0 Async 124
+ 8:0 Total 228
+ Total 228
+
+ blkio.io_queued
+ 8:0 Read 0
+ 8:0 Write 0
+ 8:0 Sync 0
+ 8:0 Async 0
+ 8:0 Total 0
+ Total 0
+*/
+
+func splitBlkioStatLine(r rune) bool {
+ return r == ' ' || r == ':'
+}
+
+func getBlkioStat(dir, file string) ([]cgroups.BlkioStatEntry, error) {
+ var blkioStats []cgroups.BlkioStatEntry
+ f, err := cgroups.OpenFile(dir, file, os.O_RDONLY)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return blkioStats, nil
+ }
+ return nil, err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ // format: dev type amount
+ fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
+ if len(fields) < 3 {
+ if len(fields) == 2 && fields[0] == "Total" {
+ // skip total line
+ continue
+ } else {
+ return nil, malformedLine(dir, file, sc.Text())
+ }
+ }
+
+ v, err := strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, &parseError{Path: dir, File: file, Err: err}
+ }
+ major := v
+
+ v, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, &parseError{Path: dir, File: file, Err: err}
+ }
+ minor := v
+
+ op := ""
+ valueField := 2
+ if len(fields) == 4 {
+ op = fields[2]
+ valueField = 3
+ }
+ v, err = strconv.ParseUint(fields[valueField], 10, 64)
+ if err != nil {
+ return nil, &parseError{Path: dir, File: file, Err: err}
+ }
+ blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
+ }
+ if err := sc.Err(); err != nil {
+ return nil, &parseError{Path: dir, File: file, Err: err}
+ }
+
+ return blkioStats, nil
+}
+
+func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ type blkioStatInfo struct {
+ filename string
+ blkioStatEntriesPtr *[]cgroups.BlkioStatEntry
+ }
+ bfqDebugStats := []blkioStatInfo{
+ {
+ filename: "blkio.bfq.sectors_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.SectorsRecursive,
+ },
+ {
+ filename: "blkio.bfq.io_service_time_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServiceTimeRecursive,
+ },
+ {
+ filename: "blkio.bfq.io_wait_time_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoWaitTimeRecursive,
+ },
+ {
+ filename: "blkio.bfq.io_merged_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoMergedRecursive,
+ },
+ {
+ filename: "blkio.bfq.io_queued_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoQueuedRecursive,
+ },
+ {
+ filename: "blkio.bfq.time_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoTimeRecursive,
+ },
+ {
+ filename: "blkio.bfq.io_serviced_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServicedRecursive,
+ },
+ {
+ filename: "blkio.bfq.io_service_bytes_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServiceBytesRecursive,
+ },
+ }
+ bfqStats := []blkioStatInfo{
+ {
+ filename: "blkio.bfq.io_serviced_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServicedRecursive,
+ },
+ {
+ filename: "blkio.bfq.io_service_bytes_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServiceBytesRecursive,
+ },
+ }
+ cfqStats := []blkioStatInfo{
+ {
+ filename: "blkio.sectors_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.SectorsRecursive,
+ },
+ {
+ filename: "blkio.io_service_time_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServiceTimeRecursive,
+ },
+ {
+ filename: "blkio.io_wait_time_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoWaitTimeRecursive,
+ },
+ {
+ filename: "blkio.io_merged_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoMergedRecursive,
+ },
+ {
+ filename: "blkio.io_queued_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoQueuedRecursive,
+ },
+ {
+ filename: "blkio.time_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoTimeRecursive,
+ },
+ {
+ filename: "blkio.io_serviced_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServicedRecursive,
+ },
+ {
+ filename: "blkio.io_service_bytes_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServiceBytesRecursive,
+ },
+ }
+ throttleRecursiveStats := []blkioStatInfo{
+ {
+ filename: "blkio.throttle.io_serviced_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServicedRecursive,
+ },
+ {
+ filename: "blkio.throttle.io_service_bytes_recursive",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServiceBytesRecursive,
+ },
+ }
+ baseStats := []blkioStatInfo{
+ {
+ filename: "blkio.throttle.io_serviced",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServicedRecursive,
+ },
+ {
+ filename: "blkio.throttle.io_service_bytes",
+ blkioStatEntriesPtr: &stats.BlkioStats.IoServiceBytesRecursive,
+ },
+ }
+ orderedStats := [][]blkioStatInfo{
+ bfqDebugStats,
+ bfqStats,
+ cfqStats,
+ throttleRecursiveStats,
+ baseStats,
+ }
+
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ for _, statGroup := range orderedStats {
+ for i, statInfo := range statGroup {
+ if blkioStats, err = getBlkioStat(path, statInfo.filename); err != nil || blkioStats == nil {
+ // if error occurs on first file, move to next group
+ if i == 0 {
+ break
+ }
+ return err
+ }
+ *statInfo.blkioStatEntriesPtr = blkioStats
+ // finish if all stats are gathered
+ if i == len(statGroup)-1 {
+ return nil
+ }
+ }
+ }
+ return nil
+}
+
+func (s *BlkioGroup) detectWeightFilenames(path string) {
+ if s.weightFilename != "" {
+ // Already detected.
+ return
+ }
+ if cgroups.PathExists(filepath.Join(path, "blkio.weight")) {
+ s.weightFilename = "blkio.weight"
+ s.weightDeviceFilename = "blkio.weight_device"
+ } else {
+ s.weightFilename = "blkio.bfq.weight"
+ s.weightDeviceFilename = "blkio.bfq.weight_device"
+ }
+}
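getBlkioStat above splits each stat line on spaces and the major:minor colon. A standalone sketch of how a line such as "8:0 Read 1282048" (from the example block earlier in this file) breaks down; it reimplements the split locally rather than calling the vendored package:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "8:0 Read 1282048"
	// Same splitting rule as splitBlkioStatLine: spaces and ':' are separators.
	fields := strings.FieldsFunc(line, func(r rune) bool { return r == ' ' || r == ':' })
	major, _ := strconv.ParseUint(fields[0], 10, 64)
	minor, _ := strconv.ParseUint(fields[1], 10, 64)
	value, _ := strconv.ParseUint(fields[3], 10, 64)
	fmt.Printf("major=%d minor=%d op=%s value=%d\n", major, minor, fields[2], value)
	// Output: major=8 minor=0 op=Read value=1282048
}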
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
new file mode 100644
index 000000000..6c79f899b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
@@ -0,0 +1,129 @@
+package fs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "golang.org/x/sys/unix"
+)
+
+type CpuGroup struct{}
+
+func (s *CpuGroup) Name() string {
+ return "cpu"
+}
+
+func (s *CpuGroup) Apply(path string, r *configs.Resources, pid int) error {
+ if err := os.MkdirAll(path, 0o755); err != nil {
+ return err
+ }
+ // We should set the real-time group scheduling settings before moving
+ // the process in, because if the process is already in SCHED_RR mode
+ // and no RT bandwidth is set, adding it will fail.
+ if err := s.SetRtSched(path, r); err != nil {
+ return err
+ }
+ // Since we are not using apply(), we need to place the pid
+ // into the procs file.
+ return cgroups.WriteCgroupProc(path, pid)
+}
+
+func (s *CpuGroup) SetRtSched(path string, r *configs.Resources) error {
+ if r.CpuRtPeriod != 0 {
+ if err := cgroups.WriteFile(path, "cpu.rt_period_us", strconv.FormatUint(r.CpuRtPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if r.CpuRtRuntime != 0 {
+ if err := cgroups.WriteFile(path, "cpu.rt_runtime_us", strconv.FormatInt(r.CpuRtRuntime, 10)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpuGroup) Set(path string, r *configs.Resources) error {
+ if r.CpuShares != 0 {
+ shares := r.CpuShares
+ if err := cgroups.WriteFile(path, "cpu.shares", strconv.FormatUint(shares, 10)); err != nil {
+ return err
+ }
+ // read it back
+ sharesRead, err := fscommon.GetCgroupParamUint(path, "cpu.shares")
+ if err != nil {
+ return err
+ }
+ // ... and check
+ if shares > sharesRead {
+ return fmt.Errorf("the maximum allowed cpu-shares is %d", sharesRead)
+ } else if shares < sharesRead {
+ return fmt.Errorf("the minimum allowed cpu-shares is %d", sharesRead)
+ }
+ }
+
+ var period string
+ if r.CpuPeriod != 0 {
+ period = strconv.FormatUint(r.CpuPeriod, 10)
+ if err := cgroups.WriteFile(path, "cpu.cfs_period_us", period); err != nil {
+ // Sometimes when the period to be set is smaller
+ // than the current one, it is rejected by the kernel
+ // (EINVAL) as old_quota/new_period exceeds the parent
+ // cgroup quota limit. If this happens and the quota is
+ // going to be set, ignore the error for now and retry
+ // after setting the quota.
+ if !errors.Is(err, unix.EINVAL) || r.CpuQuota == 0 {
+ return err
+ }
+ } else {
+ period = ""
+ }
+ }
+ if r.CpuQuota != 0 {
+ if err := cgroups.WriteFile(path, "cpu.cfs_quota_us", strconv.FormatInt(r.CpuQuota, 10)); err != nil {
+ return err
+ }
+ if period != "" {
+ if err := cgroups.WriteFile(path, "cpu.cfs_period_us", period); err != nil {
+ return err
+ }
+ }
+ }
+ return s.SetRtSched(path, r)
+}
+
+func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
+ const file = "cpu.stat"
+ f, err := cgroups.OpenFile(path, file, os.O_RDONLY)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ t, v, err := fscommon.ParseKeyValue(sc.Text())
+ if err != nil {
+ return &parseError{Path: path, File: file, Err: err}
+ }
+ switch t {
+ case "nr_periods":
+ stats.CpuStats.ThrottlingData.Periods = v
+
+ case "nr_throttled":
+ stats.CpuStats.ThrottlingData.ThrottledPeriods = v
+
+ case "throttled_time":
+ stats.CpuStats.ThrottlingData.ThrottledTime = v
+ }
+ }
+ return nil
+}
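The cpu.cfs_quota_us / cpu.cfs_period_us pair written by CpuGroup.Set expresses the CPU limit as a fraction: quota divided by period. A tiny worked example with made-up values:

package main

import "fmt"

func main() {
	// With a quota of 50000us per 100000us period the cgroup gets half a CPU.
	quotaUs, periodUs := int64(50000), uint64(100000)
	fmt.Printf("cpu limit = %.2f CPUs\n", float64(quotaUs)/float64(periodUs)) // cpu limit = 0.50 CPUs
}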
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
new file mode 100644
index 000000000..d3bd7e111
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
@@ -0,0 +1,166 @@
+package fs
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+const (
+ cgroupCpuacctStat = "cpuacct.stat"
+ cgroupCpuacctUsageAll = "cpuacct.usage_all"
+
+ nanosecondsInSecond = 1000000000
+
+ userModeColumn = 1
+ kernelModeColumn = 2
+ cuacctUsageAllColumnsNumber = 3
+
+ // The value comes from `C.sysconf(C._SC_CLK_TCK)`, and
+ // on Linux it's a constant which is safe to be hard coded,
+ // so we can avoid using cgo here. For details, see:
+ // https://github.com/containerd/cgroups/pull/12
+ clockTicks uint64 = 100
+)
+
+type CpuacctGroup struct{}
+
+func (s *CpuacctGroup) Name() string {
+ return "cpuacct"
+}
+
+func (s *CpuacctGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *CpuacctGroup) Set(_ string, _ *configs.Resources) error {
+ return nil
+}
+
+func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
+ if !cgroups.PathExists(path) {
+ return nil
+ }
+ userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
+ if err != nil {
+ return err
+ }
+
+ totalUsage, err := fscommon.GetCgroupParamUint(path, "cpuacct.usage")
+ if err != nil {
+ return err
+ }
+
+ percpuUsage, err := getPercpuUsage(path)
+ if err != nil {
+ return err
+ }
+
+ percpuUsageInKernelmode, percpuUsageInUsermode, err := getPercpuUsageInModes(path)
+ if err != nil {
+ return err
+ }
+
+ stats.CpuStats.CpuUsage.TotalUsage = totalUsage
+ stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
+ stats.CpuStats.CpuUsage.PercpuUsageInKernelmode = percpuUsageInKernelmode
+ stats.CpuStats.CpuUsage.PercpuUsageInUsermode = percpuUsageInUsermode
+ stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage
+ stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage
+ return nil
+}
+
+// Returns user and kernel usage breakdown in nanoseconds.
+func getCpuUsageBreakdown(path string) (uint64, uint64, error) {
+ var userModeUsage, kernelModeUsage uint64
+ const (
+ userField = "user"
+ systemField = "system"
+ file = cgroupCpuacctStat
+ )
+
+ // Expected format:
+ // user <usage in ticks>
+ // system <usage in ticks>
+ data, err := cgroups.ReadFile(path, file)
+ if err != nil {
+ return 0, 0, err
+ }
+ // TODO: use strings.SplitN instead.
+ fields := strings.Fields(data)
+ if len(fields) < 4 || fields[0] != userField || fields[2] != systemField {
+ return 0, 0, malformedLine(path, file, data)
+ }
+ if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
+ return 0, 0, &parseError{Path: path, File: file, Err: err}
+ }
+ if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
+ return 0, 0, &parseError{Path: path, File: file, Err: err}
+ }
+
+ return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil
+}
+
+func getPercpuUsage(path string) ([]uint64, error) {
+ const file = "cpuacct.usage_percpu"
+ percpuUsage := []uint64{}
+ data, err := cgroups.ReadFile(path, file)
+ if err != nil {
+ return percpuUsage, err
+ }
+ // TODO: use strings.SplitN instead.
+ for _, value := range strings.Fields(data) {
+ value, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return percpuUsage, &parseError{Path: path, File: file, Err: err}
+ }
+ percpuUsage = append(percpuUsage, value)
+ }
+ return percpuUsage, nil
+}
+
+func getPercpuUsageInModes(path string) ([]uint64, []uint64, error) {
+ usageKernelMode := []uint64{}
+ usageUserMode := []uint64{}
+ const file = cgroupCpuacctUsageAll
+
+ fd, err := cgroups.OpenFile(path, file, os.O_RDONLY)
+ if os.IsNotExist(err) {
+ return usageKernelMode, usageUserMode, nil
+ } else if err != nil {
+ return nil, nil, err
+ }
+ defer fd.Close()
+
+ scanner := bufio.NewScanner(fd)
+ scanner.Scan() // skipping header line
+
+ for scanner.Scan() {
+ lineFields := strings.SplitN(scanner.Text(), " ", cuacctUsageAllColumnsNumber+1)
+ if len(lineFields) != cuacctUsageAllColumnsNumber {
+ continue
+ }
+
+ usageInKernelMode, err := strconv.ParseUint(lineFields[kernelModeColumn], 10, 64)
+ if err != nil {
+ return nil, nil, &parseError{Path: path, File: file, Err: err}
+ }
+ usageKernelMode = append(usageKernelMode, usageInKernelMode)
+
+ usageInUserMode, err := strconv.ParseUint(lineFields[userModeColumn], 10, 64)
+ if err != nil {
+ return nil, nil, &parseError{Path: path, File: file, Err: err}
+ }
+ usageUserMode = append(usageUserMode, usageInUserMode)
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, nil, &parseError{Path: path, File: file, Err: err}
+ }
+
+ return usageKernelMode, usageUserMode, nil
+}
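getCpuUsageBreakdown converts the tick counts from cpuacct.stat into nanoseconds using the hard-coded clock tick rate of 100. A small standalone illustration of the same arithmetic on a made-up reading:

package main

import "fmt"

func main() {
	const (
		nanosecondsInSecond uint64 = 1000000000
		clockTicks          uint64 = 100 // same hard-coded value as above
	)
	userTicks := uint64(1234) // e.g. a "user 1234" line in cpuacct.stat
	fmt.Println(userTicks * nanosecondsInSecond / clockTicks)
	// Output: 12340000000 (12.34 seconds of user-mode CPU time)
}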
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
new file mode 100644
index 000000000..550baa427
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
@@ -0,0 +1,245 @@
+package fs
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type CpusetGroup struct{}
+
+func (s *CpusetGroup) Name() string {
+ return "cpuset"
+}
+
+func (s *CpusetGroup) Apply(path string, r *configs.Resources, pid int) error {
+ return s.ApplyDir(path, r, pid)
+}
+
+func (s *CpusetGroup) Set(path string, r *configs.Resources) error {
+ if r.CpusetCpus != "" {
+ if err := cgroups.WriteFile(path, "cpuset.cpus", r.CpusetCpus); err != nil {
+ return err
+ }
+ }
+ if r.CpusetMems != "" {
+ if err := cgroups.WriteFile(path, "cpuset.mems", r.CpusetMems); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func getCpusetStat(path string, file string) ([]uint16, error) {
+ var extracted []uint16
+ fileContent, err := fscommon.GetCgroupParamString(path, file)
+ if err != nil {
+ return extracted, err
+ }
+ if len(fileContent) == 0 {
+ return extracted, &parseError{Path: path, File: file, Err: errors.New("empty file")}
+ }
+
+ for _, s := range strings.Split(fileContent, ",") {
+ sp := strings.SplitN(s, "-", 3)
+ switch len(sp) {
+ case 3:
+ return extracted, &parseError{Path: path, File: file, Err: errors.New("extra dash")}
+ case 2:
+ min, err := strconv.ParseUint(sp[0], 10, 16)
+ if err != nil {
+ return extracted, &parseError{Path: path, File: file, Err: err}
+ }
+ max, err := strconv.ParseUint(sp[1], 10, 16)
+ if err != nil {
+ return extracted, &parseError{Path: path, File: file, Err: err}
+ }
+ if min > max {
+ return extracted, &parseError{Path: path, File: file, Err: errors.New("invalid values, min > max")}
+ }
+ for i := min; i <= max; i++ {
+ extracted = append(extracted, uint16(i))
+ }
+ case 1:
+ value, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ return extracted, &parseError{Path: path, File: file, Err: err}
+ }
+ extracted = append(extracted, uint16(value))
+ }
+ }
+
+ return extracted, nil
+}
+
+func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
+ var err error
+
+ stats.CPUSetStats.CPUs, err = getCpusetStat(path, "cpuset.cpus")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.CPUExclusive, err = fscommon.GetCgroupParamUint(path, "cpuset.cpu_exclusive")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.Mems, err = getCpusetStat(path, "cpuset.mems")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.MemHardwall, err = fscommon.GetCgroupParamUint(path, "cpuset.mem_hardwall")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.MemExclusive, err = fscommon.GetCgroupParamUint(path, "cpuset.mem_exclusive")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.MemoryMigrate, err = fscommon.GetCgroupParamUint(path, "cpuset.memory_migrate")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.MemorySpreadPage, err = fscommon.GetCgroupParamUint(path, "cpuset.memory_spread_page")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.MemorySpreadSlab, err = fscommon.GetCgroupParamUint(path, "cpuset.memory_spread_slab")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.MemoryPressure, err = fscommon.GetCgroupParamUint(path, "cpuset.memory_pressure")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.SchedLoadBalance, err = fscommon.GetCgroupParamUint(path, "cpuset.sched_load_balance")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ stats.CPUSetStats.SchedRelaxDomainLevel, err = fscommon.GetCgroupParamInt(path, "cpuset.sched_relax_domain_level")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpusetGroup) ApplyDir(dir string, r *configs.Resources, pid int) error {
+ // This might happen if we have no cpuset cgroup mounted.
+ // Just do nothing and don't fail.
+ if dir == "" {
+ return nil
+ }
+ // cpusetEnsureParent starts with the parent because we don't want to
+ // explicitly inherit from the parent; it could conflict with
+ // 'cpuset.cpu_exclusive'.
+ if err := cpusetEnsureParent(filepath.Dir(dir)); err != nil {
+ return err
+ }
+ if err := os.Mkdir(dir, 0o755); err != nil && !os.IsExist(err) {
+ return err
+ }
+ // We didn't inherit cpuset configs from the parent, but we have
+ // to ensure cpuset configs are set before moving the task into the
+ // cgroup.
+ // The logic is: if the user specified cpuset configs, use those;
+ // otherwise, inherit from the parent. This makes cpuset configs
+ // work correctly with 'cpuset.cpu_exclusive' and keeps backward
+ // compatibility.
+ if err := s.ensureCpusAndMems(dir, r); err != nil {
+ return err
+ }
+ // Since we are not using apply(), we need to place the pid
+ // into the procs file.
+ return cgroups.WriteCgroupProc(dir, pid)
+}
+
+func getCpusetSubsystemSettings(parent string) (cpus, mems string, err error) {
+ if cpus, err = cgroups.ReadFile(parent, "cpuset.cpus"); err != nil {
+ return
+ }
+ if mems, err = cgroups.ReadFile(parent, "cpuset.mems"); err != nil {
+ return
+ }
+ return cpus, mems, nil
+}
+
+// cpusetEnsureParent makes sure that the parent directories of current
+// are created and populated with the proper cpus and mems files copied
+// from their respective parent. It does that recursively, starting from
+// the top of the cpuset hierarchy (i.e. cpuset cgroup mount point).
+func cpusetEnsureParent(current string) error {
+ var st unix.Statfs_t
+
+ parent := filepath.Dir(current)
+ err := unix.Statfs(parent, &st)
+ if err == nil && st.Type != unix.CGROUP_SUPER_MAGIC {
+ return nil
+ }
+ // Treat non-existing directory as cgroupfs as it will be created,
+ // and the root cpuset directory obviously exists.
+ if err != nil && err != unix.ENOENT { //nolint:errorlint // unix errors are bare
+ return &os.PathError{Op: "statfs", Path: parent, Err: err}
+ }
+
+ if err := cpusetEnsureParent(parent); err != nil {
+ return err
+ }
+ if err := os.Mkdir(current, 0o755); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return cpusetCopyIfNeeded(current, parent)
+}
+
+// cpusetCopyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
+// directory to the current directory if the file's contents are 0
+func cpusetCopyIfNeeded(current, parent string) error {
+ currentCpus, currentMems, err := getCpusetSubsystemSettings(current)
+ if err != nil {
+ return err
+ }
+ parentCpus, parentMems, err := getCpusetSubsystemSettings(parent)
+ if err != nil {
+ return err
+ }
+
+ if isEmptyCpuset(currentCpus) {
+ if err := cgroups.WriteFile(current, "cpuset.cpus", parentCpus); err != nil {
+ return err
+ }
+ }
+ if isEmptyCpuset(currentMems) {
+ if err := cgroups.WriteFile(current, "cpuset.mems", parentMems); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func isEmptyCpuset(str string) bool {
+ return str == "" || str == "\n"
+}
+
+func (s *CpusetGroup) ensureCpusAndMems(path string, r *configs.Resources) error {
+ if err := s.Set(path, r); err != nil {
+ return err
+ }
+ return cpusetCopyIfNeeded(path, filepath.Dir(path))
+}
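getCpusetStat expands cpuset list syntax ("0-2,7" style) into individual CPU or memory-node IDs. A self-contained sketch of the same expansion, mirroring the range and single-value cases above with error handling trimmed:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func expand(list string) []uint16 {
	var ids []uint16
	for _, part := range strings.Split(list, ",") {
		if lo, hi, isRange := strings.Cut(part, "-"); isRange {
			// "0-2" expands to 0, 1, 2.
			first, _ := strconv.ParseUint(lo, 10, 16)
			last, _ := strconv.ParseUint(hi, 10, 16)
			for i := first; i <= last; i++ {
				ids = append(ids, uint16(i))
			}
		} else {
			// "7" is a single ID.
			v, _ := strconv.ParseUint(part, 10, 16)
			ids = append(ids, uint16(v))
		}
	}
	return ids
}

func main() {
	fmt.Println(expand("0-2,7")) // [0 1 2 7]
}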
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
new file mode 100644
index 000000000..0bf3d9deb
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
@@ -0,0 +1,39 @@
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type DevicesGroup struct{}
+
+func (s *DevicesGroup) Name() string {
+ return "devices"
+}
+
+func (s *DevicesGroup) Apply(path string, r *configs.Resources, pid int) error {
+ if r.SkipDevices {
+ return nil
+ }
+ if path == "" {
+ // Return an error here, since the devices cgroup
+ // is a hard requirement for the container's security.
+ return errSubsystemDoesNotExist
+ }
+
+ return apply(path, pid)
+}
+
+func (s *DevicesGroup) Set(path string, r *configs.Resources) error {
+ if cgroups.DevicesSetV1 == nil {
+ if len(r.Devices) == 0 {
+ return nil
+ }
+ return cgroups.ErrDevicesUnsupported
+ }
+ return cgroups.DevicesSetV1(path, r)
+}
+
+func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go
new file mode 100644
index 000000000..f2ab6f130
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go
@@ -0,0 +1,15 @@
+package fs
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+)
+
+type parseError = fscommon.ParseError
+
+// malformedLine is used by all cgroupfs file parsers that expect a line
+// in a particular format but get some garbage instead.
+func malformedLine(path, file, line string) error {
+ return &parseError{Path: path, File: file, Err: fmt.Errorf("malformed line: %s", line)}
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
new file mode 100644
index 000000000..987f1bf5e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
@@ -0,0 +1,158 @@
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+type FreezerGroup struct{}
+
+func (s *FreezerGroup) Name() string {
+ return "freezer"
+}
+
+func (s *FreezerGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *FreezerGroup) Set(path string, r *configs.Resources) (Err error) {
+ switch r.Freezer {
+ case configs.Frozen:
+ defer func() {
+ if Err != nil {
+ // Freezing failed, and it is bad and dangerous
+ // to leave the cgroup in FROZEN or FREEZING
+ // state, so (try to) thaw it back.
+ _ = cgroups.WriteFile(path, "freezer.state", string(configs.Thawed))
+ }
+ }()
+
+ // As per older kernel docs (freezer-subsystem.txt before
+ // kernel commit ef9fe980c6fcc1821), if FREEZING is seen,
+ // userspace should either retry or thaw. While current
+ // kernel cgroup v1 docs no longer mention a need to retry,
+ // even a recent kernel (v5.4, Ubuntu 20.04) can't reliably
+ // freeze a cgroup v1 while new processes keep appearing in it
+ // (either via fork/clone or by writing new PIDs to
+ // cgroup.procs).
+ //
+ // The numbers below are empirically chosen to have a decent
+ // chance to succeed in various scenarios ("runc pause/unpause
+ // with parallel runc exec" and "bare freeze/unfreeze on a very
+ // slow system"), tested on RHEL7 and Ubuntu 20.04 kernels.
+ //
+ // Adding any amount of sleep in between retries did not
+ // increase the chances of successful freeze in "pause/unpause
+ // with parallel exec" reproducer. OTOH, adding an occasional
+ // sleep helped for the case where the system is extremely slow
+ // (CentOS 7 VM on GHA CI).
+ //
+ // Alas, this is still a game of chances, since the real fix
+ // belongs to the kernel (cgroup v2 does not have this bug).
+
+ for i := 0; i < 1000; i++ {
+ if i%50 == 49 {
+ // Occasional thaw and sleep improves
+ // the chances to succeed in freezing
+ // in case new processes keep appearing
+ // in the cgroup.
+ _ = cgroups.WriteFile(path, "freezer.state", string(configs.Thawed))
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ if err := cgroups.WriteFile(path, "freezer.state", string(configs.Frozen)); err != nil {
+ return err
+ }
+
+ if i%25 == 24 {
+ // Occasional short sleep before reading
+ // the state back also improves the chances to
+ // succeed in freezing in case of a very slow
+ // system.
+ time.Sleep(10 * time.Microsecond)
+ }
+ state, err := cgroups.ReadFile(path, "freezer.state")
+ if err != nil {
+ return err
+ }
+ state = strings.TrimSpace(state)
+ switch state {
+ case "FREEZING":
+ continue
+ case string(configs.Frozen):
+ if i > 1 {
+ logrus.Debugf("frozen after %d retries", i)
+ }
+ return nil
+ default:
+ // should never happen
+ return fmt.Errorf("unexpected state %s while freezing", strings.TrimSpace(state))
+ }
+ }
+ // Despite our best efforts, it got stuck in FREEZING.
+ return errors.New("unable to freeze")
+ case configs.Thawed:
+ return cgroups.WriteFile(path, "freezer.state", string(configs.Thawed))
+ case configs.Undefined:
+ return nil
+ default:
+ return fmt.Errorf("Invalid argument '%s' to freezer.state", string(r.Freezer))
+ }
+}
+
+func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
+
+func (s *FreezerGroup) GetState(path string) (configs.FreezerState, error) {
+ for {
+ state, err := cgroups.ReadFile(path, "freezer.state")
+ if err != nil {
+ // If the kernel is too old, then we just treat the freezer as
+ // being in an "undefined" state.
+ if os.IsNotExist(err) || errors.Is(err, unix.ENODEV) {
+ err = nil
+ }
+ return configs.Undefined, err
+ }
+ switch strings.TrimSpace(state) {
+ case "THAWED":
+ return configs.Thawed, nil
+ case "FROZEN":
+ // Find out whether the cgroup is frozen directly,
+ // or indirectly via an ancestor.
+ self, err := cgroups.ReadFile(path, "freezer.self_freezing")
+ if err != nil {
+ // If the kernel is too old, then we just treat
+ // it as being frozen.
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.ENODEV) {
+ err = nil
+ }
+ return configs.Frozen, err
+ }
+ switch self {
+ case "0\n":
+ return configs.Thawed, nil
+ case "1\n":
+ return configs.Frozen, nil
+ default:
+ return configs.Undefined, fmt.Errorf(`unknown "freezer.self_freezing" state: %q`, self)
+ }
+ case "FREEZING":
+ // Make sure we get a stable freezer state, so retry if the cgroup
+ // is still undergoing freezing. This should be a temporary delay.
+ time.Sleep(1 * time.Millisecond)
+ continue
+ default:
+ return configs.Undefined, fmt.Errorf("unknown freezer.state %q", state)
+ }
+ }
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go
new file mode 100644
index 000000000..be4dcc341
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go
@@ -0,0 +1,264 @@
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var subsystems = []subsystem{
+ &CpusetGroup{},
+ &DevicesGroup{},
+ &MemoryGroup{},
+ &CpuGroup{},
+ &CpuacctGroup{},
+ &PidsGroup{},
+ &BlkioGroup{},
+ &HugetlbGroup{},
+ &NetClsGroup{},
+ &NetPrioGroup{},
+ &PerfEventGroup{},
+ &FreezerGroup{},
+ &RdmaGroup{},
+ &NameGroup{GroupName: "name=systemd", Join: true},
+}
+
+var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
+
+func init() {
+ // If using cgroups hybrid mode, add a "" controller indicating
+ // that it should join cgroup v2.
+ if cgroups.IsCgroup2HybridMode() {
+ subsystems = append(subsystems, &NameGroup{GroupName: "", Join: true})
+ }
+}
+
+type subsystem interface {
+ // Name returns the name of the subsystem.
+ Name() string
+ // GetStats fills in the stats for the subsystem.
+ GetStats(path string, stats *cgroups.Stats) error
+ // Apply creates and joins a cgroup, adding pid into it. Some
+ // subsystems use resources to pre-configure the cgroup parents
+ // before creating or joining it.
+ Apply(path string, r *configs.Resources, pid int) error
+ // Set sets the cgroup resources.
+ Set(path string, r *configs.Resources) error
+}
+
+type manager struct {
+ mu sync.Mutex
+ cgroups *configs.Cgroup
+ paths map[string]string
+}
+
+func NewManager(cg *configs.Cgroup, paths map[string]string) (cgroups.Manager, error) {
+ // Some v1 controllers (cpu, cpuset, and devices) expect
+ // cgroups.Resources to not be nil in Apply.
+ if cg.Resources == nil {
+ return nil, errors.New("cgroup v1 manager needs configs.Resources to be set during manager creation")
+ }
+ if cg.Resources.Unified != nil {
+ return nil, cgroups.ErrV1NoUnified
+ }
+
+ if paths == nil {
+ var err error
+ paths, err = initPaths(cg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &manager{
+ cgroups: cg,
+ paths: paths,
+ }, nil
+}
+
+// isIgnorableError returns whether err is a permission error (in the loose
+// sense of the word). This includes EROFS (which for an unprivileged user is
+// basically a permission error) and EACCES (for similar reasons) as well as
+// the normal EPERM.
+func isIgnorableError(rootless bool, err error) bool {
+ // We do not ignore errors if we are root.
+ if !rootless {
+ return false
+ }
+ // Is it an ordinary EPERM?
+ if errors.Is(err, os.ErrPermission) {
+ return true
+ }
+ // Handle some specific syscall errors.
+ var errno unix.Errno
+ if errors.As(err, &errno) {
+ return errno == unix.EROFS || errno == unix.EPERM || errno == unix.EACCES
+ }
+ return false
+}
+
+func (m *manager) Apply(pid int) (err error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ c := m.cgroups
+
+ for _, sys := range subsystems {
+ name := sys.Name()
+ p, ok := m.paths[name]
+ if !ok {
+ continue
+ }
+
+ if err := sys.Apply(p, c.Resources, pid); err != nil {
+ // In the case of rootless (including euid=0 in userns), where an
+ // explicit cgroup path hasn't been set, we don't bail on error in
+ // case of permission problems here, but do delete the path from
+ // the m.paths map, since it is either non-existent and could not
+ // be created, or the pid could not be added to it.
+ //
+ // Cases where limits for the subsystem have been set are handled
+ // later by Set, which fails with a friendly error (see
+ // if path == "" in Set).
+ if isIgnorableError(c.Rootless, err) && c.Path == "" {
+ delete(m.paths, name)
+ continue
+ }
+ return err
+ }
+
+ }
+ return nil
+}
+
+func (m *manager) Destroy() error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return cgroups.RemovePaths(m.paths)
+}
+
+func (m *manager) Path(subsys string) string {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.paths[subsys]
+}
+
+func (m *manager) GetStats() (*cgroups.Stats, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ stats := cgroups.NewStats()
+ for _, sys := range subsystems {
+ path := m.paths[sys.Name()]
+ if path == "" {
+ continue
+ }
+ if err := sys.GetStats(path, stats); err != nil {
+ return nil, err
+ }
+ }
+ return stats, nil
+}
+
+func (m *manager) Set(r *configs.Resources) error {
+ if r == nil {
+ return nil
+ }
+
+ if r.Unified != nil {
+ return cgroups.ErrV1NoUnified
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ for _, sys := range subsystems {
+ path := m.paths[sys.Name()]
+ if err := sys.Set(path, r); err != nil {
+ // When rootless is true, errors from the device subsystem
+ // are ignored, as it is really not expected to work.
+ if m.cgroups.Rootless && sys.Name() == "devices" && !errors.Is(err, cgroups.ErrDevicesUnsupported) {
+ continue
+ }
+ // However, errors from other subsystems are not ignored.
+ // see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
+ if path == "" {
+ // We never created a path for this cgroup, so we cannot set
+ // limits for it (though we have already tried at this point).
+ return fmt.Errorf("cannot set %s limit: container could not join or create cgroup", sys.Name())
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Freeze toggles the container's freezer cgroup depending on the state
+// provided
+func (m *manager) Freeze(state configs.FreezerState) error {
+ path := m.Path("freezer")
+ if path == "" {
+ return errors.New("cannot toggle freezer: cgroups not configured for container")
+ }
+
+ prevState := m.cgroups.Resources.Freezer
+ m.cgroups.Resources.Freezer = state
+ freezer := &FreezerGroup{}
+ if err := freezer.Set(path, m.cgroups.Resources); err != nil {
+ m.cgroups.Resources.Freezer = prevState
+ return err
+ }
+ return nil
+}
+
+func (m *manager) GetPids() ([]int, error) {
+ return cgroups.GetPids(m.Path("devices"))
+}
+
+func (m *manager) GetAllPids() ([]int, error) {
+ return cgroups.GetAllPids(m.Path("devices"))
+}
+
+func (m *manager) GetPaths() map[string]string {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.paths
+}
+
+func (m *manager) GetCgroups() (*configs.Cgroup, error) {
+ return m.cgroups, nil
+}
+
+func (m *manager) GetFreezerState() (configs.FreezerState, error) {
+ dir := m.Path("freezer")
+ // If the container doesn't have the freezer cgroup, say it's undefined.
+ if dir == "" {
+ return configs.Undefined, nil
+ }
+ freezer := &FreezerGroup{}
+ return freezer.GetState(dir)
+}
+
+func (m *manager) Exists() bool {
+ return cgroups.PathExists(m.Path("devices"))
+}
+
+func OOMKillCount(path string) (uint64, error) {
+ return fscommon.GetValueByKey(path, "memory.oom_control", "oom_kill")
+}
+
+func (m *manager) OOMKillCount() (uint64, error) {
+ c, err := OOMKillCount(m.Path("memory"))
+ // Ignore ENOENT when rootless as it couldn't create cgroup.
+ if err != nil && m.cgroups.Rootless && os.IsNotExist(err) {
+ err = nil
+ }
+
+ return c, err
+}
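A hedged usage sketch of the fs (cgroup v1) manager defined above: the cgroup name and parent are made up, Resources must be non-nil as enforced by NewManager, and actually joining and limiting the cgroup requires a cgroup v1 host with sufficient privileges:

package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer/cgroups/fs"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	cg := &configs.Cgroup{
		Name:      "example", // hypothetical cgroup name
		Parent:    "demo",    // hypothetical parent
		Resources: &configs.Resources{PidsLimit: 64}, // must be non-nil for the v1 manager
	}

	mgr, err := fs.NewManager(cg, nil) // nil paths: let the manager discover them
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer func() { _ = mgr.Destroy() }()

	// Join the current process, then apply the resource limits.
	if err := mgr.Apply(os.Getpid()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	if err := mgr.Set(cg.Resources); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}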
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
new file mode 100644
index 000000000..8ddd6fdd8
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
@@ -0,0 +1,62 @@
+package fs
+
+import (
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type HugetlbGroup struct{}
+
+func (s *HugetlbGroup) Name() string {
+ return "hugetlb"
+}
+
+func (s *HugetlbGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *HugetlbGroup) Set(path string, r *configs.Resources) error {
+ for _, hugetlb := range r.HugetlbLimit {
+ if err := cgroups.WriteFile(path, "hugetlb."+hugetlb.Pagesize+".limit_in_bytes", strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
+ if !cgroups.PathExists(path) {
+ return nil
+ }
+ hugetlbStats := cgroups.HugetlbStats{}
+ for _, pageSize := range cgroups.HugePageSizes() {
+ usage := "hugetlb." + pageSize + ".usage_in_bytes"
+ value, err := fscommon.GetCgroupParamUint(path, usage)
+ if err != nil {
+ return err
+ }
+ hugetlbStats.Usage = value
+
+ maxUsage := "hugetlb." + pageSize + ".max_usage_in_bytes"
+ value, err = fscommon.GetCgroupParamUint(path, maxUsage)
+ if err != nil {
+ return err
+ }
+ hugetlbStats.MaxUsage = value
+
+ failcnt := "hugetlb." + pageSize + ".failcnt"
+ value, err = fscommon.GetCgroupParamUint(path, failcnt)
+ if err != nil {
+ return err
+ }
+ hugetlbStats.Failcnt = value
+
+ stats.HugetlbStats[pageSize] = hugetlbStats
+ }
+
+ return nil
+}
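The hugetlb file names above are assembled per page size reported by cgroups.HugePageSizes(); for a 2 MB page size that yields hugetlb.2MB.limit_in_bytes, usage_in_bytes, max_usage_in_bytes and failcnt. A trivial illustration with made-up size strings:

package main

import "fmt"

func main() {
	for _, pageSize := range []string{"2MB", "1GB"} { // hypothetical page sizes
		for _, suffix := range []string{"limit_in_bytes", "usage_in_bytes", "max_usage_in_bytes", "failcnt"} {
			fmt.Println("hugetlb." + pageSize + "." + suffix)
		}
	}
}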
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
new file mode 100644
index 000000000..b7c75f941
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
@@ -0,0 +1,348 @@
+package fs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+const (
+ cgroupMemorySwapLimit = "memory.memsw.limit_in_bytes"
+ cgroupMemoryLimit = "memory.limit_in_bytes"
+ cgroupMemoryUsage = "memory.usage_in_bytes"
+ cgroupMemoryMaxUsage = "memory.max_usage_in_bytes"
+)
+
+type MemoryGroup struct{}
+
+func (s *MemoryGroup) Name() string {
+ return "memory"
+}
+
+func (s *MemoryGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func setMemory(path string, val int64) error {
+ if val == 0 {
+ return nil
+ }
+
+ err := cgroups.WriteFile(path, cgroupMemoryLimit, strconv.FormatInt(val, 10))
+ if !errors.Is(err, unix.EBUSY) {
+ return err
+ }
+
+ // EBUSY means the kernel can't set the new limit as it's too low
+ // (lower than the current usage). Return a more specific error.
+ usage, err := fscommon.GetCgroupParamUint(path, cgroupMemoryUsage)
+ if err != nil {
+ return err
+ }
+ max, err := fscommon.GetCgroupParamUint(path, cgroupMemoryMaxUsage)
+ if err != nil {
+ return err
+ }
+
+ return fmt.Errorf("unable to set memory limit to %d (current usage: %d, peak usage: %d)", val, usage, max)
+}
+
+func setSwap(path string, val int64) error {
+ if val == 0 {
+ return nil
+ }
+
+ return cgroups.WriteFile(path, cgroupMemorySwapLimit, strconv.FormatInt(val, 10))
+}
+
+func setMemoryAndSwap(path string, r *configs.Resources) error {
+ // If the memory update is set to -1 and the swap is not explicitly
+ // set, we should also set swap to -1, which means unlimited memory.
+ if r.Memory == -1 && r.MemorySwap == 0 {
+ // Only set swap if it's enabled in kernel
+ if cgroups.PathExists(filepath.Join(path, cgroupMemorySwapLimit)) {
+ r.MemorySwap = -1
+ }
+ }
+
+ // When memory and swap memory are both set, we need to handle the cases
+ // for updating the container.
+ if r.Memory != 0 && r.MemorySwap != 0 {
+ curLimit, err := fscommon.GetCgroupParamUint(path, cgroupMemoryLimit)
+ if err != nil {
+ return err
+ }
+
+ // When updating the memory limit, we should adapt the write sequence
+ // for memory and swap memory, so it won't fail because the new
+ // value and the old value don't pass the kernel's validation.
+ if r.MemorySwap == -1 || curLimit < uint64(r.MemorySwap) {
+ if err := setSwap(path, r.MemorySwap); err != nil {
+ return err
+ }
+ if err := setMemory(path, r.Memory); err != nil {
+ return err
+ }
+ return nil
+ }
+ }
+
+ if err := setMemory(path, r.Memory); err != nil {
+ return err
+ }
+ if err := setSwap(path, r.MemorySwap); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *MemoryGroup) Set(path string, r *configs.Resources) error {
+ if err := setMemoryAndSwap(path, r); err != nil {
+ return err
+ }
+
+ // ignore KernelMemory and KernelMemoryTCP
+
+ if r.MemoryReservation != 0 {
+ if err := cgroups.WriteFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(r.MemoryReservation, 10)); err != nil {
+ return err
+ }
+ }
+
+ if r.OomKillDisable {
+ if err := cgroups.WriteFile(path, "memory.oom_control", "1"); err != nil {
+ return err
+ }
+ }
+ if r.MemorySwappiness == nil || int64(*r.MemorySwappiness) == -1 {
+ return nil
+ } else if *r.MemorySwappiness <= 100 {
+ if err := cgroups.WriteFile(path, "memory.swappiness", strconv.FormatUint(*r.MemorySwappiness, 10)); err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("invalid memory swappiness value: %d (valid range is 0-100)", *r.MemorySwappiness)
+ }
+
+ return nil
+}
+
+func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
+ const file = "memory.stat"
+ statsFile, err := cgroups.OpenFile(path, file, os.O_RDONLY)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer statsFile.Close()
+
+ sc := bufio.NewScanner(statsFile)
+ for sc.Scan() {
+ t, v, err := fscommon.ParseKeyValue(sc.Text())
+ if err != nil {
+ return &parseError{Path: path, File: file, Err: err}
+ }
+ stats.MemoryStats.Stats[t] = v
+ }
+ stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
+
+ memoryUsage, err := getMemoryData(path, "")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.Usage = memoryUsage
+ swapUsage, err := getMemoryData(path, "memsw")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.SwapUsage = swapUsage
+ kernelUsage, err := getMemoryData(path, "kmem")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelUsage = kernelUsage
+ kernelTCPUsage, err := getMemoryData(path, "kmem.tcp")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelTCPUsage = kernelTCPUsage
+
+ value, err := fscommon.GetCgroupParamUint(path, "memory.use_hierarchy")
+ if err != nil {
+ return err
+ }
+ if value == 1 {
+ stats.MemoryStats.UseHierarchy = true
+ }
+
+ pagesByNUMA, err := getPageUsageByNUMA(path)
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.PageUsageByNUMA = pagesByNUMA
+
+ return nil
+}
+
+func getMemoryData(path, name string) (cgroups.MemoryData, error) {
+ memoryData := cgroups.MemoryData{}
+
+ moduleName := "memory"
+ if name != "" {
+ moduleName = "memory." + name
+ }
+ var (
+ usage = moduleName + ".usage_in_bytes"
+ maxUsage = moduleName + ".max_usage_in_bytes"
+ failcnt = moduleName + ".failcnt"
+ limit = moduleName + ".limit_in_bytes"
+ )
+
+ value, err := fscommon.GetCgroupParamUint(path, usage)
+ if err != nil {
+ if name != "" && os.IsNotExist(err) {
+ // Ignore ENOENT as swap and kmem controllers
+ // are optional in the kernel.
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, err
+ }
+ memoryData.Usage = value
+ value, err = fscommon.GetCgroupParamUint(path, maxUsage)
+ if err != nil {
+ return cgroups.MemoryData{}, err
+ }
+ memoryData.MaxUsage = value
+ value, err = fscommon.GetCgroupParamUint(path, failcnt)
+ if err != nil {
+ return cgroups.MemoryData{}, err
+ }
+ memoryData.Failcnt = value
+ value, err = fscommon.GetCgroupParamUint(path, limit)
+ if err != nil {
+ return cgroups.MemoryData{}, err
+ }
+ memoryData.Limit = value
+
+ return memoryData, nil
+}
+
+func getPageUsageByNUMA(path string) (cgroups.PageUsageByNUMA, error) {
+ const (
+ maxColumns = math.MaxUint8 + 1
+ file = "memory.numa_stat"
+ )
+ stats := cgroups.PageUsageByNUMA{}
+
+ fd, err := cgroups.OpenFile(path, file, os.O_RDONLY)
+ if os.IsNotExist(err) {
+ return stats, nil
+ } else if err != nil {
+ return stats, err
+ }
+ defer fd.Close()
+
+ // File format is documented in linux/Documentation/cgroup-v1/memory.txt
+ // and it looks like this:
+ //
+ // total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ...
+ // file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ...
+ // anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
+ // unevictable=<total unevictable pages> N0=<node 0 pages> N1=<node 1 pages> ...
+ // hierarchical_<counter>=<counter pages> N0=<node 0 pages> N1=<node 1 pages> ...
+
+ scanner := bufio.NewScanner(fd)
+ for scanner.Scan() {
+ var field *cgroups.PageStats
+
+ line := scanner.Text()
+ columns := strings.SplitN(line, " ", maxColumns)
+ for i, column := range columns {
+ byNode := strings.SplitN(column, "=", 2)
+ // Some custom kernels have non-standard fields, like
+ // numa_locality 0 0 0 0 0 0 0 0 0 0
+ // numa_exectime 0
+ if len(byNode) < 2 {
+ if i == 0 {
+ // Ignore/skip those.
+ break
+ } else {
+ // The first column was already validated,
+ // so be strict to the rest.
+ return stats, malformedLine(path, file, line)
+ }
+ }
+ key, val := byNode[0], byNode[1]
+ if i == 0 { // First column: key is name, val is total.
+ field = getNUMAField(&stats, key)
+ if field == nil { // unknown field (new kernel?)
+ break
+ }
+ field.Total, err = strconv.ParseUint(val, 0, 64)
+ if err != nil {
+ return stats, &parseError{Path: path, File: file, Err: err}
+ }
+ field.Nodes = map[uint8]uint64{}
+ } else { // Subsequent columns: key is N<id>, val is usage.
+ if len(key) < 2 || key[0] != 'N' {
+ // This is definitely an error.
+ return stats, malformedLine(path, file, line)
+ }
+
+ n, err := strconv.ParseUint(key[1:], 10, 8)
+ if err != nil {
+ return stats, &parseError{Path: path, File: file, Err: err}
+ }
+
+ usage, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return stats, &parseError{Path: path, File: file, Err: err}
+ }
+
+ field.Nodes[uint8(n)] = usage
+ }
+
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return cgroups.PageUsageByNUMA{}, &parseError{Path: path, File: file, Err: err}
+ }
+
+ return stats, nil
+}
+
+func getNUMAField(stats *cgroups.PageUsageByNUMA, name string) *cgroups.PageStats {
+ switch name {
+ case "total":
+ return &stats.Total
+ case "file":
+ return &stats.File
+ case "anon":
+ return &stats.Anon
+ case "unevictable":
+ return &stats.Unevictable
+ case "hierarchical_total":
+ return &stats.Hierarchical.Total
+ case "hierarchical_file":
+ return &stats.Hierarchical.File
+ case "hierarchical_anon":
+ return &stats.Hierarchical.Anon
+ case "hierarchical_unevictable":
+ return &stats.Hierarchical.Unevictable
+ }
+ return nil
+}
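getPageUsageByNUMA walks lines of the form "total=<pages> N0=<pages> N1=<pages> ...". A standalone sketch of that shape on a made-up line, producing the total plus a per-node map like the PageStats fields filled above:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "total=44611 N0=32631 N1=11980" // hypothetical memory.numa_stat line
	var total uint64
	nodes := map[uint8]uint64{}
	for i, col := range strings.Fields(line) {
		key, val, _ := strings.Cut(col, "=")
		n, _ := strconv.ParseUint(val, 10, 64)
		if i == 0 {
			total = n // first column carries the counter name and its total
			continue
		}
		id, _ := strconv.ParseUint(strings.TrimPrefix(key, "N"), 10, 8)
		nodes[uint8(id)] = n
	}
	fmt.Println(total, nodes) // 44611 map[0:32631 1:11980]
}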
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
new file mode 100644
index 000000000..b8d5d849c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
@@ -0,0 +1,31 @@
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NameGroup struct {
+ GroupName string
+ Join bool
+}
+
+func (s *NameGroup) Name() string {
+ return s.GroupName
+}
+
+func (s *NameGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ if s.Join {
+ // Ignore errors if the named cgroup does not exist.
+ _ = apply(path, pid)
+ }
+ return nil
+}
+
+func (s *NameGroup) Set(_ string, _ *configs.Resources) error {
+ return nil
+}
+
+func (s *NameGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
new file mode 100644
index 000000000..abfd09ce8
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
@@ -0,0 +1,32 @@
+package fs
+
+import (
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetClsGroup struct{}
+
+func (s *NetClsGroup) Name() string {
+ return "net_cls"
+}
+
+func (s *NetClsGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *NetClsGroup) Set(path string, r *configs.Resources) error {
+ if r.NetClsClassid != 0 {
+ if err := cgroups.WriteFile(path, "net_cls.classid", strconv.FormatUint(uint64(r.NetClsClassid), 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
new file mode 100644
index 000000000..da74d3779
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
@@ -0,0 +1,30 @@
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetPrioGroup struct{}
+
+func (s *NetPrioGroup) Name() string {
+ return "net_prio"
+}
+
+func (s *NetPrioGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *NetPrioGroup) Set(path string, r *configs.Resources) error {
+ for _, prioMap := range r.NetPrioIfpriomap {
+ if err := cgroups.WriteFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go
new file mode 100644
index 000000000..1092331b2
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go
@@ -0,0 +1,186 @@
+package fs
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+// The absolute path to the root of the cgroup hierarchies.
+var (
+ cgroupRootLock sync.Mutex
+ cgroupRoot string
+)
+
+const defaultCgroupRoot = "/sys/fs/cgroup"
+
+func initPaths(cg *configs.Cgroup) (map[string]string, error) {
+ root, err := rootPath()
+ if err != nil {
+ return nil, err
+ }
+
+ inner, err := innerPath(cg)
+ if err != nil {
+ return nil, err
+ }
+
+ paths := make(map[string]string)
+ for _, sys := range subsystems {
+ name := sys.Name()
+ path, err := subsysPath(root, inner, name)
+ if err != nil {
+ // The non-presence of the devices subsystem
+ // is considered fatal for security reasons.
+ if cgroups.IsNotFound(err) && (cg.SkipDevices || name != "devices") {
+ continue
+ }
+
+ return nil, err
+ }
+ paths[name] = path
+ }
+
+ return paths, nil
+}
+
+func tryDefaultCgroupRoot() string {
+ var st, pst unix.Stat_t
+
+ // (1) it should be a directory...
+ err := unix.Lstat(defaultCgroupRoot, &st)
+ if err != nil || st.Mode&unix.S_IFDIR == 0 {
+ return ""
+ }
+
+ // (2) ... and a mount point ...
+ err = unix.Lstat(filepath.Dir(defaultCgroupRoot), &pst)
+ if err != nil {
+ return ""
+ }
+
+ if st.Dev == pst.Dev {
+ // parent dir has the same dev -- not a mount point
+ return ""
+ }
+
+ // (3) ... of 'tmpfs' fs type.
+ var fst unix.Statfs_t
+ err = unix.Statfs(defaultCgroupRoot, &fst)
+ if err != nil || fst.Type != unix.TMPFS_MAGIC {
+ return ""
+ }
+
+ // (4) it should have at least 1 entry ...
+ dir, err := os.Open(defaultCgroupRoot)
+ if err != nil {
+ return ""
+ }
+ names, err := dir.Readdirnames(1)
+ if err != nil {
+ return ""
+ }
+ if len(names) < 1 {
+ return ""
+ }
+ // ... which is a cgroup mount point.
+ err = unix.Statfs(filepath.Join(defaultCgroupRoot, names[0]), &fst)
+ if err != nil || fst.Type != unix.CGROUP_SUPER_MAGIC {
+ return ""
+ }
+
+ return defaultCgroupRoot
+}
+
+// rootPath finds and returns path to the root of the cgroup hierarchies.
+func rootPath() (string, error) {
+ cgroupRootLock.Lock()
+ defer cgroupRootLock.Unlock()
+
+ if cgroupRoot != "" {
+ return cgroupRoot, nil
+ }
+
+ // fast path
+ cgroupRoot = tryDefaultCgroupRoot()
+ if cgroupRoot != "" {
+ return cgroupRoot, nil
+ }
+
+ // slow path: parse mountinfo
+ mi, err := cgroups.GetCgroupMounts(false)
+ if err != nil {
+ return "", err
+ }
+ if len(mi) < 1 {
+ return "", errors.New("no cgroup mount found in mountinfo")
+ }
+
+ // Get the first cgroup mount (e.g. "/sys/fs/cgroup/memory"),
+ // use its parent directory.
+ root := filepath.Dir(mi[0].Mountpoint)
+
+ if _, err := os.Stat(root); err != nil {
+ return "", err
+ }
+
+ cgroupRoot = root
+ return cgroupRoot, nil
+}
+
+func innerPath(c *configs.Cgroup) (string, error) {
+ if (c.Name != "" || c.Parent != "") && c.Path != "" {
+ return "", errors.New("cgroup: either Path or Name and Parent should be used")
+ }
+
+ // XXX: Do not remove CleanPath. Path safety is important! -- cyphar
+ innerPath := utils.CleanPath(c.Path)
+ if innerPath == "" {
+ cgParent := utils.CleanPath(c.Parent)
+ cgName := utils.CleanPath(c.Name)
+ innerPath = filepath.Join(cgParent, cgName)
+ }
+
+ return innerPath, nil
+}
+
+func subsysPath(root, inner, subsystem string) (string, error) {
+ // If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
+ if filepath.IsAbs(inner) {
+ mnt, err := cgroups.FindCgroupMountpoint(root, subsystem)
+ // If we didn't mount the subsystem, there is no point in making the path.
+ if err != nil {
+ return "", err
+ }
+
+ // Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
+ return filepath.Join(root, filepath.Base(mnt), inner), nil
+ }
+
+ // Use GetOwnCgroupPath instead of GetInitCgroupPath, because the creating
+ // process could be in a container sharing a pid namespace with the host, and
+ // /proc/1/cgroup could point to a whole other world of cgroups.
+ parentPath, err := cgroups.GetOwnCgroupPath(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(parentPath, inner), nil
+}
+
+func apply(path string, pid int) error {
+ if path == "" {
+ return nil
+ }
+ if err := os.MkdirAll(path, 0o755); err != nil {
+ return err
+ }
+ return cgroups.WriteCgroupProc(path, pid)
+}
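For a relative cgroup path, subsysPath appends the inner path (Parent joined with Name, or Path) to the calling process's own cgroup for that controller. A small illustration of the resulting layout using made-up values; the real paths depend on the host's mounts and GetOwnCgroupPath:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	own := "/sys/fs/cgroup/memory/user.slice"   // hypothetical GetOwnCgroupPath("memory") result
	inner := filepath.Join("machine", "ctr-42") // from Parent="machine", Name="ctr-42" (made up)
	fmt.Println(filepath.Join(own, inner))
	// Output: /sys/fs/cgroup/memory/user.slice/machine/ctr-42
}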
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
new file mode 100644
index 000000000..b86955c8f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
@@ -0,0 +1,24 @@
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PerfEventGroup struct{}
+
+func (s *PerfEventGroup) Name() string {
+ return "perf_event"
+}
+
+func (s *PerfEventGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *PerfEventGroup) Set(_ string, _ *configs.Resources) error {
+ return nil
+}
+
+func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
new file mode 100644
index 000000000..1f13532a5
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
@@ -0,0 +1,62 @@
+package fs
+
+import (
+ "math"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PidsGroup struct{}
+
+func (s *PidsGroup) Name() string {
+ return "pids"
+}
+
+func (s *PidsGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *PidsGroup) Set(path string, r *configs.Resources) error {
+ if r.PidsLimit != 0 {
+ // "max" is the fallback value.
+ limit := "max"
+
+ if r.PidsLimit > 0 {
+ limit = strconv.FormatInt(r.PidsLimit, 10)
+ }
+
+ if err := cgroups.WriteFile(path, "pids.max", limit); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ if !cgroups.PathExists(path) {
+ return nil
+ }
+ current, err := fscommon.GetCgroupParamUint(path, "pids.current")
+ if err != nil {
+ return err
+ }
+
+ max, err := fscommon.GetCgroupParamUint(path, "pids.max")
+ if err != nil {
+ return err
+ }
+ // If no limit is set, reading pids.max returns "max", which is
+ // converted to MaxUint64 by GetCgroupParamUint. Historically, we
+ // represent "no limit" for pids as 0, hence this conversion.
+ if max == math.MaxUint64 {
+ max = 0
+ }
+
+ stats.PidsStats.Current = current
+ stats.PidsStats.Limit = max
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go
new file mode 100644
index 000000000..5bbe0f35f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go
@@ -0,0 +1,25 @@
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type RdmaGroup struct{}
+
+func (s *RdmaGroup) Name() string {
+ return "rdma"
+}
+
+func (s *RdmaGroup) Apply(path string, _ *configs.Resources, pid int) error {
+ return apply(path, pid)
+}
+
+func (s *RdmaGroup) Set(path string, r *configs.Resources) error {
+ return fscommon.RdmaSet(path, r)
+}
+
+func (s *RdmaGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return fscommon.RdmaGetStats(path, stats)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go
new file mode 100644
index 000000000..bbbae4d58
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go
@@ -0,0 +1,87 @@
+package fs2
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func isCpuSet(r *configs.Resources) bool {
+ return r.CpuWeight != 0 || r.CpuQuota != 0 || r.CpuPeriod != 0
+}
+
+func setCpu(dirPath string, r *configs.Resources) error {
+ if !isCpuSet(r) {
+ return nil
+ }
+
+ // NOTE: .CpuShares is not used here. Conversion is the caller's responsibility.
+ if r.CpuWeight != 0 {
+ if err := cgroups.WriteFile(dirPath, "cpu.weight", strconv.FormatUint(r.CpuWeight, 10)); err != nil {
+ return err
+ }
+ }
+
+ if r.CpuQuota != 0 || r.CpuPeriod != 0 {
+ str := "max"
+ if r.CpuQuota > 0 {
+ str = strconv.FormatInt(r.CpuQuota, 10)
+ }
+ period := r.CpuPeriod
+ if period == 0 {
+ // This default value is documented in
+ // https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
+ period = 100000
+ }
+ str += " " + strconv.FormatUint(period, 10)
+ if err := cgroups.WriteFile(dirPath, "cpu.max", str); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func statCpu(dirPath string, stats *cgroups.Stats) error {
+ const file = "cpu.stat"
+ f, err := cgroups.OpenFile(dirPath, file, os.O_RDONLY)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ t, v, err := fscommon.ParseKeyValue(sc.Text())
+ if err != nil {
+ return &parseError{Path: dirPath, File: file, Err: err}
+ }
+ switch t {
+ case "usage_usec":
+ stats.CpuStats.CpuUsage.TotalUsage = v * 1000
+
+ case "user_usec":
+ stats.CpuStats.CpuUsage.UsageInUsermode = v * 1000
+
+ case "system_usec":
+ stats.CpuStats.CpuUsage.UsageInKernelmode = v * 1000
+
+ case "nr_periods":
+ stats.CpuStats.ThrottlingData.Periods = v
+
+ case "nr_throttled":
+ stats.CpuStats.ThrottlingData.ThrottledPeriods = v
+
+ case "throttled_usec":
+ stats.CpuStats.ThrottlingData.ThrottledTime = v * 1000
+ }
+ }
+ if err := sc.Err(); err != nil {
+ return &parseError{Path: dirPath, File: file, Err: err}
+ }
+ return nil
+}
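For reference (not part of the vendored patch), a minimal sketch of the "$MAX $PERIOD" string that setCpu writes to cpu.max; the quota and period values are hypothetical.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var quota int64 = 250000   // CpuQuota in microseconds; <= 0 maps to "max"
	var period uint64 = 100000 // CpuPeriod; 0 falls back to the documented default of 100000

	str := "max"
	if quota > 0 {
		str = strconv.FormatInt(quota, 10)
	}
	str += " " + strconv.FormatUint(period, 10)
	fmt.Println(str) // "250000 100000", written to cpu.max
}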
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go
new file mode 100644
index 000000000..16c45bad8
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go
@@ -0,0 +1,28 @@
+package fs2
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func isCpusetSet(r *configs.Resources) bool {
+ return r.CpusetCpus != "" || r.CpusetMems != ""
+}
+
+func setCpuset(dirPath string, r *configs.Resources) error {
+ if !isCpusetSet(r) {
+ return nil
+ }
+
+ if r.CpusetCpus != "" {
+ if err := cgroups.WriteFile(dirPath, "cpuset.cpus", r.CpusetCpus); err != nil {
+ return err
+ }
+ }
+ if r.CpusetMems != "" {
+ if err := cgroups.WriteFile(dirPath, "cpuset.mems", r.CpusetMems); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/create.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/create.go
new file mode 100644
index 000000000..641123a4d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/create.go
@@ -0,0 +1,152 @@
+package fs2
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func supportedControllers() (string, error) {
+ return cgroups.ReadFile(UnifiedMountpoint, "/cgroup.controllers")
+}
+
+// needAnyControllers reports whether any supported controllers need to be
+// enabled, based on (1) the controllers available and (2) the resources that
+// are being set. Pseudo-controllers such as "freezer" and "devices" are not
+// checked.
+func needAnyControllers(r *configs.Resources) (bool, error) {
+ if r == nil {
+ return false, nil
+ }
+
+ // list of all available controllers
+ content, err := supportedControllers()
+ if err != nil {
+ return false, err
+ }
+ avail := make(map[string]struct{})
+ for _, ctr := range strings.Fields(content) {
+ avail[ctr] = struct{}{}
+ }
+
+ // check whether the controller is available or not
+ have := func(controller string) bool {
+ _, ok := avail[controller]
+ return ok
+ }
+
+ if isPidsSet(r) && have("pids") {
+ return true, nil
+ }
+ if isMemorySet(r) && have("memory") {
+ return true, nil
+ }
+ if isIoSet(r) && have("io") {
+ return true, nil
+ }
+ if isCpuSet(r) && have("cpu") {
+ return true, nil
+ }
+ if isCpusetSet(r) && have("cpuset") {
+ return true, nil
+ }
+ if isHugeTlbSet(r) && have("hugetlb") {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// containsDomainController returns whether the current config contains a domain controller.
+// Refer to: http://man7.org/linux/man-pages/man7/cgroups.7.html
+// As at Linux 4.19, the following controllers are threaded: cpu, perf_event, and pids.
+func containsDomainController(r *configs.Resources) bool {
+ return isMemorySet(r) || isIoSet(r) || isCpuSet(r) || isHugeTlbSet(r)
+}
+
+// CreateCgroupPath creates cgroupv2 path, enabling all the supported controllers.
+func CreateCgroupPath(path string, c *configs.Cgroup) (Err error) {
+ if !strings.HasPrefix(path, UnifiedMountpoint) {
+ return fmt.Errorf("invalid cgroup path %s", path)
+ }
+
+ content, err := supportedControllers()
+ if err != nil {
+ return err
+ }
+
+ const (
+ cgTypeFile = "cgroup.type"
+ cgStCtlFile = "cgroup.subtree_control"
+ )
+ ctrs := strings.Fields(content)
+ res := "+" + strings.Join(ctrs, " +")
+
+ elements := strings.Split(path, "/")
+ elements = elements[3:]
+ current := "/sys/fs"
+ for i, e := range elements {
+ current = filepath.Join(current, e)
+ if i > 0 {
+ if err := os.Mkdir(current, 0o755); err != nil {
+ if !os.IsExist(err) {
+ return err
+ }
+ } else {
+ // If the directory was created, be sure it is not left around on errors.
+ current := current
+ defer func() {
+ if Err != nil {
+ os.Remove(current)
+ }
+ }()
+ }
+ cgType, _ := cgroups.ReadFile(current, cgTypeFile)
+ cgType = strings.TrimSpace(cgType)
+ switch cgType {
+ // If the cgroup is in an invalid mode (usually this means there's an internal
+ // process in the cgroup tree, because we created a cgroup under an
+ // already-populated-by-other-processes cgroup), then we have to error out if
+ // the user requested controllers which are not thread-aware. However, if all
+ // the controllers requested are thread-aware we can simply put the cgroup into
+ // threaded mode.
+ case "domain invalid":
+ if containsDomainController(c.Resources) {
+ return fmt.Errorf("cannot enter cgroupv2 %q with domain controllers -- it is in an invalid state", current)
+ } else {
+ // Not entirely correct (in theory we'd always want to be a domain --
+ // since that means we're a properly delegated cgroup subtree) but in
+ // this case there's not much we can do and it's better than giving an
+ // error.
+ _ = cgroups.WriteFile(current, cgTypeFile, "threaded")
+ }
+ // If the cgroup is in (threaded) or (domain threaded) mode, we can only use thread-aware controllers
+ // (and you cannot usually take a cgroup out of threaded mode).
+ case "domain threaded":
+ fallthrough
+ case "threaded":
+ if containsDomainController(c.Resources) {
+ return fmt.Errorf("cannot enter cgroupv2 %q with domain controllers -- it is in %s mode", current, cgType)
+ }
+ }
+ }
+ // enable all supported controllers
+ if i < len(elements)-1 {
+ if err := cgroups.WriteFile(current, cgStCtlFile, res); err != nil {
+ // try write one by one
+ allCtrs := strings.Split(res, " ")
+ for _, ctr := range allCtrs {
+ _ = cgroups.WriteFile(current, cgStCtlFile, ctr)
+ }
+ }
+ // Some controllers might not be enabled when rootless or containerized,
+ // but we don't catch the error here. (Caught in setXXX() functions.)
+ }
+ }
+
+ return nil
+}
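For reference (not part of the vendored patch), a small sketch of the "+controller" string CreateCgroupPath writes to cgroup.subtree_control, assuming cgroup.controllers reports a hypothetical "cpu io memory pids".

package main

import (
	"fmt"
	"strings"
)

func main() {
	content := "cpu io memory pids" // hypothetical cgroup.controllers content
	ctrs := strings.Fields(content)
	res := "+" + strings.Join(ctrs, " +")
	fmt.Println(res) // "+cpu +io +memory +pids", written to each ancestor's cgroup.subtree_control
}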
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go
new file mode 100644
index 000000000..9c949c91f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go
@@ -0,0 +1,99 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs2
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+const UnifiedMountpoint = "/sys/fs/cgroup"
+
+func defaultDirPath(c *configs.Cgroup) (string, error) {
+ if (c.Name != "" || c.Parent != "") && c.Path != "" {
+ return "", fmt.Errorf("cgroup: either Path or Name and Parent should be used, got %+v", c)
+ }
+
+ return _defaultDirPath(UnifiedMountpoint, c.Path, c.Parent, c.Name)
+}
+
+func _defaultDirPath(root, cgPath, cgParent, cgName string) (string, error) {
+ if (cgName != "" || cgParent != "") && cgPath != "" {
+ return "", errors.New("cgroup: either Path or Name and Parent should be used")
+ }
+
+ // XXX: Do not remove CleanPath. Path safety is important! -- cyphar
+ innerPath := utils.CleanPath(cgPath)
+ if innerPath == "" {
+ cgParent := utils.CleanPath(cgParent)
+ cgName := utils.CleanPath(cgName)
+ innerPath = filepath.Join(cgParent, cgName)
+ }
+ if filepath.IsAbs(innerPath) {
+ return filepath.Join(root, innerPath), nil
+ }
+
+ ownCgroup, err := parseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return "", err
+ }
+ // The current user scope most probably has tasks in it already,
+ // making it impossible to enable controllers for its sub-cgroup.
+ // A parent cgroup (with no tasks in it) is what we need.
+ ownCgroup = filepath.Dir(ownCgroup)
+
+ return filepath.Join(root, ownCgroup, innerPath), nil
+}
+
+// parseCgroupFile parses a /proc/PID/cgroup file and returns the unified (v2) cgroup path.
+func parseCgroupFile(path string) (string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ return parseCgroupFromReader(f)
+}
+
+func parseCgroupFromReader(r io.Reader) (string, error) {
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ var (
+ text = s.Text()
+ parts = strings.SplitN(text, ":", 3)
+ )
+ if len(parts) < 3 {
+ return "", fmt.Errorf("invalid cgroup entry: %q", text)
+ }
+ // text is like "0::/user.slice/user-1001.slice/session-1.scope"
+ if parts[0] == "0" && parts[1] == "" {
+ return parts[2], nil
+ }
+ }
+ if err := s.Err(); err != nil {
+ return "", err
+ }
+ return "", errors.New("cgroup path not found")
+}
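As an illustration (not part of the vendored patch) of what parseCgroupFromReader extracts, the snippet below applies the same "0::" match to a hypothetical cgroup v2 /proc/self/cgroup line.

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "0::/user.slice/user-1001.slice/session-1.scope" // hypothetical entry
	parts := strings.SplitN(line, ":", 3)
	if len(parts) == 3 && parts[0] == "0" && parts[1] == "" {
		fmt.Println(parts[2]) // /user.slice/user-1001.slice/session-1.scope
	}
}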
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go
new file mode 100644
index 000000000..8917a6411
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go
@@ -0,0 +1,127 @@
+package fs2
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func setFreezer(dirPath string, state configs.FreezerState) error {
+ var stateStr string
+ switch state {
+ case configs.Undefined:
+ return nil
+ case configs.Frozen:
+ stateStr = "1"
+ case configs.Thawed:
+ stateStr = "0"
+ default:
+ return fmt.Errorf("invalid freezer state %q requested", state)
+ }
+
+ fd, err := cgroups.OpenFile(dirPath, "cgroup.freeze", unix.O_RDWR)
+ if err != nil {
+ // We can ignore this request as long as the user didn't ask us to
+ // freeze the container (since without the freezer cgroup, that's a
+ // no-op).
+ if state != configs.Frozen {
+ return nil
+ }
+ return fmt.Errorf("freezer not supported: %w", err)
+ }
+ defer fd.Close()
+
+ if _, err := fd.WriteString(stateStr); err != nil {
+ return err
+ }
+ // Confirm that the cgroup did actually change states.
+ if actualState, err := readFreezer(dirPath, fd); err != nil {
+ return err
+ } else if actualState != state {
+ return fmt.Errorf(`expected "cgroup.freeze" to be in state %q but was in %q`, state, actualState)
+ }
+ return nil
+}
+
+func getFreezer(dirPath string) (configs.FreezerState, error) {
+ fd, err := cgroups.OpenFile(dirPath, "cgroup.freeze", unix.O_RDONLY)
+ if err != nil {
+ // If the kernel is too old, then we just treat the freezer as being in
+ // an "undefined" state.
+ if os.IsNotExist(err) || errors.Is(err, unix.ENODEV) {
+ err = nil
+ }
+ return configs.Undefined, err
+ }
+ defer fd.Close()
+
+ return readFreezer(dirPath, fd)
+}
+
+func readFreezer(dirPath string, fd *os.File) (configs.FreezerState, error) {
+ if _, err := fd.Seek(0, 0); err != nil {
+ return configs.Undefined, err
+ }
+ state := make([]byte, 2)
+ if _, err := fd.Read(state); err != nil {
+ return configs.Undefined, err
+ }
+ switch string(state) {
+ case "0\n":
+ return configs.Thawed, nil
+ case "1\n":
+ return waitFrozen(dirPath)
+ default:
+ return configs.Undefined, fmt.Errorf(`unknown "cgroup.freeze" state: %q`, state)
+ }
+}
+
+// waitFrozen polls cgroup.events until it sees "frozen 1" in it.
+func waitFrozen(dirPath string) (configs.FreezerState, error) {
+ fd, err := cgroups.OpenFile(dirPath, "cgroup.events", unix.O_RDONLY)
+ if err != nil {
+ return configs.Undefined, err
+ }
+ defer fd.Close()
+
+ // XXX: Simple wait/read/retry is used here. An implementation
+ // based on poll(2) or inotify(7) is possible, but it makes the code
+ // much more complicated. Maybe address this later.
+ const (
+ // Perform maxIter with waitTime in between iterations.
+ waitTime = 10 * time.Millisecond
+ maxIter = 1000
+ )
+ scanner := bufio.NewScanner(fd)
+ for i := 0; scanner.Scan(); {
+ if i == maxIter {
+ return configs.Undefined, fmt.Errorf("timeout of %s reached waiting for the cgroup to freeze", waitTime*maxIter)
+ }
+ line := scanner.Text()
+ val := strings.TrimPrefix(line, "frozen ")
+ if val != line { // got prefix
+ if val[0] == '1' {
+ return configs.Frozen, nil
+ }
+
+ i++
+ // wait, then re-read
+ time.Sleep(waitTime)
+ _, err := fd.Seek(0, 0)
+ if err != nil {
+ return configs.Undefined, err
+ }
+ }
+ }
+ // We should only reach here on a read error,
+ // or if the file does not contain a "frozen " line.
+ return configs.Undefined, scanner.Err()
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go
new file mode 100644
index 000000000..d5208d778
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go
@@ -0,0 +1,271 @@
+package fs2
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type parseError = fscommon.ParseError
+
+type manager struct {
+ config *configs.Cgroup
+ // dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope"
+ dirPath string
+ // controllers is the content of the "cgroup.controllers" file,
+ // excluding pseudo-controllers ("devices" and "freezer").
+ controllers map[string]struct{}
+}
+
+// NewManager creates a manager for cgroup v2 unified hierarchy.
+// dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope".
+// If dirPath is empty, it is automatically set using config.
+func NewManager(config *configs.Cgroup, dirPath string) (cgroups.Manager, error) {
+ if dirPath == "" {
+ var err error
+ dirPath, err = defaultDirPath(config)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ m := &manager{
+ config: config,
+ dirPath: dirPath,
+ }
+ return m, nil
+}
+
+func (m *manager) getControllers() error {
+ if m.controllers != nil {
+ return nil
+ }
+
+ data, err := cgroups.ReadFile(m.dirPath, "cgroup.controllers")
+ if err != nil {
+ if m.config.Rootless && m.config.Path == "" {
+ return nil
+ }
+ return err
+ }
+ fields := strings.Fields(data)
+ m.controllers = make(map[string]struct{}, len(fields))
+ for _, c := range fields {
+ m.controllers[c] = struct{}{}
+ }
+
+ return nil
+}
+
+func (m *manager) Apply(pid int) error {
+ if err := CreateCgroupPath(m.dirPath, m.config); err != nil {
+ // Related tests:
+ // - "runc create (no limits + no cgrouppath + no permission) succeeds"
+ // - "runc create (rootless + no limits + cgrouppath + no permission) fails with permission error"
+ // - "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
+ if m.config.Rootless {
+ if m.config.Path == "" {
+ if blNeed, nErr := needAnyControllers(m.config.Resources); nErr == nil && !blNeed {
+ return nil
+ }
+ return fmt.Errorf("rootless needs no limits + no cgrouppath when no permission is granted for cgroups: %w", err)
+ }
+ }
+ return err
+ }
+ if err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *manager) GetPids() ([]int, error) {
+ return cgroups.GetPids(m.dirPath)
+}
+
+func (m *manager) GetAllPids() ([]int, error) {
+ return cgroups.GetAllPids(m.dirPath)
+}
+
+func (m *manager) GetStats() (*cgroups.Stats, error) {
+ var errs []error
+
+ st := cgroups.NewStats()
+
+ // pids (since kernel 4.5)
+ if err := statPids(m.dirPath, st); err != nil {
+ errs = append(errs, err)
+ }
+ // memory (since kernel 4.5)
+ if err := statMemory(m.dirPath, st); err != nil && !os.IsNotExist(err) {
+ errs = append(errs, err)
+ }
+ // io (since kernel 4.5)
+ if err := statIo(m.dirPath, st); err != nil && !os.IsNotExist(err) {
+ errs = append(errs, err)
+ }
+ // cpu (since kernel 4.15)
+ // Note cpu.stat is available even if the controller is not enabled.
+ if err := statCpu(m.dirPath, st); err != nil && !os.IsNotExist(err) {
+ errs = append(errs, err)
+ }
+ // hugetlb (since kernel 5.6)
+ if err := statHugeTlb(m.dirPath, st); err != nil && !os.IsNotExist(err) {
+ errs = append(errs, err)
+ }
+ // rdma (since kernel 4.11)
+ if err := fscommon.RdmaGetStats(m.dirPath, st); err != nil && !os.IsNotExist(err) {
+ errs = append(errs, err)
+ }
+ if len(errs) > 0 && !m.config.Rootless {
+ return st, fmt.Errorf("error while statting cgroup v2: %+v", errs)
+ }
+ return st, nil
+}
+
+func (m *manager) Freeze(state configs.FreezerState) error {
+ if m.config.Resources == nil {
+ return errors.New("cannot toggle freezer: cgroups not configured for container")
+ }
+ if err := setFreezer(m.dirPath, state); err != nil {
+ return err
+ }
+ m.config.Resources.Freezer = state
+ return nil
+}
+
+func (m *manager) Destroy() error {
+ return cgroups.RemovePath(m.dirPath)
+}
+
+func (m *manager) Path(_ string) string {
+ return m.dirPath
+}
+
+func (m *manager) Set(r *configs.Resources) error {
+ if r == nil {
+ return nil
+ }
+ if err := m.getControllers(); err != nil {
+ return err
+ }
+ // pids (since kernel 4.5)
+ if err := setPids(m.dirPath, r); err != nil {
+ return err
+ }
+ // memory (since kernel 4.5)
+ if err := setMemory(m.dirPath, r); err != nil {
+ return err
+ }
+ // io (since kernel 4.5)
+ if err := setIo(m.dirPath, r); err != nil {
+ return err
+ }
+ // cpu (since kernel 4.15)
+ if err := setCpu(m.dirPath, r); err != nil {
+ return err
+ }
+ // devices (since kernel 4.15, pseudo-controller)
+ //
+ // When rootless is true, errors from the device subsystem are ignored because it is really not expected to work.
+ // However, errors from other subsystems are not ignored.
+ // see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
+ if err := setDevices(m.dirPath, r); err != nil {
+ if !m.config.Rootless || errors.Is(err, cgroups.ErrDevicesUnsupported) {
+ return err
+ }
+ }
+ // cpuset (since kernel 5.0)
+ if err := setCpuset(m.dirPath, r); err != nil {
+ return err
+ }
+ // hugetlb (since kernel 5.6)
+ if err := setHugeTlb(m.dirPath, r); err != nil {
+ return err
+ }
+ // rdma (since kernel 4.11)
+ if err := fscommon.RdmaSet(m.dirPath, r); err != nil {
+ return err
+ }
+ // freezer (since kernel 5.2, pseudo-controller)
+ if err := setFreezer(m.dirPath, r.Freezer); err != nil {
+ return err
+ }
+ if err := m.setUnified(r.Unified); err != nil {
+ return err
+ }
+ m.config.Resources = r
+ return nil
+}
+
+func setDevices(dirPath string, r *configs.Resources) error {
+ if cgroups.DevicesSetV2 == nil {
+ if len(r.Devices) > 0 {
+ return cgroups.ErrDevicesUnsupported
+ }
+ return nil
+ }
+ return cgroups.DevicesSetV2(dirPath, r)
+}
+
+func (m *manager) setUnified(res map[string]string) error {
+ for k, v := range res {
+ if strings.Contains(k, "/") {
+ return fmt.Errorf("unified resource %q must be a file name (no slashes)", k)
+ }
+ if err := cgroups.WriteFile(m.dirPath, k, v); err != nil {
+ // Check for both EPERM and ENOENT since O_CREAT is used by WriteFile.
+ if errors.Is(err, os.ErrPermission) || errors.Is(err, os.ErrNotExist) {
+ // Check if a controller is available,
+ // to give more specific error if not.
+ sk := strings.SplitN(k, ".", 2)
+ if len(sk) != 2 {
+ return fmt.Errorf("unified resource %q must be in the form CONTROLLER.PARAMETER", k)
+ }
+ c := sk[0]
+ if _, ok := m.controllers[c]; !ok && c != "cgroup" {
+ return fmt.Errorf("unified resource %q can't be set: controller %q not available", k, c)
+ }
+ }
+ return fmt.Errorf("unable to set unified resource %q: %w", k, err)
+ }
+ }
+
+ return nil
+}
+
+func (m *manager) GetPaths() map[string]string {
+ paths := make(map[string]string, 1)
+ paths[""] = m.dirPath
+ return paths
+}
+
+func (m *manager) GetCgroups() (*configs.Cgroup, error) {
+ return m.config, nil
+}
+
+func (m *manager) GetFreezerState() (configs.FreezerState, error) {
+ return getFreezer(m.dirPath)
+}
+
+func (m *manager) Exists() bool {
+ return cgroups.PathExists(m.dirPath)
+}
+
+func OOMKillCount(path string) (uint64, error) {
+ return fscommon.GetValueByKey(path, "memory.events", "oom_kill")
+}
+
+func (m *manager) OOMKillCount() (uint64, error) {
+ c, err := OOMKillCount(m.dirPath)
+ if err != nil && m.config.Rootless && os.IsNotExist(err) {
+ err = nil
+ }
+
+ return c, err
+}
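A minimal usage sketch of the fs2 manager introduced above (not part of the vendored patch); it assumes a cgroup v2 host and sufficient privileges, and the cgroup path and pids limit are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	cfg := &configs.Cgroup{
		Path:      "/demo", // hypothetical absolute cgroup path
		Resources: &configs.Resources{PidsLimit: 64},
	}
	m, err := fs2.NewManager(cfg, "") // empty dirPath: derived from cfg
	if err != nil {
		panic(err)
	}
	if err := m.Apply(os.Getpid()); err != nil { // create the path and add this pid
		panic(err)
	}
	if err := m.Set(cfg.Resources); err != nil { // writes pids.max, etc.
		panic(err)
	}
	if st, err := m.GetStats(); err == nil {
		fmt.Println("pids.current:", st.PidsStats.Current)
	}
	_ = m.Destroy()
}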
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go
new file mode 100644
index 000000000..c92a7e64a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go
@@ -0,0 +1,48 @@
+package fs2
+
+import (
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func isHugeTlbSet(r *configs.Resources) bool {
+ return len(r.HugetlbLimit) > 0
+}
+
+func setHugeTlb(dirPath string, r *configs.Resources) error {
+ if !isHugeTlbSet(r) {
+ return nil
+ }
+ for _, hugetlb := range r.HugetlbLimit {
+ if err := cgroups.WriteFile(dirPath, "hugetlb."+hugetlb.Pagesize+".max", strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func statHugeTlb(dirPath string, stats *cgroups.Stats) error {
+ hugetlbStats := cgroups.HugetlbStats{}
+ for _, pagesize := range cgroups.HugePageSizes() {
+ value, err := fscommon.GetCgroupParamUint(dirPath, "hugetlb."+pagesize+".current")
+ if err != nil {
+ return err
+ }
+ hugetlbStats.Usage = value
+
+ fileName := "hugetlb." + pagesize + ".events"
+ value, err = fscommon.GetValueByKey(dirPath, fileName, "max")
+ if err != nil {
+ return err
+ }
+ hugetlbStats.Failcnt = value
+
+ stats.HugetlbStats[pagesize] = hugetlbStats
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go
new file mode 100644
index 000000000..b2ff7d340
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go
@@ -0,0 +1,193 @@
+package fs2
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func isIoSet(r *configs.Resources) bool {
+ return r.BlkioWeight != 0 ||
+ len(r.BlkioWeightDevice) > 0 ||
+ len(r.BlkioThrottleReadBpsDevice) > 0 ||
+ len(r.BlkioThrottleWriteBpsDevice) > 0 ||
+ len(r.BlkioThrottleReadIOPSDevice) > 0 ||
+ len(r.BlkioThrottleWriteIOPSDevice) > 0
+}
+
+// bfqDeviceWeightSupported checks for per-device BFQ weight support (added
+// in kernel v5.4, commit 795fe54c2a8) by reading from "io.bfq.weight".
+func bfqDeviceWeightSupported(bfq *os.File) bool {
+ if bfq == nil {
+ return false
+ }
+ _, _ = bfq.Seek(0, 0)
+ buf := make([]byte, 32)
+ _, _ = bfq.Read(buf)
+ // If only a single number (the default weight) is read back, we have an older kernel.
+ _, err := strconv.ParseInt(string(bytes.TrimSpace(buf)), 10, 64)
+ return err != nil
+}
+
+func setIo(dirPath string, r *configs.Resources) error {
+ if !isIoSet(r) {
+ return nil
+ }
+
+ // If BFQ IO scheduler is available, use it.
+ var bfq *os.File
+ if r.BlkioWeight != 0 || len(r.BlkioWeightDevice) > 0 {
+ var err error
+ bfq, err = cgroups.OpenFile(dirPath, "io.bfq.weight", os.O_RDWR)
+ if err == nil {
+ defer bfq.Close()
+ } else if !os.IsNotExist(err) {
+ return err
+ }
+ }
+
+ if r.BlkioWeight != 0 {
+ if bfq != nil { // Use BFQ.
+ if _, err := bfq.WriteString(strconv.FormatUint(uint64(r.BlkioWeight), 10)); err != nil {
+ return err
+ }
+ } else {
+ // Fallback to io.weight with a conversion scheme.
+ v := cgroups.ConvertBlkIOToIOWeightValue(r.BlkioWeight)
+ if err := cgroups.WriteFile(dirPath, "io.weight", strconv.FormatUint(v, 10)); err != nil {
+ return err
+ }
+ }
+ }
+ if bfqDeviceWeightSupported(bfq) {
+ for _, wd := range r.BlkioWeightDevice {
+ if _, err := bfq.WriteString(wd.WeightString() + "\n"); err != nil {
+ return fmt.Errorf("setting device weight %q: %w", wd.WeightString(), err)
+ }
+ }
+ }
+ for _, td := range r.BlkioThrottleReadBpsDevice {
+ if err := cgroups.WriteFile(dirPath, "io.max", td.StringName("rbps")); err != nil {
+ return err
+ }
+ }
+ for _, td := range r.BlkioThrottleWriteBpsDevice {
+ if err := cgroups.WriteFile(dirPath, "io.max", td.StringName("wbps")); err != nil {
+ return err
+ }
+ }
+ for _, td := range r.BlkioThrottleReadIOPSDevice {
+ if err := cgroups.WriteFile(dirPath, "io.max", td.StringName("riops")); err != nil {
+ return err
+ }
+ }
+ for _, td := range r.BlkioThrottleWriteIOPSDevice {
+ if err := cgroups.WriteFile(dirPath, "io.max", td.StringName("wiops")); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func readCgroup2MapFile(dirPath string, name string) (map[string][]string, error) {
+ ret := map[string][]string{}
+ f, err := cgroups.OpenFile(dirPath, name, os.O_RDONLY)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(line)
+ if len(parts) < 2 {
+ continue
+ }
+ ret[parts[0]] = parts[1:]
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, &parseError{Path: dirPath, File: name, Err: err}
+ }
+ return ret, nil
+}
+
+func statIo(dirPath string, stats *cgroups.Stats) error {
+ const file = "io.stat"
+ values, err := readCgroup2MapFile(dirPath, file)
+ if err != nil {
+ return err
+ }
+ // more details on the io.stat file format: https://www.kernel.org/doc/Documentation/cgroup-v2.txt
+ var parsedStats cgroups.BlkioStats
+ for k, v := range values {
+ d := strings.Split(k, ":")
+ if len(d) != 2 {
+ continue
+ }
+ major, err := strconv.ParseUint(d[0], 10, 64)
+ if err != nil {
+ return &parseError{Path: dirPath, File: file, Err: err}
+ }
+ minor, err := strconv.ParseUint(d[1], 10, 64)
+ if err != nil {
+ return &parseError{Path: dirPath, File: file, Err: err}
+ }
+
+ for _, item := range v {
+ d := strings.Split(item, "=")
+ if len(d) != 2 {
+ continue
+ }
+ op := d[0]
+
+ // Map to the cgroupv1 naming and layout (in separate tables).
+ var targetTable *[]cgroups.BlkioStatEntry
+ switch op {
+ // Equivalent to cgroupv1's blkio.io_service_bytes.
+ case "rbytes":
+ op = "Read"
+ targetTable = &parsedStats.IoServiceBytesRecursive
+ case "wbytes":
+ op = "Write"
+ targetTable = &parsedStats.IoServiceBytesRecursive
+ // Equivalent to cgroupv1's blkio.io_serviced.
+ case "rios":
+ op = "Read"
+ targetTable = &parsedStats.IoServicedRecursive
+ case "wios":
+ op = "Write"
+ targetTable = &parsedStats.IoServicedRecursive
+ default:
+ // Skip over entries we cannot map to cgroupv1 stats for now.
+ // In the future we should expand the stats struct to include
+ // them.
+ logrus.Debugf("cgroupv2 io stats: skipping over unmappable %s entry", item)
+ continue
+ }
+
+ value, err := strconv.ParseUint(d[1], 10, 64)
+ if err != nil {
+ return &parseError{Path: dirPath, File: file, Err: err}
+ }
+
+ entry := cgroups.BlkioStatEntry{
+ Op: op,
+ Major: major,
+ Minor: minor,
+ Value: value,
+ }
+ *targetTable = append(*targetTable, entry)
+ }
+ }
+ stats.BlkioStats = parsedStats
+ return nil
+}
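For reference (not part of the vendored patch), the per-device line format setIo writes to io.max via ThrottleDevice.StringName; the 8:16 device and 2 MiB/s read limit are hypothetical.

package main

import "fmt"

func main() {
	major, minor := int64(8), int64(16)
	rate := uint64(2097152) // bytes per second
	line := fmt.Sprintf("%d:%d rbps=%d", major, minor, rate)
	fmt.Println(line) // "8:16 rbps=2097152", written to io.max
}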
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go
new file mode 100644
index 000000000..adbc4b230
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go
@@ -0,0 +1,216 @@
+package fs2
+
+import (
+ "bufio"
+ "errors"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+// numToStr converts an int64 value to a string for writing to a
+// cgroupv2 file with a .min, .max, .low, or .high suffix.
+// The value of -1 is converted to "max" for cgroupv1 compatibility
+// (which used to write -1 to remove the limit).
+func numToStr(value int64) (ret string) {
+ switch {
+ case value == 0:
+ ret = ""
+ case value == -1:
+ ret = "max"
+ default:
+ ret = strconv.FormatInt(value, 10)
+ }
+
+ return ret
+}
+
+func isMemorySet(r *configs.Resources) bool {
+ return r.MemoryReservation != 0 || r.Memory != 0 || r.MemorySwap != 0
+}
+
+func setMemory(dirPath string, r *configs.Resources) error {
+ if !isMemorySet(r) {
+ return nil
+ }
+ swap, err := cgroups.ConvertMemorySwapToCgroupV2Value(r.MemorySwap, r.Memory)
+ if err != nil {
+ return err
+ }
+ swapStr := numToStr(swap)
+ if swapStr == "" && swap == 0 && r.MemorySwap > 0 {
+ // memory and memorySwap set to the same value -- disable swap
+ swapStr = "0"
+ }
+ // Never write an empty string to `memory.swap.max`, as that would mean setting it to 0.
+ if swapStr != "" {
+ if err := cgroups.WriteFile(dirPath, "memory.swap.max", swapStr); err != nil {
+ return err
+ }
+ }
+
+ if val := numToStr(r.Memory); val != "" {
+ if err := cgroups.WriteFile(dirPath, "memory.max", val); err != nil {
+ return err
+ }
+ }
+
+ // cgroup.Resources.KernelMemory is ignored
+
+ if val := numToStr(r.MemoryReservation); val != "" {
+ if err := cgroups.WriteFile(dirPath, "memory.low", val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func statMemory(dirPath string, stats *cgroups.Stats) error {
+ const file = "memory.stat"
+ statsFile, err := cgroups.OpenFile(dirPath, file, os.O_RDONLY)
+ if err != nil {
+ return err
+ }
+ defer statsFile.Close()
+
+ sc := bufio.NewScanner(statsFile)
+ for sc.Scan() {
+ t, v, err := fscommon.ParseKeyValue(sc.Text())
+ if err != nil {
+ return &parseError{Path: dirPath, File: file, Err: err}
+ }
+ stats.MemoryStats.Stats[t] = v
+ }
+ if err := sc.Err(); err != nil {
+ return &parseError{Path: dirPath, File: file, Err: err}
+ }
+ stats.MemoryStats.Cache = stats.MemoryStats.Stats["file"]
+ // Unlike cgroup v1 which has memory.use_hierarchy binary knob,
+ // cgroup v2 is always hierarchical.
+ stats.MemoryStats.UseHierarchy = true
+
+ memoryUsage, err := getMemoryDataV2(dirPath, "")
+ if err != nil {
+ if errors.Is(err, unix.ENOENT) && dirPath == UnifiedMountpoint {
+ // The root cgroup does not have memory.{current,max}
+ // so emulate those using data from /proc/meminfo.
+ return statsFromMeminfo(stats)
+ }
+ return err
+ }
+ stats.MemoryStats.Usage = memoryUsage
+ swapUsage, err := getMemoryDataV2(dirPath, "swap")
+ if err != nil {
+ return err
+ }
+ // As cgroup v1 reports SwapUsage values as mem+swap combined,
+ // while in cgroup v2 swap values do not include memory,
+ // report combined mem+swap for v1 compatibility.
+ swapUsage.Usage += memoryUsage.Usage
+ if swapUsage.Limit != math.MaxUint64 {
+ swapUsage.Limit += memoryUsage.Limit
+ }
+ stats.MemoryStats.SwapUsage = swapUsage
+
+ return nil
+}
+
+func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) {
+ memoryData := cgroups.MemoryData{}
+
+ moduleName := "memory"
+ if name != "" {
+ moduleName = "memory." + name
+ }
+ usage := moduleName + ".current"
+ limit := moduleName + ".max"
+
+ value, err := fscommon.GetCgroupParamUint(path, usage)
+ if err != nil {
+ if name != "" && os.IsNotExist(err) {
+ // Ignore ENOENT as there's no swap accounting
+ // if kernel CONFIG_MEMCG_SWAP is not set or
+ // the swapaccount=0 kernel boot parameter is given.
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, err
+ }
+ memoryData.Usage = value
+
+ value, err = fscommon.GetCgroupParamUint(path, limit)
+ if err != nil {
+ return cgroups.MemoryData{}, err
+ }
+ memoryData.Limit = value
+
+ return memoryData, nil
+}
+
+func statsFromMeminfo(stats *cgroups.Stats) error {
+ const file = "/proc/meminfo"
+ f, err := os.Open(file)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Fields we are interested in.
+ var (
+ swap_free uint64
+ swap_total uint64
+ main_total uint64
+ main_free uint64
+ )
+ mem := map[string]*uint64{
+ "SwapFree": &swap_free,
+ "SwapTotal": &swap_total,
+ "MemTotal": &main_total,
+ "MemFree": &main_free,
+ }
+
+ found := 0
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ parts := strings.SplitN(sc.Text(), ":", 3)
+ if len(parts) != 2 {
+ // Should not happen.
+ continue
+ }
+ k := parts[0]
+ p, ok := mem[k]
+ if !ok {
+ // Unknown field -- not interested.
+ continue
+ }
+ vStr := strings.TrimSpace(strings.TrimSuffix(parts[1], " kB"))
+ *p, err = strconv.ParseUint(vStr, 10, 64)
+ if err != nil {
+ return &parseError{File: file, Err: errors.New("bad value for " + k)}
+ }
+
+ found++
+ if found == len(mem) {
+ // Got everything we need -- skip the rest.
+ break
+ }
+ }
+ if err := sc.Err(); err != nil {
+ return &parseError{Path: "", File: file, Err: err}
+ }
+
+ stats.MemoryStats.SwapUsage.Usage = (swap_total - swap_free) * 1024
+ stats.MemoryStats.SwapUsage.Limit = math.MaxUint64
+
+ stats.MemoryStats.Usage.Usage = (main_total - main_free) * 1024
+ stats.MemoryStats.Usage.Limit = math.MaxUint64
+
+ return nil
+}
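For reference (not part of the vendored patch), a sketch of the strings numToStr produces for memory.max and memory.swap.max; the sample limits are hypothetical.

package main

import (
	"fmt"
	"strconv"
)

// numToStr mirrors the conversion above: 0 means "unset" (nothing written),
// -1 maps to "max", and positive values are written verbatim.
func numToStr(value int64) string {
	switch {
	case value == 0:
		return ""
	case value == -1:
		return "max"
	default:
		return strconv.FormatInt(value, 10)
	}
}

func main() {
	fmt.Printf("%q %q %q\n", numToStr(0), numToStr(-1), numToStr(268435456))
	// "" "max" "268435456"
}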
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go
new file mode 100644
index 000000000..c8c4a3658
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go
@@ -0,0 +1,72 @@
+package fs2
+
+import (
+ "errors"
+ "math"
+ "os"
+ "strings"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func isPidsSet(r *configs.Resources) bool {
+ return r.PidsLimit != 0
+}
+
+func setPids(dirPath string, r *configs.Resources) error {
+ if !isPidsSet(r) {
+ return nil
+ }
+ if val := numToStr(r.PidsLimit); val != "" {
+ if err := cgroups.WriteFile(dirPath, "pids.max", val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func statPidsFromCgroupProcs(dirPath string, stats *cgroups.Stats) error {
+ // If the pids controller is not enabled, read the PIDs from cgroup.procs
+ // (or from cgroup.threads if reading cgroup.procs is not supported).
+ contents, err := cgroups.ReadFile(dirPath, "cgroup.procs")
+ if errors.Is(err, unix.ENOTSUP) {
+ contents, err = cgroups.ReadFile(dirPath, "cgroup.threads")
+ }
+ if err != nil {
+ return err
+ }
+ pids := strings.Count(contents, "\n")
+ stats.PidsStats.Current = uint64(pids)
+ stats.PidsStats.Limit = 0
+ return nil
+}
+
+func statPids(dirPath string, stats *cgroups.Stats) error {
+ current, err := fscommon.GetCgroupParamUint(dirPath, "pids.current")
+ if err != nil {
+ if os.IsNotExist(err) {
+ return statPidsFromCgroupProcs(dirPath, stats)
+ }
+ return err
+ }
+
+ max, err := fscommon.GetCgroupParamUint(dirPath, "pids.max")
+ if err != nil {
+ return err
+ }
+ // If no limit is set, reading pids.max returns "max", which is
+ // converted to MaxUint64 by GetCgroupParamUint. Historically, we
+ // represent "no limit" for pids as 0, hence this conversion.
+ if max == math.MaxUint64 {
+ max = 0
+ }
+
+ stats.PidsStats.Current = current
+ stats.PidsStats.Limit = max
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go
new file mode 100644
index 000000000..d463d15ee
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go
@@ -0,0 +1,121 @@
+package fscommon
+
+import (
+ "bufio"
+ "errors"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "golang.org/x/sys/unix"
+)
+
+// parseRdmaKV parses a raw string into an RdmaEntry.
+func parseRdmaKV(raw string, entry *cgroups.RdmaEntry) error {
+ var value uint32
+
+ parts := strings.SplitN(raw, "=", 3)
+
+ if len(parts) != 2 {
+ return errors.New("Unable to parse RDMA entry")
+ }
+
+ k, v := parts[0], parts[1]
+
+ if v == "max" {
+ value = math.MaxUint32
+ } else {
+ val64, err := strconv.ParseUint(v, 10, 32)
+ if err != nil {
+ return err
+ }
+ value = uint32(val64)
+ }
+ if k == "hca_handle" {
+ entry.HcaHandles = value
+ } else if k == "hca_object" {
+ entry.HcaObjects = value
+ }
+
+ return nil
+}
+
+// readRdmaEntries reads raw strings from the given file and converts them to RdmaEntries.
+// Example entry: mlx4_0 hca_handle=2 hca_object=2000
+func readRdmaEntries(dir, file string) ([]cgroups.RdmaEntry, error) {
+ rdmaEntries := make([]cgroups.RdmaEntry, 0)
+ fd, err := cgroups.OpenFile(dir, file, unix.O_RDONLY)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close() //nolint:errorlint
+ scanner := bufio.NewScanner(fd)
+ for scanner.Scan() {
+ parts := strings.SplitN(scanner.Text(), " ", 4)
+ if len(parts) == 3 {
+ entry := new(cgroups.RdmaEntry)
+ entry.Device = parts[0]
+ err = parseRdmaKV(parts[1], entry)
+ if err != nil {
+ continue
+ }
+ err = parseRdmaKV(parts[2], entry)
+ if err != nil {
+ continue
+ }
+
+ rdmaEntries = append(rdmaEntries, *entry)
+ }
+ }
+ return rdmaEntries, scanner.Err()
+}
+
+// RdmaGetStats fills stats with the RDMA limit and current entries.
+func RdmaGetStats(path string, stats *cgroups.Stats) error {
+ currentEntries, err := readRdmaEntries(path, "rdma.current")
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ err = nil
+ }
+ return err
+ }
+ maxEntries, err := readRdmaEntries(path, "rdma.max")
+ if err != nil {
+ return err
+ }
+ // If a device was removed between reading the two files, skip reporting stats.
+ if len(currentEntries) != len(maxEntries) {
+ return nil
+ }
+
+ stats.RdmaStats = cgroups.RdmaStats{
+ RdmaLimit: maxEntries,
+ RdmaCurrent: currentEntries,
+ }
+
+ return nil
+}
+
+func createCmdString(device string, limits configs.LinuxRdma) string {
+ cmdString := device
+ if limits.HcaHandles != nil {
+ cmdString += " hca_handle=" + strconv.FormatUint(uint64(*limits.HcaHandles), 10)
+ }
+ if limits.HcaObjects != nil {
+ cmdString += " hca_object=" + strconv.FormatUint(uint64(*limits.HcaObjects), 10)
+ }
+ return cmdString
+}
+
+// RdmaSet sets RDMA resources.
+func RdmaSet(path string, r *configs.Resources) error {
+ for device, limits := range r.Rdma {
+ if err := cgroups.WriteFile(path, "rdma.max", createCmdString(device, limits)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go
new file mode 100644
index 000000000..f4a51c9e5
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go
@@ -0,0 +1,145 @@
+package fscommon
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+)
+
+var (
+ // Deprecated: use cgroups.OpenFile instead.
+ OpenFile = cgroups.OpenFile
+ // Deprecated: use cgroups.ReadFile instead.
+ ReadFile = cgroups.ReadFile
+ // Deprecated: use cgroups.WriteFile instead.
+ WriteFile = cgroups.WriteFile
+)
+
+// ParseError records the details of a parse error, including the file path.
+type ParseError struct {
+ Path string
+ File string
+ Err error
+}
+
+func (e *ParseError) Error() string {
+ return "unable to parse " + path.Join(e.Path, e.File) + ": " + e.Err.Error()
+}
+
+func (e *ParseError) Unwrap() error { return e.Err }
+
+// ParseUint converts a string to a uint64 integer.
+// Negative values are returned as zero because, due to kernel bugs,
+// some of the memory cgroup stats can be negative.
+func ParseUint(s string, base, bitSize int) (uint64, error) {
+ value, err := strconv.ParseUint(s, base, bitSize)
+ if err != nil {
+ intValue, intErr := strconv.ParseInt(s, base, bitSize)
+ // 1. Handle negative values greater than MinInt64, and
+ // 2. Handle negative values less than MinInt64.
+ if intErr == nil && intValue < 0 {
+ return 0, nil
+ } else if errors.Is(intErr, strconv.ErrRange) && intValue < 0 {
+ return 0, nil
+ }
+
+ return value, err
+ }
+
+ return value, nil
+}
+
+// ParseKeyValue parses a space-separated "name value" kind of cgroup
+// parameter and returns its key as a string, and its value as uint64
+// (ParseUint is used to convert the value). For example,
+// "io_service_bytes 1234" will be returned as "io_service_bytes", 1234.
+func ParseKeyValue(t string) (string, uint64, error) {
+ parts := strings.SplitN(t, " ", 3)
+ if len(parts) != 2 {
+ return "", 0, fmt.Errorf("line %q is not in key value format", t)
+ }
+
+ value, err := ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return "", 0, err
+ }
+
+ return parts[0], value, nil
+}
+
+// GetValueByKey reads key-value pairs from the specified cgroup file
+// and returns the value of the specified key. ParseUint is used for value
+// conversion.
+func GetValueByKey(path, file, key string) (uint64, error) {
+ content, err := cgroups.ReadFile(path, file)
+ if err != nil {
+ return 0, err
+ }
+
+ lines := strings.Split(content, "\n")
+ for _, line := range lines {
+ arr := strings.Split(line, " ")
+ if len(arr) == 2 && arr[0] == key {
+ val, err := ParseUint(arr[1], 10, 64)
+ if err != nil {
+ err = &ParseError{Path: path, File: file, Err: err}
+ }
+ return val, err
+ }
+ }
+
+ return 0, nil
+}
+
+// GetCgroupParamUint reads a single uint64 value from the specified cgroup file.
+// If the value read is "max", math.MaxUint64 is returned.
+func GetCgroupParamUint(path, file string) (uint64, error) {
+ contents, err := GetCgroupParamString(path, file)
+ if err != nil {
+ return 0, err
+ }
+ contents = strings.TrimSpace(contents)
+ if contents == "max" {
+ return math.MaxUint64, nil
+ }
+
+ res, err := ParseUint(contents, 10, 64)
+ if err != nil {
+ return res, &ParseError{Path: path, File: file, Err: err}
+ }
+ return res, nil
+}
+
+// GetCgroupParamInt reads a single int64 value from the specified cgroup file.
+// If the value read is "max", math.MaxInt64 is returned.
+func GetCgroupParamInt(path, file string) (int64, error) {
+ contents, err := cgroups.ReadFile(path, file)
+ if err != nil {
+ return 0, err
+ }
+ contents = strings.TrimSpace(contents)
+ if contents == "max" {
+ return math.MaxInt64, nil
+ }
+
+ res, err := strconv.ParseInt(contents, 10, 64)
+ if err != nil {
+ return res, &ParseError{Path: path, File: file, Err: err}
+ }
+ return res, nil
+}
+
+// GetCgroupParamString reads a string from the specified cgroup file.
+func GetCgroupParamString(path, file string) (string, error) {
+ contents, err := cgroups.ReadFile(path, file)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(contents), nil
+}
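A small usage sketch of the fscommon helpers above (not part of the vendored patch); the sample line and paths are hypothetical.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)

func main() {
	key, val, err := fscommon.ParseKeyValue("usage_usec 1234567")
	fmt.Println(key, val, err) // usage_usec 1234567 <nil>

	// GetCgroupParamUint("/sys/fs/cgroup/demo", "pids.max") would return
	// math.MaxUint64 when the file contains "max".
}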
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
index fa195bf90..865344f99 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
@@ -2,8 +2,8 @@ package configs
import "fmt"
-// blockIODevice holds major:minor format supported in blkio cgroup
-type blockIODevice struct {
+// BlockIODevice holds major:minor format supported in blkio cgroup.
+type BlockIODevice struct {
// Major is the device's major number
Major int64 `json:"major"`
// Minor is the device's minor number
@@ -12,7 +12,7 @@ type blockIODevice struct {
// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair
type WeightDevice struct {
- blockIODevice
+ BlockIODevice
// Weight is the bandwidth rate for the device, range is from 10 to 1000
Weight uint16 `json:"weight"`
// LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
@@ -41,7 +41,7 @@ func (wd *WeightDevice) LeafWeightString() string {
// ThrottleDevice struct holds a `major:minor rate_per_second` pair
type ThrottleDevice struct {
- blockIODevice
+ BlockIODevice
// Rate is the IO rate limit per cgroup per device
Rate uint64 `json:"rate"`
}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
index c1b4a0041..7cf2fb657 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -83,9 +83,6 @@ type Syscall struct {
Args []*Arg `json:"args"`
}
-// TODO Windows. Many of these fields should be factored out into those parts
-// which are common across platforms, and those which are platform specific.
-
// Config defines configuration options for executing a process inside a contained environment.
type Config struct {
// NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
index 784c61820..b4c616d55 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
@@ -35,12 +35,6 @@ type Mount struct {
// Extensions are additional flags that are specific to runc.
Extensions int `json:"extensions"`
-
- // Optional Command to be run before Source is mounted.
- PremountCmds []Command `json:"premount_cmds"`
-
- // Optional Command to be run after Source is mounted.
- PostmountCmds []Command `json:"postmount_cmds"`
}
func (m *Mount) IsBind() bool {
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
index 6b9fc3435..dbd435341 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -132,19 +132,16 @@ func WithProcfd(root, unsafePath string, fn func(procfd string) error) error {
return fn(procfd)
}
-// SearchLabels searches a list of key-value pairs for the provided key and
-// returns the corresponding value. The pairs must be separated with '='.
-func SearchLabels(labels []string, query string) string {
- for _, l := range labels {
- parts := strings.SplitN(l, "=", 2)
- if len(parts) < 2 {
- continue
- }
- if parts[0] == query {
- return parts[1]
+// SearchLabels searches through a list of key=value pairs for the given key,
+// returning its value and a boolean flag telling whether the key exists.
+func SearchLabels(labels []string, key string) (string, bool) {
+ key += "="
+ for _, s := range labels {
+ if strings.HasPrefix(s, key) {
+ return s[len(key):], true
}
}
- return ""
+ return "", false
}
// Annotations returns the bundle path and user defined annotations from the
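A short sketch (not part of the vendored patch) of the new two-value SearchLabels contract; the label list is hypothetical.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/utils"
)

func main() {
	labels := []string{"bundle=/run/bundle", "io.container.manager=podman"}
	if v, ok := utils.SearchLabels(labels, "bundle"); ok {
		fmt.Println(v) // /run/bundle
	}
}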
diff --git a/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml b/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml
new file mode 100644
index 000000000..7df8aa198
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml
@@ -0,0 +1,4 @@
+# For documentation, see https://golangci-lint.run/usage/configuration/
+linters:
+ enable:
+ - gofumpt
diff --git a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml b/vendor/github.com/seccomp/libseccomp-golang/.travis.yml
deleted file mode 100644
index 5240d4622..000000000
--- a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-# Travis CI configuration for libseccomp-golang
-
-# https://docs.travis-ci.com/user/reference/bionic
-# https://wiki.ubuntu.com/Releases
-
-dist: bionic
-sudo: false
-
-notifications:
- email:
- on_success: always
- on_failure: always
-
-arch:
- - amd64
-
-os:
- - linux
-
-language: go
-
-jobs:
- include:
- - name: "last libseccomp 2.5.0"
- env:
- - SECCOMP_VER=2.5.0
- - SECCOMP_SHA256SUM=1ffa7038d2720ad191919816db3479295a4bcca1ec14e02f672539f4983014f3
- - name: "compat libseccomp 2.4.4"
- env:
- - SECCOMP_VER=2.4.4
- - SECCOMP_SHA256SUM=4e79738d1ef3c9b7ca9769f1f8b8d84fc17143c2c1c432e53b9c64787e0ff3eb
- - name: "compat libseccomp 2.2.1"
- env:
- - SECCOMP_VER=2.2.1
- - SECCOMP_SHA256SUM=0ba1789f54786c644af54cdffc9fd0dd0a8bb2b2ee153933f658855d2851a740
-
-addons:
- apt:
- packages:
- - build-essential
- - astyle
- - golint
- - gperf
-
-install:
- - go get -u golang.org/x/lint/golint
-
-# run all of the tests independently, fail if any of the tests error
-script:
- - wget https://github.com/seccomp/libseccomp/releases/download/v$SECCOMP_VER/libseccomp-$SECCOMP_VER.tar.gz
- - echo $SECCOMP_SHA256SUM libseccomp-$SECCOMP_VER.tar.gz | sha256sum -c
- - tar xf libseccomp-$SECCOMP_VER.tar.gz
- - pushd libseccomp-$SECCOMP_VER && ./configure --prefix=/opt/libseccomp-$SECCOMP_VER && make && sudo make install && popd
- - make check-syntax
- - make lint
- - PKG_CONFIG_PATH=/opt/libseccomp-$SECCOMP_VER/lib/pkgconfig LD_LIBRARY_PATH=/opt/libseccomp-$SECCOMP_VER/lib make vet
- - PKG_CONFIG_PATH=/opt/libseccomp-$SECCOMP_VER/lib/pkgconfig LD_LIBRARY_PATH=/opt/libseccomp-$SECCOMP_VER/lib make test
diff --git a/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG b/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG
index a01d9a722..905a9b5cd 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG
+++ b/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG
@@ -2,6 +2,31 @@ libseccomp-golang: Releases
===============================================================================
https://github.com/seccomp/libseccomp-golang
+* Version 0.10.0 - June 9, 2022
+- Minimum supported version of libseccomp bumped to v2.3.1
+- Add seccomp userspace notification API (ActNotify, filter.*Notif*)
+- Add filter.{Get,Set}SSB (to support SCMP_FLTATR_CTL_SSB)
+- Add filter.{Get,Set}Optimize (to support SCMP_FLTATR_CTL_OPTIMIZE)
+- Add filter.{Get,Set}RawRC (to support SCMP_FLTATR_API_SYSRAWRC)
+- Add ArchPARISC, ArchPARISC64, ArchRISCV64
+- Add ActKillProcess and ActKillThread; deprecate ActKill
+- Add go module support
+- Return ErrSyscallDoesNotExist when unable to resolve a syscall
+- Fix some functions to check for both kernel level API and libseccomp version
+- Fix MakeCondition to use sanitizeCompareOp
+- Fix AddRule to handle EACCES (from libseccomp >= 2.5.0)
+- Updated the main docs and converted to README.md
+- Added CONTRIBUTING.md, SECURITY.md, and administrative docs under doc/admin
+- Add GitHub action CI, enable more linters
+- test: test against various libseccomp versions
+- test: fix and simplify execInSubprocess
+- test: fix APILevelIsSupported
+- Refactor the Errno(-1 * retCode) pattern
+- Refactor/unify libseccomp version / API level checks
+- Code cleanups (linter, formatting, spelling fixes)
+- Cleanup: use errors.New instead of fmt.Errorf where appropriate
+- Cleanup: remove duplicated cgo stuff, redundant linux build tag
+
* Version 0.9.1 - May 21, 2019
- Minimum supported version of libseccomp bumped to v2.2.0
- Use Libseccomp's `seccomp_version` API to retrieve library version
diff --git a/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md
index d6862cbd5..c2fc80d5a 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md
+++ b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md
@@ -1,31 +1,23 @@
-How to Submit Patches to the libseccomp Project
+How to Submit Patches to the libseccomp-golang Project
===============================================================================
https://github.com/seccomp/libseccomp-golang
This document is intended to act as a guide to help you contribute to the
-libseccomp project. It is not perfect, and there will always be exceptions
-to the rules described here, but by following the instructions below you
-should have a much easier time getting your work merged with the upstream
+libseccomp-golang project. It is not perfect, and there will always be
+exceptions to the rules described here, but by following the instructions below
+you should have a much easier time getting your work merged with the upstream
project.
## Test Your Code Using Existing Tests
-There are two possible tests you can run to verify your code. The first
-test is used to check the formatting and coding style of your changes, you
-can run the test with the following command:
-
- # make check-syntax
-
-... if there are any problems with your changes a diff/patch will be shown
-which indicates the problems and how to fix them.
-
-The second possible test is used to ensure the sanity of your code changes
-and to test these changes against the included tests. You can run the test
-with the following command:
+A number of test and lint-related recipes are provided in the Makefile. If
+you want to run the standard regression tests, you can execute the following:
# make check
-... if there are any faults or errors they will be displayed.
+In order to run the lint checks, the 'golangci-lint' tool is needed, which can be found at:
+
+* https://github.com/golangci/golangci-lint
## Add New Tests for New Functionality
diff --git a/vendor/github.com/seccomp/libseccomp-golang/Makefile b/vendor/github.com/seccomp/libseccomp-golang/Makefile
index 38cfa852c..530f5b4ad 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/Makefile
+++ b/vendor/github.com/seccomp/libseccomp-golang/Makefile
@@ -4,7 +4,7 @@
all: check-build
-check: vet test
+check: lint test
check-build:
go build
@@ -16,7 +16,7 @@ fix-syntax:
gofmt -w .
vet:
- go vet -v
+ go vet -v ./...
# Previous bugs have made the tests freeze until the timeout. Golang default
# timeout for tests is 10 minutes, which is too long, considering current tests
@@ -28,5 +28,4 @@ test:
go test -v -timeout $(TEST_TIMEOUT)
lint:
- @$(if $(shell which golint),true,$(error "install golint and include it in your PATH"))
- golint -set_exit_status
+ golangci-lint run .
diff --git a/vendor/github.com/seccomp/libseccomp-golang/README.md b/vendor/github.com/seccomp/libseccomp-golang/README.md
index 806a5ddf2..312135ee5 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/README.md
+++ b/vendor/github.com/seccomp/libseccomp-golang/README.md
@@ -2,7 +2,9 @@
===============================================================================
https://github.com/seccomp/libseccomp-golang
-[![Build Status](https://img.shields.io/travis/seccomp/libseccomp-golang/main.svg)](https://travis-ci.org/seccomp/libseccomp-golang)
+[![Go Reference](https://pkg.go.dev/badge/github.com/seccomp/libseccomp-golang.svg)](https://pkg.go.dev/github.com/seccomp/libseccomp-golang)
+[![validate](https://github.com/seccomp/libseccomp-golang/actions/workflows/validate.yml/badge.svg)](https://github.com/seccomp/libseccomp-golang/actions/workflows/validate.yml)
+[![test](https://github.com/seccomp/libseccomp-golang/actions/workflows/test.yml/badge.svg)](https://github.com/seccomp/libseccomp-golang/actions/workflows/test.yml)
The libseccomp library provides an easy to use, platform independent, interface
to the Linux Kernel's syscall filtering mechanism. The libseccomp API is
@@ -20,32 +22,38 @@ The library source repository currently lives on GitHub at the following URLs:
* https://github.com/seccomp/libseccomp-golang
* https://github.com/seccomp/libseccomp
-The project mailing list is currently hosted on Google Groups at the URL below,
-please note that a Google account is not required to subscribe to the mailing
-list.
+Documentation for this package is also available at:
-* https://groups.google.com/d/forum/libseccomp
+* https://pkg.go.dev/github.com/seccomp/libseccomp-golang
-Documentation is also available at:
+## Verifying Releases
-* https://godoc.org/github.com/seccomp/libseccomp-golang
+Starting with libseccomp-golang v0.10.0, the git tag corresponding to each
+release should be signed by one of the libseccomp-golang maintainers. It is
+recommended that before use you verify the release tags using the following
+command:
-## Installing the package
+ % git tag -v <tag>
+
+At present, only the following keys, specified via the fingerprints below, are
+authorized to sign official libseccomp-golang release tags:
-The libseccomp-golang bindings require at least Go v1.2.1 and GCC v4.8.4;
-earlier versions may yield unpredictable results. If you meet these
-requirements you can install this package using the command below:
+ Paul Moore <paul@paul-moore.com>
+ 7100 AADF AE6E 6E94 0D2E 0AD6 55E4 5A5A E8CA 7C8A
- # go get github.com/seccomp/libseccomp-golang
+ Tom Hromatka <tom.hromatka@oracle.com>
+ 47A6 8FCE 37C7 D702 4FD6 5E11 356C E62C 2B52 4099
-## Testing the Library
+ Kir Kolyshkin <kolyshkin@gmail.com>
+ C242 8CD7 5720 FACD CF76 B6EA 17DE 5ECB 75A1 100E
-A number of tests and lint related recipes are provided in the Makefile, if
-you want to run the standard regression tests, you can excute the following:
+More information on GnuPG and git tag verification can be found at their
+respective websites: https://git-scm.com/docs/git and https://gnupg.org.
+
+## Installing the package
- # make check
+ % go get github.com/seccomp/libseccomp-golang
-In order to execute the 'make lint' recipe the 'golint' tool is needed, it
-can be found at:
+## Contributing
-* https://github.com/golang/lint
+See [CONTRIBUTING.md](CONTRIBUTING.md).
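For orientation, a minimal sketch of how the rewritten README's install step is commonly followed up in code (an illustrative allow-list filter; the EPERM return value and the syscall set are assumptions, not anything the patch prescribes):

```go
package main

import (
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// Deny by default: any syscall not explicitly allowed returns EPERM (1 on Linux).
	filter, err := seccomp.NewFilter(seccomp.ActErrno.SetReturnCode(1))
	if err != nil {
		log.Fatal(err)
	}
	defer filter.Release()

	// Allow a purely illustrative handful of syscalls by name.
	for _, name := range []string{"read", "write", "exit_group"} {
		sc, err := seccomp.GetSyscallFromName(name)
		if err != nil {
			log.Fatal(err) // ErrSyscallDoesNotExist for unresolvable names
		}
		if err := filter.AddRule(sc, seccomp.ActAllow); err != nil {
			log.Fatal(err)
		}
	}

	// Load the filter into the kernel for the current process.
	if err := filter.Load(); err != nil {
		log.Fatal(err)
	}
}
```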
diff --git a/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md b/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md
new file mode 100644
index 000000000..f645d4efe
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md
@@ -0,0 +1,48 @@
+The libseccomp-golang Security Vulnerability Handling Process
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+
+This document attempts to describe the processes through which
+sensitive security-relevant bugs can be responsibly disclosed to the
+libseccomp-golang project and how the project maintainers should handle these
+reports. Just like the other libseccomp-golang process documents, this
+document should be treated as a guiding document and not a hard, unyielding set
+of regulations; the bug reporters and project maintainers are encouraged to
+work together to address the issues as best they can, in a manner which works
+best for all parties involved.
+
+### Reporting Problems
+
+Problems with the libseccomp-golang library that are not suitable for immediate
+public disclosure should be emailed to the current libseccomp-golang
+maintainers; the list is below. We typically request at most a 90-day time
+period to address the issue before it is made public, but we will make every
+effort to address the issue as quickly as possible and shorten the disclosure
+window.
+
+* Paul Moore, paul@paul-moore.com
+* Tom Hromatka, tom.hromatka@oracle.com
+* Kir Kolyshkin, kolyshkin@gmail.com
+
+### Resolving Sensitive Security Issues
+
+Upon disclosure of a bug, the maintainers should work together to investigate
+the problem and decide on a solution. In order to prevent an early disclosure
+of the problem, those working on the solution should do so privately and
+outside of the traditional libseccomp-golang development practices. One
+possible solution to this is to leverage the GitHub "Security" functionality to
+create a private development fork that can be shared among the maintainers, and
+optionally the reporter. A placeholder GitHub issue may be created, but
+details should remain extremely limited until such time as the problem has been
+fixed and responsibly disclosed. If a CVE, or other tag, has been assigned to
+the problem, the GitHub issue title should include the vulnerability tag once
+the problem has been disclosed.
+
+### Public Disclosure
+
+Whenever possible, responsible reporting and patching practices should be
+followed, including notification to the linux-distros and oss-security mailing
+lists.
+
+* https://oss-security.openwall.org/wiki/mailing-lists/distros
+* https://oss-security.openwall.org/wiki/mailing-lists/oss-security
diff --git a/vendor/github.com/seccomp/libseccomp-golang/go.sum b/vendor/github.com/seccomp/libseccomp-golang/go.sum
index 72ae16111..e69de29bb 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/go.sum
+++ b/vendor/github.com/seccomp/libseccomp-golang/go.sum
@@ -1,23 +0,0 @@
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200313205530-4303120df7d8 h1:gkI/wGGwpcG5W4hLCzZNGxA4wzWBGGDStRI1MrjDl2Q=
-golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
index e9b92e221..c23406754 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
@@ -1,5 +1,3 @@
-// +build linux
-
// Public API specification for libseccomp Go bindings
// Contains public API for the bindings
@@ -9,6 +7,7 @@
package seccomp
import (
+ "errors"
"fmt"
"os"
"runtime"
@@ -18,48 +17,36 @@ import (
"unsafe"
)
-// C wrapping code
-
-// To compile libseccomp-golang against a specific version of libseccomp:
-// cd ../libseccomp && mkdir -p prefix
-// ./configure --prefix=$PWD/prefix && make && make install
-// cd ../libseccomp-golang
-// PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make
-// LD_PRELOAD=$PWD/../libseccomp/prefix/lib/libseccomp.so.2.5.0 PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make test
-
-// #cgo pkg-config: libseccomp
// #include <stdlib.h>
// #include <seccomp.h>
import "C"
// Exported types
-// VersionError denotes that the system libseccomp version is incompatible
-// with this package.
+// VersionError represents an error when either the system libseccomp version
+// or the kernel version is too old to perform the operation requested.
type VersionError struct {
- message string
- minimum string
+ op string // operation that failed or would fail
+ major, minor, micro uint // minimally required libseccomp version
+ curAPI, minAPI uint // current and minimally required API versions
}
func init() {
// This forces the cgo libseccomp to initialize its internal API support state,
// which is necessary on older versions of libseccomp in order to work
// correctly.
- GetAPI()
+ _, _ = getAPI()
}
func (e VersionError) Error() string {
- messageStr := ""
- if e.message != "" {
- messageStr = e.message + ": "
+ if e.minAPI != 0 {
+ return fmt.Sprintf("%s requires libseccomp >= %d.%d.%d and API level >= %d "+
+ "(current version: %d.%d.%d, API level: %d)",
+ e.op, e.major, e.minor, e.micro, e.minAPI,
+ verMajor, verMinor, verMicro, e.curAPI)
}
- minimumStr := ""
- if e.minimum != "" {
- minimumStr = e.minimum
- } else {
- minimumStr = "2.2.0"
- }
- return fmt.Sprintf("Libseccomp version too low: %sminimum supported is %s: detected %d.%d.%d", messageStr, minimumStr, verMajor, verMinor, verMicro)
+ return fmt.Sprintf("%s requires libseccomp >= %d.%d.%d (current version: %d.%d.%d)",
+ e.op, e.major, e.minor, e.micro, verMajor, verMinor, verMicro)
}
// ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a
@@ -148,44 +135,46 @@ const (
// variables are invalid
ArchInvalid ScmpArch = iota
// ArchNative is the native architecture of the kernel
- ArchNative ScmpArch = iota
+ ArchNative
// ArchX86 represents 32-bit x86 syscalls
- ArchX86 ScmpArch = iota
+ ArchX86
// ArchAMD64 represents 64-bit x86-64 syscalls
- ArchAMD64 ScmpArch = iota
+ ArchAMD64
// ArchX32 represents 64-bit x86-64 syscalls (32-bit pointers)
- ArchX32 ScmpArch = iota
+ ArchX32
// ArchARM represents 32-bit ARM syscalls
- ArchARM ScmpArch = iota
+ ArchARM
// ArchARM64 represents 64-bit ARM syscalls
- ArchARM64 ScmpArch = iota
+ ArchARM64
// ArchMIPS represents 32-bit MIPS syscalls
- ArchMIPS ScmpArch = iota
+ ArchMIPS
// ArchMIPS64 represents 64-bit MIPS syscalls
- ArchMIPS64 ScmpArch = iota
+ ArchMIPS64
// ArchMIPS64N32 represents 64-bit MIPS syscalls (32-bit pointers)
- ArchMIPS64N32 ScmpArch = iota
+ ArchMIPS64N32
// ArchMIPSEL represents 32-bit MIPS syscalls (little endian)
- ArchMIPSEL ScmpArch = iota
+ ArchMIPSEL
// ArchMIPSEL64 represents 64-bit MIPS syscalls (little endian)
- ArchMIPSEL64 ScmpArch = iota
+ ArchMIPSEL64
// ArchMIPSEL64N32 represents 64-bit MIPS syscalls (little endian,
// 32-bit pointers)
- ArchMIPSEL64N32 ScmpArch = iota
+ ArchMIPSEL64N32
// ArchPPC represents 32-bit POWERPC syscalls
- ArchPPC ScmpArch = iota
+ ArchPPC
// ArchPPC64 represents 64-bit POWER syscalls (big endian)
- ArchPPC64 ScmpArch = iota
+ ArchPPC64
// ArchPPC64LE represents 64-bit POWER syscalls (little endian)
- ArchPPC64LE ScmpArch = iota
+ ArchPPC64LE
// ArchS390 represents 31-bit System z/390 syscalls
- ArchS390 ScmpArch = iota
+ ArchS390
// ArchS390X represents 64-bit System z/390 syscalls
- ArchS390X ScmpArch = iota
+ ArchS390X
// ArchPARISC represents 32-bit PA-RISC
- ArchPARISC ScmpArch = iota
+ ArchPARISC
// ArchPARISC64 represents 64-bit PA-RISC
- ArchPARISC64 ScmpArch = iota
+ ArchPARISC64
+ // ArchRISCV64 represents RISCV64
+ ArchRISCV64
)
const (
@@ -194,34 +183,36 @@ const (
// ActInvalid is a placeholder to ensure uninitialized ScmpAction
// variables are invalid
ActInvalid ScmpAction = iota
- // ActKill kills the thread that violated the rule. It is the same as ActKillThread.
+ // ActKillThread kills the thread that violated the rule.
// All other threads from the same thread group will continue to execute.
- ActKill ScmpAction = iota
+ ActKillThread
// ActTrap throws SIGSYS
- ActTrap ScmpAction = iota
+ ActTrap
// ActNotify triggers a userspace notification. This action is only usable when
// libseccomp API level 6 or higher is supported.
- ActNotify ScmpAction = iota
+ ActNotify
// ActErrno causes the syscall to return a negative error code. This
// code can be set with the SetReturnCode method
- ActErrno ScmpAction = iota
+ ActErrno
// ActTrace causes the syscall to notify tracing processes with the
// given error code. This code can be set with the SetReturnCode method
- ActTrace ScmpAction = iota
+ ActTrace
// ActAllow permits the syscall to continue execution
- ActAllow ScmpAction = iota
+ ActAllow
// ActLog permits the syscall to continue execution after logging it.
// This action is only usable when libseccomp API level 3 or higher is
// supported.
- ActLog ScmpAction = iota
- // ActKillThread kills the thread that violated the rule. It is the same as ActKill.
- // All other threads from the same thread group will continue to execute.
- ActKillThread ScmpAction = iota
+ ActLog
// ActKillProcess kills the process that violated the rule.
// All threads in the thread group are also terminated.
// This action is only usable when libseccomp API level 3 or higher is
// supported.
- ActKillProcess ScmpAction = iota
+ ActKillProcess
+ // ActKill kills the thread that violated the rule.
+ // All other threads from the same thread group will continue to execute.
+ //
+ // Deprecated: use ActKillThread
+ ActKill = ActKillThread
)
const (
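Since ActKill is now only a deprecated alias for ActKillThread, code that wants whole-process termination has to request it explicitly. A small sketch under that assumption (the ptrace rule is purely illustrative and not part of this change):

```go
package main

import (
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// Allow everything by default, but terminate the process on ptrace(2).
	filter, err := seccomp.NewFilter(seccomp.ActAllow)
	if err != nil {
		log.Fatal(err)
	}
	defer filter.Release()

	ptrace, err := seccomp.GetSyscallFromName("ptrace")
	if err != nil {
		log.Fatal(err)
	}
	// ActKillProcess (API level >= 3) kills the entire thread group;
	// ActKillThread, the target of the deprecated ActKill alias, would only
	// kill the offending thread and leave its siblings running.
	if err := filter.AddRule(ptrace, seccomp.ActKillProcess); err != nil {
		log.Fatal(err)
	}
	if err := filter.Load(); err != nil {
		log.Fatal(err)
	}
}
```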
@@ -234,36 +225,35 @@ const (
CompareInvalid ScmpCompareOp = iota
// CompareNotEqual returns true if the argument is not equal to the
// given value
- CompareNotEqual ScmpCompareOp = iota
+ CompareNotEqual
// CompareLess returns true if the argument is less than the given value
- CompareLess ScmpCompareOp = iota
+ CompareLess
// CompareLessOrEqual returns true if the argument is less than or equal
// to the given value
- CompareLessOrEqual ScmpCompareOp = iota
+ CompareLessOrEqual
// CompareEqual returns true if the argument is equal to the given value
- CompareEqual ScmpCompareOp = iota
+ CompareEqual
// CompareGreaterEqual returns true if the argument is greater than or
// equal to the given value
- CompareGreaterEqual ScmpCompareOp = iota
+ CompareGreaterEqual
// CompareGreater returns true if the argument is greater than the given
// value
- CompareGreater ScmpCompareOp = iota
- // CompareMaskedEqual returns true if the argument is equal to the given
- // value, when masked (bitwise &) against the second given value
- CompareMaskedEqual ScmpCompareOp = iota
+ CompareGreater
+ // CompareMaskedEqual returns true if the masked argument value is
+ // equal to the masked datum value. Mask is the first argument, and
+ // datum is the second one.
+ CompareMaskedEqual
)
-var (
- // ErrSyscallDoesNotExist represents an error condition where
- // libseccomp is unable to resolve the syscall
- ErrSyscallDoesNotExist = fmt.Errorf("could not resolve syscall name")
-)
+// ErrSyscallDoesNotExist represents an error condition where
+// libseccomp is unable to resolve the syscall.
+var ErrSyscallDoesNotExist = errors.New("could not resolve syscall name")
const (
// Userspace notification response flags
// NotifRespFlagContinue tells the kernel to continue executing the system
- // call that triggered the notification. Must only be used when the notication
+ // call that triggered the notification. Must only be used when the notification
// response's error is 0.
NotifRespFlagContinue uint32 = 1
)
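The reworded CompareMaskedEqual comment pins down the value order: mask first, datum second. A hedged sketch of a conditional rule built that way (the CLONE_NEWUSER constant, the arg index, and the EPERM value are assumptions for illustration; on most 64-bit architectures flags is the first raw clone(2) argument):

```go
package main

import (
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

// denyUserNamespaces is a hypothetical helper: it makes clone(2) return EPERM
// whenever the CLONE_NEWUSER bit is set in the flags argument.
func denyUserNamespaces(filter *seccomp.ScmpFilter) error {
	const cloneNewUser = 0x10000000 // CLONE_NEWUSER

	clone, err := seccomp.GetSyscallFromName("clone")
	if err != nil {
		return err
	}
	// For CompareMaskedEqual the first value is the mask, the second the datum:
	// the condition matches when (arg0 & CLONE_NEWUSER) == CLONE_NEWUSER.
	cond, err := seccomp.MakeCondition(0, seccomp.CompareMaskedEqual, cloneNewUser, cloneNewUser)
	if err != nil {
		return err
	}
	return filter.AddRuleConditional(clone, seccomp.ActErrno.SetReturnCode(1),
		[]seccomp.ScmpCondition{cond})
}

func main() {
	filter, err := seccomp.NewFilter(seccomp.ActAllow)
	if err != nil {
		log.Fatal(err)
	}
	defer filter.Release()
	if err := denyUserNamespaces(filter); err != nil {
		log.Fatal(err)
	}
	if err := filter.Load(); err != nil {
		log.Fatal(err)
	}
}
```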
@@ -314,6 +304,8 @@ func GetArchFromString(arch string) (ScmpArch, error) {
return ArchPARISC, nil
case "parisc64":
return ArchPARISC64, nil
+ case "riscv64":
+ return ArchRISCV64, nil
default:
return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %q", arch)
}
@@ -358,6 +350,8 @@ func (a ScmpArch) String() string {
return "parisc"
case ArchPARISC64:
return "parisc64"
+ case ArchRISCV64:
+ return "riscv64"
case ArchNative:
return "native"
case ArchInvalid:
@@ -394,7 +388,7 @@ func (a ScmpCompareOp) String() string {
// String returns a string representation of a seccomp match action
func (a ScmpAction) String() string {
switch a & 0xFFFF {
- case ActKill, ActKillThread:
+ case ActKillThread:
return "Action: Kill thread"
case ActKillProcess:
return "Action: Kill process"
@@ -556,14 +550,14 @@ func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCo
return condStruct, err
}
- if comparison == CompareInvalid {
- return condStruct, fmt.Errorf("invalid comparison operator")
+ if err := sanitizeCompareOp(comparison); err != nil {
+ return condStruct, err
} else if arg > 5 {
return condStruct, fmt.Errorf("syscalls only have up to 6 arguments (%d given)", arg)
} else if len(values) > 2 {
return condStruct, fmt.Errorf("conditions can have at most 2 arguments (%d given)", len(values))
} else if len(values) == 0 {
- return condStruct, fmt.Errorf("must provide at least one value to compare against")
+ return condStruct, errors.New("must provide at least one value to compare against")
}
condStruct.Argument = arg
@@ -618,7 +612,7 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
fPtr := C.seccomp_init(defaultAction.toNative())
if fPtr == nil {
- return nil, fmt.Errorf("could not create filter")
+ return nil, errors.New("could not create filter")
}
filter := new(ScmpFilter)
@@ -630,7 +624,7 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
// If the kernel does not support TSYNC, allow us to continue without error.
if err := filter.setFilterAttr(filterAttrTsync, 0x1); err != nil && err != syscall.ENOTSUP {
filter.Release()
- return nil, fmt.Errorf("could not create filter - error setting tsync bit: %v", err)
+ return nil, fmt.Errorf("could not create filter: error setting tsync bit: %w", err)
}
return filter, nil
@@ -702,14 +696,14 @@ func (f *ScmpFilter) Merge(src *ScmpFilter) error {
defer src.lock.Unlock()
if !src.valid || !f.valid {
- return fmt.Errorf("one or more of the filter contexts is invalid or uninitialized")
+ return errors.New("one or more of the filter contexts is invalid or uninitialized")
}
// Merge the filters
if retCode := C.seccomp_merge(f.filterCtx, src.filterCtx); retCode != 0 {
e := errRc(retCode)
if e == syscall.EINVAL {
- return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter")
+ return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter: %w", e)
}
return e
}
@@ -874,10 +868,8 @@ func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
func (f *ScmpFilter) GetLogBit() (bool, error) {
log, err := f.getFilterAttr(filterAttrLog)
if err != nil {
- // Ignore error, if not supported returns apiLevel == 0
- apiLevel, _ := GetAPI()
- if apiLevel < 3 {
- return false, fmt.Errorf("getting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher")
+ if e := checkAPI("GetLogBit", 3, 2, 4, 0); e != nil {
+ err = e
}
return false, err
@@ -899,9 +891,8 @@ func (f *ScmpFilter) GetLogBit() (bool, error) {
func (f *ScmpFilter) GetSSB() (bool, error) {
ssb, err := f.getFilterAttr(filterAttrSSB)
if err != nil {
- api, apiErr := getAPI()
- if (apiErr != nil && api == 0) || (apiErr == nil && api < 4) {
- return false, fmt.Errorf("getting the SSB flag is only supported in libseccomp 2.5.0 and newer with API level 4 or higher")
+ if e := checkAPI("GetSSB", 4, 2, 5, 0); e != nil {
+ err = e
}
return false, err
@@ -914,6 +905,42 @@ func (f *ScmpFilter) GetSSB() (bool, error) {
return true, nil
}
+// GetOptimize returns the current optimization level of the filter,
+// or an error if an issue was encountered retrieving the value.
+// See SetOptimize for more details.
+func (f *ScmpFilter) GetOptimize() (int, error) {
+ level, err := f.getFilterAttr(filterAttrOptimize)
+ if err != nil {
+ if e := checkAPI("GetOptimize", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+
+ return 0, err
+ }
+
+ return int(level), nil
+}
+
+// GetRawRC returns the current state of RawRC flag, or an error
+// if an issue was encountered retrieving the value.
+// See SetRawRC for more details.
+func (f *ScmpFilter) GetRawRC() (bool, error) {
+ rawrc, err := f.getFilterAttr(filterAttrRawRC)
+ if err != nil {
+ if e := checkAPI("GetRawRC", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+
+ return false, err
+ }
+
+ if rawrc == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
// SetBadArchAction sets the default action taken on a syscall for an
// architecture not in the filter, or an error if an issue was encountered
// setting the value.
@@ -953,10 +980,8 @@ func (f *ScmpFilter) SetLogBit(state bool) error {
err := f.setFilterAttr(filterAttrLog, toSet)
if err != nil {
- // Ignore error, if not supported returns apiLevel == 0
- apiLevel, _ := GetAPI()
- if apiLevel < 3 {
- return fmt.Errorf("setting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher")
+ if e := checkAPI("SetLogBit", 3, 2, 4, 0); e != nil {
+ err = e
}
}
@@ -976,9 +1001,52 @@ func (f *ScmpFilter) SetSSB(state bool) error {
err := f.setFilterAttr(filterAttrSSB, toSet)
if err != nil {
- api, apiErr := getAPI()
- if (apiErr != nil && api == 0) || (apiErr == nil && api < 4) {
- return fmt.Errorf("setting the SSB flag is only supported in libseccomp 2.5.0 and newer with API level 4 or higher")
+ if e := checkAPI("SetSSB", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+ }
+
+ return err
+}
+
+// SetOptimize sets optimization level of the seccomp filter. By default
+// libseccomp generates a set of sequential "if" statements for each rule in
+// the filter. SetSyscallPriority can be used to prioritize the order for the
+// default cause. The binary tree optimization sorts by syscall numbers and
+// generates consistent O(log n) filter traversal for every rule in the filter.
+// The binary tree may be advantageous for large filters. Note that
+// SetSyscallPriority is ignored when level == 2.
+//
+// The different optimization levels are:
+// 0: Reserved value, not currently used.
+// 1: Rules sorted by priority and complexity (DEFAULT).
+// 2: Binary tree sorted by syscall number.
+func (f *ScmpFilter) SetOptimize(level int) error {
+ cLevel := C.uint32_t(level)
+
+ err := f.setFilterAttr(filterAttrOptimize, cLevel)
+ if err != nil {
+ if e := checkAPI("SetOptimize", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+ }
+
+ return err
+}
+
+// SetRawRC sets whether libseccomp should pass system error codes back to the
+// caller, instead of the default ECANCELED. Defaults to false.
+func (f *ScmpFilter) SetRawRC(state bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if state {
+ toSet = 0x1
+ }
+
+ err := f.setFilterAttr(filterAttrRawRC, toSet)
+ if err != nil {
+ if e := checkAPI("SetRawRC", 4, 2, 5, 0); e != nil {
+ err = e
}
}
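A short sketch of how the new SetOptimize and SetRawRC setters might be called; both need libseccomp >= 2.5.0 with API level 4, and on older installations they return a VersionError describing the requirement (the fallback behavior here is an assumption, not something the patch mandates):

```go
package main

import (
	"fmt"
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	filter, err := seccomp.NewFilter(seccomp.ActAllow)
	if err != nil {
		log.Fatal(err)
	}
	defer filter.Release()

	// Level 2 switches the generated BPF from sequential "if" checks to a
	// binary tree keyed on syscall number; useful for large filters.
	if err := filter.SetOptimize(2); err != nil {
		// On libseccomp < 2.5.0 this is a VersionError naming the required
		// library and API versions; fall back to the default level 1.
		fmt.Println("binary-tree optimization unavailable:", err)
	}

	// Ask libseccomp to pass raw system error codes back instead of ECANCELED.
	if err := filter.SetRawRC(true); err != nil {
		fmt.Println("SYSRAWRC unavailable:", err)
	}
}
```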
@@ -1029,9 +1097,6 @@ func (f *ScmpFilter) AddRuleExact(call ScmpSyscall, action ScmpAction) error {
// AddRuleConditional adds a single rule for a conditional action on a syscall.
// Returns an error if an issue was encountered adding the rule.
// All conditions must match for the rule to match.
-// There is a bug in library versions below v2.2.1 which can, in some cases,
-// cause conditions to be lost when more than one are used. Consequently,
-// AddRuleConditional is disabled on library versions lower than v2.2.1
func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
return f.addRuleGeneric(call, action, false, conds)
}
@@ -1043,9 +1108,6 @@ func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, con
// The rule will function exactly as described, but it may not function identically
// (or be able to be applied to) all architectures.
// Returns an error if an issue was encountered adding the rule.
-// There is a bug in library versions below v2.2.1 which can, in some cases,
-// cause conditions to be lost when more than one are used. Consequently,
-// AddRuleConditionalExact is disabled on library versions lower than v2.2.1
func (f *ScmpFilter) AddRuleConditionalExact(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
return f.addRuleGeneric(call, action, true, conds)
}
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
index 8dc7b296f..0a7fd34f5 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
@@ -1,11 +1,10 @@
-// +build linux
-
// Internal functions for libseccomp Go bindings
// No exported functions
package seccomp
import (
+ "errors"
"fmt"
"syscall"
)
@@ -27,10 +26,10 @@ import (
#include <stdlib.h>
#include <seccomp.h>
-#if SCMP_VER_MAJOR < 2
-#error Minimum supported version of Libseccomp is v2.2.0
-#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
-#error Minimum supported version of Libseccomp is v2.2.0
+#if (SCMP_VER_MAJOR < 2) || \
+ (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 3) || \
+ (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR == 3 && SCMP_VER_MICRO < 1)
+#error This package requires libseccomp >= v2.3.1
#endif
#define ARCH_BAD ~0
@@ -65,6 +64,10 @@ const uint32_t C_ARCH_BAD = ARCH_BAD;
#define SCMP_ARCH_PARISC64 ARCH_BAD
#endif
+#ifndef SCMP_ARCH_RISCV64
+#define SCMP_ARCH_RISCV64 ARCH_BAD
+#endif
+
const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE;
const uint32_t C_ARCH_X86 = SCMP_ARCH_X86;
const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64;
@@ -84,6 +87,7 @@ const uint32_t C_ARCH_S390 = SCMP_ARCH_S390;
const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X;
const uint32_t C_ARCH_PARISC = SCMP_ARCH_PARISC;
const uint32_t C_ARCH_PARISC64 = SCMP_ARCH_PARISC64;
+const uint32_t C_ARCH_RISCV64 = SCMP_ARCH_RISCV64;
#ifndef SCMP_ACT_LOG
#define SCMP_ACT_LOG 0x7ffc0000U
@@ -113,20 +117,25 @@ const uint32_t C_ACT_NOTIFY = SCMP_ACT_NOTIFY;
// The libseccomp SCMP_FLTATR_CTL_LOG member of the scmp_filter_attr enum was
// added in v2.4.0
-#if (SCMP_VER_MAJOR < 2) || \
- (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4)
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4
#define SCMP_FLTATR_CTL_LOG _SCMP_FLTATR_MIN
#endif
+
+// The following SCMP_FLTATR_* were added in libseccomp v2.5.0.
#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5
-#define SCMP_FLTATR_CTL_SSB _SCMP_FLTATR_MIN
+#define SCMP_FLTATR_CTL_SSB _SCMP_FLTATR_MIN
+#define SCMP_FLTATR_CTL_OPTIMIZE _SCMP_FLTATR_MIN
+#define SCMP_FLTATR_API_SYSRAWRC _SCMP_FLTATR_MIN
#endif
-const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
-const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
-const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
-const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC;
-const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG;
-const uint32_t C_ATTRIBUTE_SSB = (uint32_t)SCMP_FLTATR_CTL_SSB;
+const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
+const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
+const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
+const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC;
+const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG;
+const uint32_t C_ATTRIBUTE_SSB = (uint32_t)SCMP_FLTATR_CTL_SSB;
+const uint32_t C_ATTRIBUTE_OPTIMIZE = (uint32_t)SCMP_FLTATR_CTL_OPTIMIZE;
+const uint32_t C_ATTRIBUTE_SYSRAWRC = (uint32_t)SCMP_FLTATR_API_SYSRAWRC;
const int C_CMP_NE = (int)SCMP_CMP_NE;
const int C_CMP_LT = (int)SCMP_CMP_LT;
@@ -173,8 +182,7 @@ unsigned int get_micro_version()
#endif
// The libseccomp API level functions were added in v2.4.0
-#if (SCMP_VER_MAJOR < 2) || \
- (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4)
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4
const unsigned int seccomp_api_get(void)
{
// libseccomp-golang requires libseccomp v2.2.0, at a minimum, which
@@ -217,8 +225,7 @@ void add_struct_arg_cmp(
}
// The seccomp notify API functions were added in v2.5.0
-#if (SCMP_VER_MAJOR < 2) || \
- (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5)
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5
struct seccomp_data {
int nr;
@@ -270,11 +277,13 @@ type scmpFilterAttr uint32
const (
filterAttrActDefault scmpFilterAttr = iota
- filterAttrActBadArch scmpFilterAttr = iota
- filterAttrNNP scmpFilterAttr = iota
- filterAttrTsync scmpFilterAttr = iota
- filterAttrLog scmpFilterAttr = iota
- filterAttrSSB scmpFilterAttr = iota
+ filterAttrActBadArch
+ filterAttrNNP
+ filterAttrTsync
+ filterAttrLog
+ filterAttrSSB
+ filterAttrOptimize
+ filterAttrRawRC
)
const (
@@ -282,9 +291,9 @@ const (
scmpError C.int = -1
// Comparison boundaries to check for architecture validity
archStart ScmpArch = ArchNative
- archEnd ScmpArch = ArchPARISC64
+ archEnd ScmpArch = ArchRISCV64
// Comparison boundaries to check for action validity
- actionStart ScmpAction = ActKill
+ actionStart ScmpAction = ActKillThread
actionEnd ScmpAction = ActKillProcess
// Comparison boundaries to check for comparison operator validity
compareOpStart ScmpCompareOp = CompareNotEqual
@@ -292,8 +301,9 @@ const (
)
var (
- // Error thrown on bad filter context
- errBadFilter = fmt.Errorf("filter is invalid or uninitialized")
+ // errBadFilter is thrown on bad filter context.
+ errBadFilter = errors.New("filter is invalid or uninitialized")
+ errDefAction = errors.New("requested action matches default action of filter")
// Constants representing library major, minor, and micro versions
verMajor = uint(C.get_major_version())
verMinor = uint(C.get_minor_version())
@@ -302,26 +312,35 @@ var (
// Nonexported functions
-// Check if library version is greater than or equal to the given one
-func checkVersionAbove(major, minor, micro uint) bool {
- return (verMajor > major) ||
+// checkVersion returns an error if the libseccomp version being used
+// is less than the one specified by major, minor, and micro arguments.
+// Argument op is an arbitrary non-empty operation description, which
+// is used as a part of the error message returned.
+//
+// Most users should use checkAPI instead.
+func checkVersion(op string, major, minor, micro uint) error {
+ if (verMajor > major) ||
(verMajor == major && verMinor > minor) ||
- (verMajor == major && verMinor == minor && verMicro >= micro)
+ (verMajor == major && verMinor == minor && verMicro >= micro) {
+ return nil
+ }
+ return &VersionError{
+ op: op,
+ major: major,
+ minor: minor,
+ micro: micro,
+ }
}
-// Ensure that the library is supported, i.e. >= 2.2.0.
func ensureSupportedVersion() error {
- if !checkVersionAbove(2, 2, 0) {
- return VersionError{}
- }
- return nil
+ return checkVersion("seccomp", 2, 3, 1)
}
// Get the API level
func getAPI() (uint, error) {
api := C.seccomp_api_get()
if api == 0 {
- return 0, fmt.Errorf("API level operations are not supported")
+ return 0, errors.New("API level operations are not supported")
}
return uint(api), nil
@@ -330,11 +349,12 @@ func getAPI() (uint, error) {
// Set the API level
func setAPI(api uint) error {
if retCode := C.seccomp_api_set(C.uint(api)); retCode != 0 {
- if errRc(retCode) == syscall.EOPNOTSUPP {
- return fmt.Errorf("API level operations are not supported")
+ e := errRc(retCode)
+ if e == syscall.EOPNOTSUPP {
+ return errors.New("API level operations are not supported")
}
- return fmt.Errorf("could not set API level: %v", retCode)
+ return fmt.Errorf("could not set API level: %w", e)
}
return nil
@@ -392,7 +412,7 @@ func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error
// Wrapper for seccomp_rule_add_... functions
func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, length C.uint, cond C.scmp_cast_t) error {
if length != 0 && cond == nil {
- return fmt.Errorf("null conditions list, but length is nonzero")
+ return errors.New("null conditions list, but length is nonzero")
}
var retCode C.int
@@ -406,10 +426,12 @@ func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact b
switch e := errRc(retCode); e {
case syscall.EFAULT:
return fmt.Errorf("unrecognized syscall %#x", int32(call))
- case syscall.EPERM:
- return fmt.Errorf("requested action matches default action of filter")
+ // libseccomp >= v2.5.0 returns EACCES, older versions return EPERM.
+ // TODO: remove EPERM once libseccomp < v2.5.0 is not supported.
+ case syscall.EPERM, syscall.EACCES:
+ return errDefAction
case syscall.EINVAL:
- return fmt.Errorf("two checks on same syscall argument")
+ return errors.New("two checks on same syscall argument")
default:
return e
}
@@ -432,17 +454,9 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b
return err
}
} else {
- // We don't support conditional filtering in library version v2.1
- if !checkVersionAbove(2, 2, 1) {
- return VersionError{
- message: "conditional filtering is not supported",
- minimum: "2.2.1",
- }
- }
-
argsArr := C.make_arg_cmp_array(C.uint(len(conds)))
if argsArr == nil {
- return fmt.Errorf("error allocating memory for conditions")
+ return errors.New("error allocating memory for conditions")
}
defer C.free(argsArr)
@@ -482,7 +496,7 @@ func sanitizeAction(in ScmpAction) error {
}
if inTmp != ActTrace && inTmp != ActErrno && (in&0xFFFF0000) != 0 {
- return fmt.Errorf("highest 16 bits must be zeroed except for Trace and Errno")
+ return errors.New("highest 16 bits must be zeroed except for Trace and Errno")
}
return nil
@@ -536,6 +550,8 @@ func archFromNative(a C.uint32_t) (ScmpArch, error) {
return ArchPARISC, nil
case C.C_ARCH_PARISC64:
return ArchPARISC64, nil
+ case C.C_ARCH_RISCV64:
+ return ArchRISCV64, nil
default:
return 0x0, fmt.Errorf("unrecognized architecture %#x", uint32(a))
}
@@ -580,6 +596,8 @@ func (a ScmpArch) toNative() C.uint32_t {
return C.C_ARCH_PARISC
case ArchPARISC64:
return C.C_ARCH_PARISC64
+ case ArchRISCV64:
+ return C.C_ARCH_RISCV64
case ArchNative:
return C.C_ARCH_NATIVE
default:
@@ -612,8 +630,6 @@ func (a ScmpCompareOp) toNative() C.int {
func actionFromNative(a C.uint32_t) (ScmpAction, error) {
aTmp := a & 0xFFFF
switch a & 0xFFFF0000 {
- case C.C_ACT_KILL:
- return ActKill, nil
case C.C_ACT_KILL_PROCESS:
return ActKillProcess, nil
case C.C_ACT_KILL_THREAD:
@@ -638,8 +654,6 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) {
// Only use with sanitized actions, no error handling
func (a ScmpAction) toNative() C.uint32_t {
switch a & 0xFFFF {
- case ActKill:
- return C.C_ACT_KILL
case ActKillProcess:
return C.C_ACT_KILL_PROCESS
case ActKillThread:
@@ -676,15 +690,15 @@ func (a scmpFilterAttr) toNative() uint32 {
return uint32(C.C_ATTRIBUTE_LOG)
case filterAttrSSB:
return uint32(C.C_ATTRIBUTE_SSB)
+ case filterAttrOptimize:
+ return uint32(C.C_ATTRIBUTE_OPTIMIZE)
+ case filterAttrRawRC:
+ return uint32(C.C_ATTRIBUTE_SYSRAWRC)
default:
return 0x0
}
}
-func (a ScmpSyscall) toNative() C.uint32_t {
- return C.uint32_t(a)
-}
-
func syscallFromNative(a C.int) ScmpSyscall {
return ScmpSyscall(a)
}
@@ -724,9 +738,34 @@ func (scmpResp *ScmpNotifResp) toNative(resp *C.struct_seccomp_notif_resp) {
resp.flags = C.__u32(scmpResp.Flags)
}
+// checkAPI checks that both the API level and the seccomp version are equal to
+// or greater than the specified minLevel and major, minor, micro,
+// respectively, and returns an error otherwise. Argument op is an arbitrary
+// non-empty operation description, used as a part of the error message
+// returned.
+func checkAPI(op string, minLevel uint, major, minor, micro uint) error {
+ // Ignore error from getAPI, as it returns level == 0 in case of error.
+ level, _ := getAPI()
+ if level >= minLevel {
+ return checkVersion(op, major, minor, micro)
+ }
+ return &VersionError{
+ op: op,
+ curAPI: level,
+ minAPI: minLevel,
+ major: major,
+ minor: minor,
+ micro: micro,
+ }
+}
+
// Userspace Notification API
// Calls to C.seccomp_notify* hidden from seccomp.go
+func notifSupported() error {
+ return checkAPI("seccomp notification", 6, 2, 5, 0)
+}
+
func (f *ScmpFilter) getNotifFd() (ScmpFd, error) {
f.lock.Lock()
defer f.lock.Unlock()
@@ -734,11 +773,8 @@ func (f *ScmpFilter) getNotifFd() (ScmpFd, error) {
if !f.valid {
return -1, errBadFilter
}
-
- // Ignore error, if not supported returns apiLevel == 0
- apiLevel, _ := GetAPI()
- if apiLevel < 6 {
- return -1, fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel)
+ if err := notifSupported(); err != nil {
+ return -1, err
}
fd := C.seccomp_notify_fd(f.filterCtx)
@@ -750,10 +786,8 @@ func notifReceive(fd ScmpFd) (*ScmpNotifReq, error) {
var req *C.struct_seccomp_notif
var resp *C.struct_seccomp_notif_resp
- // Ignore error, if not supported returns apiLevel == 0
- apiLevel, _ := GetAPI()
- if apiLevel < 6 {
- return nil, fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel)
+ if err := notifSupported(); err != nil {
+ return nil, err
}
// we only use the request here; the response is unused
@@ -789,13 +823,11 @@ func notifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error {
var req *C.struct_seccomp_notif
var resp *C.struct_seccomp_notif_resp
- // Ignore error, if not supported returns apiLevel == 0
- apiLevel, _ := GetAPI()
- if apiLevel < 6 {
- return fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel)
+ if err := notifSupported(); err != nil {
+ return err
}
- // we only use the reponse here; the request is discarded
+ // we only use the response here; the request is discarded
if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 {
return errRc(retCode)
}
@@ -827,10 +859,8 @@ func notifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error {
}
func notifIDValid(fd ScmpFd, id uint64) error {
- // Ignore error, if not supported returns apiLevel == 0
- apiLevel, _ := GetAPI()
- if apiLevel < 6 {
- return fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel)
+ if err := notifSupported(); err != nil {
+ return err
}
for {
diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md
deleted file mode 100644
index 8a23b4f85..000000000
--- a/vendor/github.com/spf13/cobra/CHANGELOG.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Cobra Changelog
-
-## v1.1.3
-
-* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility
-
-## v1.1.2
-
-### Notable Changes
-
-* Bump license year to 2021 in golden files (#1309) @Bowbaq
-* Enhance PowerShell completion with custom comp (#1208) @Luap99
-* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670
-* Documentation readability improvements (#1228 etc.) @zaataylor etc.
-* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor
-
-## v1.1.1
-
-* **Fix:** yaml.v2 2.3.0 contained a unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context.
-* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context.
-
-## v1.1.0
-
-### Notable Changes
-
-* Extend Go completions and revamp zsh comp (#1070)
-* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb
-* Add completion for help command (#1136)
-* Complete subcommands when TraverseChildren is set (#1171)
-* Fix stderr printing functions (#894)
-* fix: fish output redirection (#1247)
-
-## v1.0.0
-
-Announcing v1.0.0 of Cobra. 🎉
-
-### Notable Changes
-* Fish completion (including support for Go custom completion) @marckhouzam
-* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam
-* Remove/replace SetOutput on Command - deprecated @jpmcb
-* add support for autolabel stale PR @xchapter7x
-* Add Labeler Actions @xchapter7x
-* Custom completions coded in Go (instead of Bash) @marckhouzam
-* Partial Revert of #922 @jharshman
-* Add Makefile to project @jharshman
-* Correct documentation for InOrStdin @desponda
-* Apply formatting to templates @jharshman
-* Revert change so help is printed on stdout again @marckhouzam
-* Update md2man to v2.0.0 @pdf
-* update viper to v1.4.0 @umarcor
-* Update cmd/root.go example in README.md @jharshman
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 7adef143b..2bf152082 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -2,7 +2,7 @@
Cobra is a library for creating powerful modern CLI applications.
-Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/),
+Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to
name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
@@ -28,7 +28,7 @@ Cobra provides:
* Automatically generated man pages for your application
* Command aliases so you can change things without breaking them
* The flexibility to define your own help, usage, etc.
-* Optional seamless integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps
# Concepts
@@ -102,7 +102,7 @@ It can be installed by running:
go install github.com/spf13/cobra-cli@latest
```
-For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/master/README.md)
+For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md).
diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go
new file mode 100644
index 000000000..0c631913d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.go
@@ -0,0 +1,49 @@
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+const (
+ activeHelpMarker = "_activeHelp_ "
+ // The below values should not be changed: programs will be using them explicitly
+ // in their user documentation, and users will be using them explicitly.
+ activeHelpEnvVarSuffix = "_ACTIVE_HELP"
+ activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP"
+ activeHelpGlobalDisable = "0"
+)
+
+// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
+// Such strings will be processed by the completion script and will be shown as ActiveHelp
+// to the user.
+// The array parameter should be the array that will contain the completions.
+// This function can be called multiple times before and/or after completions are added to
+// the array. Each time this function is called with the same array, the new
+// ActiveHelp line will be shown below the previous ones when completion is triggered.
+func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
+ return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr))
+}
+
+// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
+// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
+// case, with all - replaced by _.
+// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
+// is set to "0".
+func GetActiveHelpConfig(cmd *Command) string {
+ activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar)
+ if activeHelpCfg != activeHelpGlobalDisable {
+ activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name()))
+ }
+ return activeHelpCfg
+}
+
+// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
+// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
+// root command in upper case, with all - replaced by _.
+func activeHelpEnvVar(name string) string {
+ // This format should not be changed: users will be using it explicitly.
+ activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix))
+ return strings.ReplaceAll(activeHelpEnvVar, "-", "_")
+}
diff --git a/vendor/github.com/spf13/cobra/active_help.md b/vendor/github.com/spf13/cobra/active_help.md
new file mode 100644
index 000000000..5e7f59af3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.md
@@ -0,0 +1,157 @@
+# Active Help
+
+Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion.
+
+For example,
+```
+bash-5.1$ helm repo add [tab]
+You must choose a name for the repo you are adding.
+
+bash-5.1$ bin/helm package [tab]
+Please specify the path to the chart to package
+
+bash-5.1$ bin/helm package [tab][tab]
+bin/ internal/ scripts/ pkg/ testdata/
+```
+
+**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program.
+## Supported shells
+
+Active Help is currently only supported for the following shells:
+- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed.
+- Zsh
+
+## Adding Active Help messages
+
+As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md).
+
+Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details.
+
+### Active Help for nouns
+
+Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example:
+
+```go
+cmd := &cobra.Command{
+ Use: "add [NAME] [URL]",
+ Short: "add a chart repository",
+ Args: require.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return addRepo(args)
+ },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ var comps []string
+ if len(args) == 0 {
+ comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
+ } else if len(args) == 1 {
+ comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
+ } else {
+ comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
+ }
+ return comps, cobra.ShellCompDirectiveNoFileComp
+ },
+}
+```
+The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior:
+```
+bash-5.1$ helm repo add [tab]
+You must choose a name for the repo you are adding
+
+bash-5.1$ helm repo add grafana [tab]
+You must specify the URL for the repo you are adding
+
+bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab]
+This command does not take any more arguments
+```
+**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions.
+
+### Active Help for flags
+
+Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example:
+```go
+_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 2 {
+ return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp
+ }
+ return compVersionFlag(args[1], toComplete)
+ })
+```
+The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag.
+```
+bash-5.1$ bin/helm install myrelease --version 2.0.[tab]
+You must first specify the chart to install before the --version flag can be completed
+
+bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab]
+2.0.1 2.0.2 2.0.3
+```
+
+## User control of Active Help
+
+You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any.
+Allowing users to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about its configuration.
+
+The way to configure Active Help is to use the program's Active Help environment
+variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your
+program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever
+Active Help configuration values are supported by the program.
+
+For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user
+would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell.
+
+For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the
+Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages
+should or should not be added (instead of reading the environment variable directly).
+
+For example:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ activeHelpLevel := cobra.GetActiveHelpConfig(cmd)
+
+ var comps []string
+ if len(args) == 0 {
+ if activeHelpLevel != "off" {
+ comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
+ }
+ } else if len(args) == 1 {
+ if activeHelpLevel != "off" {
+ comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
+ }
+ } else {
+ if activeHelpLevel == "local" {
+ comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
+ }
+ }
+ return comps, cobra.ShellCompDirectiveNoFileComp
+},
+```
+**Note 1**: If the `<PROGRAM>_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly.
+
+**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `<PROGRAM>_ACTIVE_HELP` is set to.
+
+**Note 3**: If the user does not set `<PROGRAM>_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string.
+## Active Help with Cobra's default completion command
+
+Cobra provides a default `completion` command for programs that wish to use it.
+When using the default `completion` command, Active Help is configurable in the same
+fashion as described above using environment variables. You may wish to document this in more
+details for your users.
+
+## Debugging Active Help
+
+Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details.
+
+When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where any `-` is replaced by an `_`. For example, we can test deactivating some Active Help as shown below:
+```
+$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h<ENTER>
+bitnami/haproxy
+bitnami/harbor
+_activeHelp_ WARNING: cannot re-use a name that is still in use
+:0
+Completion ended with directive: ShellCompDirectiveDefault
+
+$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h<ENTER>
+bitnami/haproxy
+bitnami/harbor
+:0
+Completion ended with directive: ShellCompDirectiveDefault
+```
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 6c360c595..cb7e19537 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -73,7 +73,8 @@ __%[1]s_handle_go_custom_completion()
# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly %[1]s allows to handle aliases
args=("${words[@]:1}")
- requestComp="${words[0]} %[2]s ${args[*]}"
+ # Disable ActiveHelp which is not supported for bash completion v1
+ requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}"
lastParam=${words[$((${#words[@]}-1))]}
lastChar=${lastParam:$((${#lastParam}-1)):1}
@@ -99,7 +100,7 @@ __%[1]s_handle_go_custom_completion()
directive=0
fi
__%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
- __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}"
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
# Error code. No completion.
@@ -125,7 +126,7 @@ __%[1]s_handle_go_custom_completion()
local fullFilter filter filteringCmd
# Do not use quotes around the $out variable or else newline
# characters will be kept.
- for filter in ${out[*]}; do
+ for filter in ${out}; do
fullFilter+="$filter|"
done
@@ -136,7 +137,7 @@ __%[1]s_handle_go_custom_completion()
# File completion for directories only
local subdir
# Use printf to strip any trailing newline
- subdir=$(printf "%%s" "${out[0]}")
+ subdir=$(printf "%%s" "${out}")
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
__%[1]s_handle_subdirs_in_dir_flag "$subdir"
@@ -147,7 +148,7 @@ __%[1]s_handle_go_custom_completion()
else
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
- done < <(compgen -W "${out[*]}" -- "$cur")
+ done < <(compgen -W "${out}" -- "$cur")
fi
}
@@ -383,11 +384,11 @@ __%[1]s_handle_word()
`, name, ShellCompNoDescRequestCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
func writePostscript(buf io.StringWriter, name string) {
- name = strings.Replace(name, ":", "__", -1)
+ name = strings.ReplaceAll(name, ":", "__")
WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
WriteStringAndCheck(buf, fmt.Sprintf(`{
local cur prev words cword split
@@ -645,8 +646,8 @@ func gen(buf io.StringWriter, cmd *Command) {
gen(buf, c)
}
commandName := cmd.CommandPath()
- commandName = strings.Replace(commandName, " ", "_", -1)
- commandName = strings.Replace(commandName, ":", "__", -1)
+ commandName = strings.ReplaceAll(commandName, " ", "_")
+ commandName = strings.ReplaceAll(commandName, ":", "__")
if cmd.Root() == cmd {
WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go
index 82d26c175..767bf0312 100644
--- a/vendor/github.com/spf13/cobra/bash_completionsV2.go
+++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -78,7 +78,7 @@ __%[1]s_get_completion_results() {
directive=0
fi
__%[1]s_debug "The completion directive is: ${directive}"
- __%[1]s_debug "The completions are: ${out[*]}"
+ __%[1]s_debug "The completions are: ${out}"
}
__%[1]s_process_completion_results() {
@@ -111,13 +111,18 @@ __%[1]s_process_completion_results() {
fi
fi
+ # Separate activeHelp from normal completions
+ local completions=()
+ local activeHelp=()
+ __%[1]s_extract_activeHelp
+
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local fullFilter filter filteringCmd
- # Do not use quotes around the $out variable or else newline
+ # Do not use quotes around the $completions variable or else newline
# characters will be kept.
- for filter in ${out[*]}; do
+ for filter in ${completions[*]}; do
fullFilter+="$filter|"
done
@@ -129,7 +134,7 @@ __%[1]s_process_completion_results() {
# Use printf to strip any trailing newline
local subdir
- subdir=$(printf "%%s" "${out[0]}")
+ subdir=$(printf "%%s" "${completions[0]}")
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
@@ -143,6 +148,43 @@ __%[1]s_process_completion_results() {
__%[1]s_handle_special_char "$cur" :
__%[1]s_handle_special_char "$cur" =
+
+ # Print the activeHelp statements before we finish
+ if [ ${#activeHelp} -ne 0 ]; then
+ printf "\n";
+ printf "%%s\n" "${activeHelp[@]}"
+ printf "\n"
+
+ # The prompt format is only available from bash 4.4.
+ # We test if it is available before using it.
+ if (x=${PS1@P}) 2> /dev/null; then
+ printf "%%s" "${PS1@P}${COMP_LINE[@]}"
+ else
+ # Can't print the prompt. Just print the
+ # text the user had typed, it is workable enough.
+ printf "%%s" "${COMP_LINE[@]}"
+ fi
+ fi
+}
+
+# Separate activeHelp lines from real completions.
+# Fills the $activeHelp and $completions arrays.
+__%[1]s_extract_activeHelp() {
+ local activeHelpMarker="%[8]s"
+ local endIndex=${#activeHelpMarker}
+
+ while IFS='' read -r comp; do
+ if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
+ comp=${comp:endIndex}
+ __%[1]s_debug "ActiveHelp found: $comp"
+ if [ -n "$comp" ]; then
+ activeHelp+=("$comp")
+ fi
+ else
+ # Not an activeHelp line but a normal completion
+ completions+=("$comp")
+ fi
+ done < <(printf "%%s\n" "${out}")
}
__%[1]s_handle_completion_types() {
@@ -154,17 +196,16 @@ __%[1]s_handle_completion_types() {
# If the user requested inserting one completion at a time, or all
# completions at once on the command-line we must remove the descriptions.
# https://github.com/spf13/cobra/issues/1508
- local tab comp
- tab=$(printf '\t')
+ local tab=$'\t' comp
while IFS='' read -r comp; do
+ [[ -z $comp ]] && continue
# Strip any description
comp=${comp%%%%$tab*}
# Only consider the completions that match
- comp=$(compgen -W "$comp" -- "$cur")
- if [ -n "$comp" ]; then
+ if [[ $comp == "$cur"* ]]; then
COMPREPLY+=("$comp")
fi
- done < <(printf "%%s\n" "${out[@]}")
+ done < <(printf "%%s\n" "${completions[@]}")
;;
*)
@@ -175,44 +216,37 @@ __%[1]s_handle_completion_types() {
}
__%[1]s_handle_standard_completion_case() {
- local tab comp
- tab=$(printf '\t')
+ local tab=$'\t' comp
+
+ # Short circuit to optimize if we don't have descriptions
+ if [[ "${completions[*]}" != *$tab* ]]; then
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
+ return 0
+ fi
local longest=0
+ local compline
# Look for the longest completion so that we can format things nicely
- while IFS='' read -r comp; do
+ while IFS='' read -r compline; do
+ [[ -z $compline ]] && continue
# Strip any description before checking the length
- comp=${comp%%%%$tab*}
+ comp=${compline%%%%$tab*}
# Only consider the completions that match
- comp=$(compgen -W "$comp" -- "$cur")
+ [[ $comp == "$cur"* ]] || continue
+ COMPREPLY+=("$compline")
if ((${#comp}>longest)); then
longest=${#comp}
fi
- done < <(printf "%%s\n" "${out[@]}")
-
- local completions=()
- while IFS='' read -r comp; do
- if [ -z "$comp" ]; then
- continue
- fi
-
- __%[1]s_debug "Original comp: $comp"
- comp="$(__%[1]s_format_comp_descriptions "$comp" "$longest")"
- __%[1]s_debug "Final comp: $comp"
- completions+=("$comp")
- done < <(printf "%%s\n" "${out[@]}")
-
- while IFS='' read -r comp; do
- COMPREPLY+=("$comp")
- done < <(compgen -W "${completions[*]}" -- "$cur")
+ done < <(printf "%%s\n" "${completions[@]}")
# If there is a single completion left, remove the description text
if [ ${#COMPREPLY[*]} -eq 1 ]; then
__%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
- comp="${COMPREPLY[0]%%%% *}"
+ comp="${COMPREPLY[0]%%%%$tab*}"
__%[1]s_debug "Removed description from single completion, which is now: ${comp}"
- COMPREPLY=()
- COMPREPLY+=("$comp")
+ COMPREPLY[0]=$comp
+ else # Format the descriptions
+ __%[1]s_format_comp_descriptions $longest
fi
}
@@ -231,45 +265,48 @@ __%[1]s_handle_special_char()
__%[1]s_format_comp_descriptions()
{
- local tab
- tab=$(printf '\t')
- local comp="$1"
- local longest=$2
-
- # Properly format the description string which follows a tab character if there is one
- if [[ "$comp" == *$tab* ]]; then
- desc=${comp#*$tab}
- comp=${comp%%%%$tab*}
-
- # $COLUMNS stores the current shell width.
- # Remove an extra 4 because we add 2 spaces and 2 parentheses.
- maxdesclength=$(( COLUMNS - longest - 4 ))
-
- # Make sure we can fit a description of at least 8 characters
- # if we are to align the descriptions.
- if [[ $maxdesclength -gt 8 ]]; then
- # Add the proper number of spaces to align the descriptions
- for ((i = ${#comp} ; i < longest ; i++)); do
- comp+=" "
- done
- else
- # Don't pad the descriptions so we can fit more text after the completion
- maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
- fi
+ local tab=$'\t'
+ local comp desc maxdesclength
+ local longest=$1
+
+ local i ci
+ for ci in ${!COMPREPLY[*]}; do
+ comp=${COMPREPLY[ci]}
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ __%[1]s_debug "Original comp: $comp"
+ desc=${comp#*$tab}
+ comp=${comp%%%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if [[ $maxdesclength -gt 8 ]]; then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
- # If there is enough space for any description text,
- # truncate the descriptions that are too long for the shell width
- if [ $maxdesclength -gt 0 ]; then
- if [ ${#desc} -gt $maxdesclength ]; then
- desc=${desc:0:$(( maxdesclength - 1 ))}
- desc+="…"
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if [ $maxdesclength -gt 0 ]; then
+ if [ ${#desc} -gt $maxdesclength ]; then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
fi
- comp+=" ($desc)"
+ COMPREPLY[ci]=$comp
+ __%[1]s_debug "Final comp: $comp"
fi
- fi
-
- # Must use printf to escape all special characters
- printf "%%q" "${comp}"
+ done
}
__start_%[1]s()
@@ -310,7 +347,8 @@ fi
# ex: ts=4 sw=4 et filetype=sh
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ activeHelpMarker))
}
// GenBashCompletionFileV2 generates Bash completion version 2.
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 2cc18891d..675bb1340 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -18,6 +18,7 @@ package cobra
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"os"
@@ -166,7 +167,7 @@ type Command struct {
// errWriter is a writer defined by the user that replaces stderr
errWriter io.Writer
- //FParseErrWhitelist flag parse errors to be ignored
+ // FParseErrWhitelist flag parse errors to be ignored
FParseErrWhitelist FParseErrWhitelist
// CompletionOptions is a set of options to control the handling of shell completion
@@ -224,12 +225,23 @@ type Command struct {
SuggestionsMinimumDistance int
}
-// Context returns underlying command context. If command wasn't
-// executed with ExecuteContext Context returns Background context.
+// Context returns underlying command context. If command was executed
+// with ExecuteContext or the context was set with SetContext, the
+// previously set context will be returned. Otherwise, nil is returned.
+//
+// Notice that a call to Execute and ExecuteC will replace a nil context of
+// a command with a context.Background, so a background context will be
+// returned by Context after one of these functions has been called.
func (c *Command) Context() context.Context {
return c.ctx
}
+// SetContext sets context for the command. It is set to context.Background by default and will be overwritten by
+// Command.ExecuteContext or Command.ExecuteContextC
+func (c *Command) SetContext(ctx context.Context) {
+ c.ctx = ctx
+}
+
// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
// particularly useful when testing.
func (c *Command) SetArgs(a []string) {
@@ -852,6 +864,10 @@ func (c *Command) execute(a []string) (err error) {
if err := c.validateRequiredFlags(); err != nil {
return err
}
+ if err := c.validateFlagGroups(); err != nil {
+ return err
+ }
+
if c.RunE != nil {
if err := c.RunE(c, argWoFlags); err != nil {
return err
@@ -975,7 +991,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
if err != nil {
// Always show help if requested, even if SilenceErrors is in
// effect
- if err == flag.ErrHelp {
+ if errors.Is(err, flag.ErrHelp) {
cmd.HelpFunc()(cmd, args)
return cmd, nil
}
@@ -997,7 +1013,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
func (c *Command) ValidateArgs(args []string) error {
if c.Args == nil {
- return nil
+ return ArbitraryArgs(c, args)
}
return c.Args(c, args)
}
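The new `Command.SetContext` above pairs with plain `Execute`: `ExecuteC` only falls back to `context.Background()` when no context has been set, so a context attached before execution remains visible in the run functions. A minimal sketch of that usage (names and the stored value are illustrative):
```go
package main

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
)

type ctxKey string

func main() {
	rootCmd := &cobra.Command{
		Use: "app",
		RunE: func(cmd *cobra.Command, args []string) error {
			// The value attached below is retrievable here via cmd.Context().
			fmt.Println(cmd.Context().Value(ctxKey("requestID")))
			return nil
		},
	}

	// Execute keeps a previously set context instead of replacing it.
	rootCmd.SetContext(context.WithValue(context.Background(), ctxKey("requestID"), "42"))
	_ = rootCmd.Execute()
}
```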
diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go
index 9ecd56a47..2c2483998 100644
--- a/vendor/github.com/spf13/cobra/completions.go
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -103,6 +103,14 @@ func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string
return nil, ShellCompDirectiveNoFileComp
}
+// FixedCompletions can be used to create a completion function which always
+// returns the same results.
+func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return choices, directive
+ }
+}
+
// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
flag := c.Flag(flagName)
@@ -170,6 +178,12 @@ func (c *Command) initCompleteCmd(args []string) {
noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
for _, comp := range completions {
+ if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable {
+ // Remove all activeHelp entries in this case
+ if strings.HasPrefix(comp, activeHelpMarker) {
+ continue
+ }
+ }
if noDescriptions {
// Remove any description that may be included following a tab character.
comp = strings.Split(comp, "\t")[0]
@@ -311,8 +325,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
var completions []string
var directive ShellCompDirective
+ // Enforce flag groups before doing flag completions
+ finalCmd.enforceFlagGroupsForCompletion()
+
// Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true;
- // doing this allows for completion of persistant flag names even for commands that disable flag parsing.
+ // doing this allows for completion of persistent flag names even for commands that disable flag parsing.
//
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
@@ -644,7 +661,7 @@ To load completions for every new session, execute once:
#### macOS:
- %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s
+ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
@@ -669,6 +686,10 @@ to enable it. You can execute the following once:
echo "autoload -U compinit; compinit" >> ~/.zshrc
+To load completions in your current shell session:
+
+ source <(%[1]s completion zsh); compdef _%[1]s %[1]s
+
To load completions for every new session, execute once:
#### Linux:
@@ -677,7 +698,7 @@ To load completions for every new session, execute once:
#### macOS:
- %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s
+ %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
index bb57fd568..005ee6be7 100644
--- a/vendor/github.com/spf13/cobra/fish_completions.go
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -11,8 +11,8 @@ import (
func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
// Variables should not contain a '-' or ':' character
nameForVar := name
- nameForVar = strings.Replace(nameForVar, "-", "_", -1)
- nameForVar = strings.Replace(nameForVar, ":", "_", -1)
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
compCmd := ShellCompRequestCmd
if !includeDesc {
@@ -38,7 +38,8 @@ function __%[1]s_perform_completion
__%[1]s_debug "args: $args"
__%[1]s_debug "last arg: $lastArg"
- set -l requestComp "$args[1] %[3]s $args[2..-1] $lastArg"
+ # Disable ActiveHelp which is not supported for fish shell
+ set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
__%[1]s_debug "Calling $requestComp"
set -l results (eval $requestComp 2> /dev/null)
@@ -196,7 +197,7 @@ complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
`, nameForVar, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
// GenFishCompletion generates fish completion file and writes to the passed writer.
diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go
new file mode 100644
index 000000000..dc7843119
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/flag_groups.go
@@ -0,0 +1,223 @@
+// Copyright © 2022 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ requiredAsGroup = "cobra_annotation_required_if_others_set"
+ mutuallyExclusive = "cobra_annotation_mutually_exclusive"
+)
+
+// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+// if the command is invoked with a subset (but not all) of the given flags.
+func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+// if the command is invoked with more than one flag from the given set of flags.
+func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
+ }
+ // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed.
+ if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// validateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the
+// first error encountered.
+func (c *Command) validateFlagGroups() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+
+ // groupStatus format is the list of flags as a unique ID,
+ // then a map of each flag name and whether it is set or not.
+ groupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
+ })
+
+ if err := validateRequiredFlagGroups(groupStatus); err != nil {
+ return err
+ }
+ if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
+ return err
+ }
+ return nil
+}
+
+func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool {
+ for _, fname := range flagnames {
+ f := fs.Lookup(fname)
+ if f == nil {
+ return false
+ }
+ }
+ return true
+}
+
+func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) {
+ groupInfo, found := pflag.Annotations[annotation]
+ if found {
+ for _, group := range groupInfo {
+ if groupStatus[group] == nil {
+ flagnames := strings.Split(group, " ")
+
+ // Only consider this flag group at all if all the flags are defined.
+ if !hasAllFlags(flags, flagnames...) {
+ continue
+ }
+
+ groupStatus[group] = map[string]bool{}
+ for _, name := range flagnames {
+ groupStatus[group][name] = false
+ }
+ }
+
+ groupStatus[group][pflag.Name] = pflag.Changed
+ }
+ }
+}
+
+func validateRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+
+ unset := []string{}
+ for flagname, isSet := range flagnameAndStatus {
+ if !isSet {
+ unset = append(unset, flagname)
+ }
+ }
+ if len(unset) == len(flagnameAndStatus) || len(unset) == 0 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(unset)
+ return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset)
+ }
+
+ return nil
+}
+
+func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) == 0 || len(set) == 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set)
+ }
+ return nil
+}
+
+func sortedKeys(m map[string]map[string]bool) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// enforceFlagGroupsForCompletion will do the following:
+// - when a flag in a group is present, other flags in the group will be marked required
+// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
+// This allows the standard completion logic to behave appropriately for flag groups
+func (c *Command) enforceFlagGroupsForCompletion() {
+ if c.DisableFlagParsing {
+ return
+ }
+
+ flags := c.Flags()
+ groupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ c.Flags().VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
+ })
+
+ // If a flag that is part of a group is present, we make all the other flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range groupStatus {
+ for _, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the group is set, mark the other ones as required
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+ }
+
+ // If a flag that is mutually exclusive to others is present, we hide the other
+ // flags of that group so the shell completion does not suggest them
+ for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
+ for flagName, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the mutually exclusive group is set, mark the other ones as hidden
+ // Don't mark the flag that is already set as hidden because it may be an
+ // array or slice flag and therefore must continue being suggested
+ for _, fName := range strings.Split(flagList, " ") {
+ if fName != flagName {
+ flag := c.Flags().Lookup(fName)
+ flag.Hidden = true
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
index 2d6855911..1d45d9f9a 100644
--- a/vendor/github.com/spf13/cobra/go.mod
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -3,7 +3,7 @@ module github.com/spf13/cobra
go 1.15
require (
- github.com/cpuguy83/go-md2man/v2 v2.0.1
+ github.com/cpuguy83/go-md2man/v2 v2.0.2
github.com/inconshreveable/mousetrap v1.0.0
github.com/spf13/pflag v1.0.5
gopkg.in/yaml.v2 v2.4.0
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
index 431058ed0..8ed228800 100644
--- a/vendor/github.com/spf13/cobra/go.sum
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -1,18 +1,12 @@
-github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
index 62d719f0b..379e7c088 100644
--- a/vendor/github.com/spf13/cobra/powershell_completions.go
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -61,6 +61,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
# Prepare the command to request completions for the program.
# Split the command at the first space to separate the program and arguments.
$Program,$Arguments = $Command.Split(" ",2)
+
$RequestComp="$Program %[2]s $Arguments"
__%[1]s_debug "RequestComp: $RequestComp"
@@ -90,11 +91,13 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
}
__%[1]s_debug "Calling $RequestComp"
+ # First disable ActiveHelp which is not supported for Powershell
+ $env:%[8]s=0
+
#call the command store the output in $out and redirect stderr and stdout to null
# $Out is an array contains each line per element
Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
-
# get directive from last line
[int]$Directive = $Out[-1].TrimStart(':')
if ($Directive -eq "") {
@@ -242,7 +245,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
}
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md
index 9674c348c..ac680118e 100644
--- a/vendor/github.com/spf13/cobra/projects_using_cobra.md
+++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md
@@ -1,8 +1,8 @@
## Projects using Cobra
- [Arduino CLI](https://github.com/arduino/arduino-cli)
-- [Bleve](http://www.blevesearch.com/)
-- [CockroachDB](http://www.cockroachlabs.com/)
+- [Bleve](https://blevesearch.com/)
+- [CockroachDB](https://www.cockroachlabs.com/)
- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
- [Datree](https://github.com/datreeio/datree)
- [Delve](https://github.com/derekparker/delve)
@@ -14,14 +14,15 @@
- [Github CLI](https://github.com/cli/cli)
- [GitHub Labeler](https://github.com/erdaltsksn/gh-label)
- [Golangci-lint](https://golangci-lint.run)
-- [GopherJS](http://www.gopherjs.org/)
+- [GopherJS](https://github.com/gopherjs/gopherjs)
- [GoReleaser](https://goreleaser.com)
- [Helm](https://helm.sh)
- [Hugo](https://gohugo.io)
- [Infracost](https://github.com/infracost/infracost)
- [Istio](https://istio.io)
- [Kool](https://github.com/kool-dev/kool)
-- [Kubernetes](http://kubernetes.io/)
+- [Kubernetes](https://kubernetes.io/)
+- [Kubescape](https://github.com/armosec/kubescape)
- [Linkerd](https://linkerd.io/)
- [Mattermost-server](https://github.com/mattermost/mattermost-server)
- [Mercure](https://mercure.rocks/)
@@ -36,9 +37,11 @@
- [Ory Hydra](https://github.com/ory/hydra)
- [Ory Kratos](https://github.com/ory/kratos)
- [Pixie](https://github.com/pixie-io/pixie)
+- [Polygon Edge](https://github.com/0xPolygon/polygon-edge)
- [Pouch](https://github.com/alibaba/pouch)
-- [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+- [ProjectAtomic (enterprise)](https://www.projectatomic.io/)
- [Prototool](https://github.com/uber/prototool)
+- [Pulumi](https://www.pulumi.com)
- [QRcp](https://github.com/claudiodangelis/qrcp)
- [Random](https://github.com/erdaltsksn/random)
- [Rclone](https://rclone.org/)
diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md
index 33a4c65a5..1e2058ed6 100644
--- a/vendor/github.com/spf13/cobra/shell_completions.md
+++ b/vendor/github.com/spf13/cobra/shell_completions.md
@@ -40,7 +40,7 @@ Bash:
# Linux:
$ %[1]s completion bash > /etc/bash_completion.d/%[1]s
# macOS:
- $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s
+ $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
Zsh:
@@ -122,7 +122,7 @@ For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns
Some simplified code from `kubectl get` looks like:
```go
-validArgs []string = { "pod", "node", "service", "replicationcontroller" }
+validArgs = []string{ "pod", "node", "service", "replicationcontroller" }
cmd := &cobra.Command{
Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
@@ -148,7 +148,7 @@ node pod replicationcontroller service
If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
```go
-argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
cmd := &cobra.Command{
...
diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md
index 4a3c2b0da..5a7acf88e 100644
--- a/vendor/github.com/spf13/cobra/user_guide.md
+++ b/vendor/github.com/spf13/cobra/user_guide.md
@@ -32,7 +32,7 @@ func main() {
Cobra-CLI is its own program that will create your application and add any
commands you want. It's the easiest way to incorporate Cobra into your application.
-For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/master/README.md)
+For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
## Using the Cobra Library
@@ -51,7 +51,7 @@ var rootCmd = &cobra.Command{
Short: "Hugo is a very fast static site generator",
Long: `A Fast and Flexible Static Site Generator built with
love by spf13 and friends in Go.
- Complete documentation is available at http://hugo.spf13.com`,
+ Complete documentation is available at https://gohugo.io/documentation/`,
Run: func(cmd *cobra.Command, args []string) {
// Do Stuff Here
},
@@ -300,10 +300,34 @@ rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (re
rootCmd.MarkPersistentFlagRequired("region")
```
+### Flag Groups
+
+If you have different flags that must be provided together (e.g. if they provide the `--username` flag they MUST provide the `--password` flag as well) then
+Cobra can enforce that requirement:
+```go
+rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)")
+rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)")
+rootCmd.MarkFlagsRequiredTogether("username", "password")
+```
+
+You can also prevent different flags from being provided together if they represent mutually
+exclusive options such as specifying an output format as either `--json` or `--yaml` but never both:
+```go
+rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON")
+rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML")
+rootCmd.MarkFlagsMutuallyExclusive("json", "yaml")
+```
+
+In both of these cases:
+ - both local and persistent flags can be used
+ - **NOTE:** the group is only enforced on commands where every flag is defined
+ - a flag may appear in multiple groups
+ - a group may contain any number of flags
+
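+With the `username`/`password` group from the first example, supplying only one of the flags makes the command fail before `RunE` runs; given the error format used by the new validation, the output would look something like this (command name and value are illustrative):
+```
+$ myapp --username alice
+Error: if any flags in the group [username password] are set they must all be set; missing [password]
+```
+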
## Positional and Custom Arguments
-Validation of positional arguments can be specified using the `Args` field
-of `Command`.
+Validation of positional arguments can be specified using the `Args` field of `Command`.
+If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`.
The following validators are built in:
@@ -405,7 +429,7 @@ a count and a string.`,
}
```
-For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/).
+For a more complete example of a larger application, please check out [Hugo](https://gohugo.io/).
## Help Command
@@ -603,7 +627,7 @@ Did you mean this?
Run 'hugo --help' for usage.
```
-Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
If you need to disable suggestions or tweak the string distance in your command, use:
@@ -636,3 +660,7 @@ Cobra can generate documentation based on subcommands, flags, etc. Read more abo
## Generating shell completions
Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
+
+## Providing Active Help
+
+Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md).
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
index 624adab53..65cd94c60 100644
--- a/vendor/github.com/spf13/cobra/zsh_completions.go
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -75,7 +75,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
- WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
# zsh completion for %-36[1]s -*- shell-script -*-
@@ -163,7 +163,24 @@ _%[1]s()
return
fi
+ local activeHelpMarker="%[8]s"
+ local endIndex=${#activeHelpMarker}
+ local startIndex=$((${#activeHelpMarker}+1))
+ local hasActiveHelp=0
while IFS='\n' read -r comp; do
+ # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
+ if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
+ __%[1]s_debug "ActiveHelp found: $comp"
+ comp="${comp[$startIndex,-1]}"
+ if [ -n "$comp" ]; then
+ compadd -x "${comp}"
+ __%[1]s_debug "ActiveHelp will need delimiter"
+ hasActiveHelp=1
+ fi
+
+ continue
+ fi
+
if [ -n "$comp" ]; then
# If requested, completions are returned with a description.
# The description is preceded by a TAB character.
@@ -171,7 +188,7 @@ _%[1]s()
# We first need to escape any : as part of the completion itself.
comp=${comp//:/\\:}
- local tab=$(printf '\t')
+ local tab="$(printf '\t')"
comp=${comp//$tab/:}
__%[1]s_debug "Adding completion: ${comp}"
@@ -180,6 +197,17 @@ _%[1]s()
fi
done < <(printf "%%s\n" "${out[@]}")
+ # Add a delimiter after the activeHelp statements, but only if:
+ # - there are completions following the activeHelp statements, or
+ # - file completion will be performed (so there will be choices after the activeHelp)
+ if [ $hasActiveHelp -eq 1 ]; then
+ if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
+ __%[1]s_debug "Adding activeHelp delimiter"
+ compadd -x "--"
+ hasActiveHelp=0
+ fi
+ fi
+
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
__%[1]s_debug "Activating nospace."
noSpace="-S ''"
@@ -254,5 +282,6 @@ if [ "$funcstack[1]" = "_%[1]s" ]; then
fi
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ activeHelpMarker))
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 3bb22a971..95d8e59da 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -1,6 +1,7 @@
package assert
import (
+ "bytes"
"fmt"
"reflect"
"time"
@@ -32,7 +33,8 @@ var (
stringType = reflect.TypeOf("")
- timeType = reflect.TypeOf(time.Time{})
+ timeType = reflect.TypeOf(time.Time{})
+ bytesType = reflect.TypeOf([]byte{})
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
@@ -323,6 +325,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
}
+ case reflect.Slice:
+ {
+ // We only care about the []byte type.
+ if !canConvert(obj1Value, bytesType) {
+ break
+ }
+
+ // []byte can be compared!
+ bytesObj1, ok := obj1.([]byte)
+ if !ok {
+ bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
+
+ }
+ bytesObj2, ok := obj2.([]byte)
+ if !ok {
+ bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
+ }
+
+ return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+ }
}
return compareEqual, false
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
index df22c47fc..da867903e 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
@@ -9,7 +9,7 @@ package assert
import "reflect"
-// Wrapper around reflect.Value.CanConvert, for compatability
+// Wrapper around reflect.Value.CanConvert, for compatibility
// reasons.
func canConvert(value reflect.Value, to reflect.Type) bool {
return value.CanConvert(to)
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 27e2420ed..7880b8f94 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -736,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...)
+}
+
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index d9ea368d0..339515b8b 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -1461,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
return WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRange(a.t, actual, start, end, msgAndArgs...)
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRangef(a.t, actual, start, end, msg, args...)
+}
+
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 0357b2231..fa1245b18 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"os"
+ "path/filepath"
"reflect"
"regexp"
"runtime"
@@ -144,7 +145,8 @@ func CallerInfo() []string {
if len(parts) > 1 {
dir := parts[len(parts)-2]
if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ path, _ := filepath.Abs(file)
+ callers = append(callers, fmt.Sprintf("%s:%d", path, line))
}
}
@@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool {
switch objValue.Kind() {
// collection types are empty when they have no element
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
- // pointers are empty if nil or if the value they point to is empty
+ // pointers are empty if nil or if the value they point to is empty
case reflect.Ptr:
if objValue.IsNil() {
return true
}
deref := objValue.Elem().Interface()
return isEmpty(deref)
- // for all other types, compare against the zero value
+ // for all other types, compare against the zero value
+ // array types are empty when they match their zero-initialized state
default:
zero := reflect.Zero(objValue.Type())
return reflect.DeepEqual(object, zero.Interface())
@@ -815,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true // we consider nil to be equal to the nil set
}
- subsetValue := reflect.ValueOf(subset)
defer func() {
if e := recover(); e != nil {
ok = false
@@ -825,14 +827,32 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()
- if listKind != reflect.Array && listKind != reflect.Slice {
+ if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
- if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
+ subsetValue := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map && listKind == reflect.Map {
+ listValue := reflect.ValueOf(list)
+ subsetKeys := subsetValue.MapKeys()
+
+ for i := 0; i < len(subsetKeys); i++ {
+ subsetKey := subsetKeys[i]
+ subsetElement := subsetValue.MapIndex(subsetKey).Interface()
+ listElement := listValue.MapIndex(subsetKey).Interface()
+
+ if !ObjectsAreEqual(subsetElement, listElement) {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+ }
+ }
+
+ return true
+ }
+
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
ok, found := containsElement(list, element)
@@ -859,7 +879,6 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
defer func() {
if e := recover(); e != nil {
ok = false
@@ -869,14 +888,32 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()
- if listKind != reflect.Array && listKind != reflect.Slice {
+ if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
- if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
+ subsetValue := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map && listKind == reflect.Map {
+ listValue := reflect.ValueOf(list)
+ subsetKeys := subsetValue.MapKeys()
+
+ for i := 0; i < len(subsetKeys); i++ {
+ subsetKey := subsetKeys[i]
+ subsetElement := subsetValue.MapIndex(subsetKey).Interface()
+ listElement := listValue.MapIndex(subsetKey).Interface()
+
+ if !ObjectsAreEqual(subsetElement, listElement) {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+ }
+
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
ok, found := containsElement(list, element)
@@ -1109,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration,
return true
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if end.Before(start) {
+ return Fail(t, "Start should be before end", msgAndArgs...)
+ }
+
+ if actual.Before(start) {
+ return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...)
+ } else if actual.After(end) {
+ return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...)
+ }
+
+ return true
+}
+
func toFloat(x interface{}) (float64, bool) {
var xf float64
xok := true
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index 59c48277a..880853f5a 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -1864,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
t.FailNow()
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.WithinRange(t, actual, start, end, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.WithinRangef(t, actual, start, end, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// YAMLEq asserts that two YAML strings are equivalent.
func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
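The `WithinRange`/`WithinRangef` helpers added above are used like the other `require` assertions; a minimal sketch (the test name and bounds are illustrative):
```go
package mypkg_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestStampIsRecent(t *testing.T) {
	stamp := time.Now()

	// Fails the test immediately unless stamp lies inside the range (inclusive).
	require.WithinRange(t, stamp, time.Now().Add(-time.Minute), time.Now().Add(time.Minute))
}
```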
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 5bb07c89c..960bf6f2c 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -1462,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ WithinRange(a.t, actual, start, end, msgAndArgs...)
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ WithinRangef(a.t, actual, start, end, msg, args...)
+}
+
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/vbauerster/mpb/v7/go.mod b/vendor/github.com/vbauerster/mpb/v7/go.mod
index db1457e35..66056222e 100644
--- a/vendor/github.com/vbauerster/mpb/v7/go.mod
+++ b/vendor/github.com/vbauerster/mpb/v7/go.mod
@@ -4,7 +4,7 @@ require (
github.com/VividCortex/ewma v1.2.0
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/mattn/go-runewidth v0.0.13
- golang.org/x/sys v0.0.0-20220209214540-3681064d5158
+ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
)
go 1.14
diff --git a/vendor/github.com/vbauerster/mpb/v7/go.sum b/vendor/github.com/vbauerster/mpb/v7/go.sum
index f36888be9..1261e35ca 100644
--- a/vendor/github.com/vbauerster/mpb/v7/go.sum
+++ b/vendor/github.com/vbauerster/mpb/v7/go.sum
@@ -6,5 +6,5 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index b56886f26..83f112c4c 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -106,8 +106,8 @@ var ARM64 struct {
// ARM contains the supported CPU features of the current ARM (32-bit) platform.
// All feature flags are false if:
-// 1. the current platform is not arm, or
-// 2. the current operating system is not Linux.
+// 1. the current platform is not arm, or
+// 2. the current operating system is not Linux.
var ARM struct {
_ CacheLinePad
HasSWP bool // SWP instruction support
diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go
index 78192498d..b981cfbb4 100644
--- a/vendor/golang.org/x/sys/execabs/execabs.go
+++ b/vendor/golang.org/x/sys/execabs/execabs.go
@@ -53,7 +53,7 @@ func relError(file, path string) error {
// LookPath instead returns an error.
func LookPath(file string) (string, error) {
path, err := exec.LookPath(file)
- if err != nil {
+ if err != nil && !isGo119ErrDot(err) {
return "", err
}
if filepath.Base(file) == file && !filepath.IsAbs(path) {
diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go
new file mode 100644
index 000000000..6ab5f5089
--- /dev/null
+++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package execabs
+
+func isGo119ErrDot(err error) bool {
+ return false
+}
diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go
new file mode 100644
index 000000000..1e7a9ada0
--- /dev/null
+++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package execabs
+
+import "strings"
+
+func isGo119ErrDot(err error) bool {
+ // TODO: return errors.Is(err, exec.ErrDot)
+ return strings.Contains(err.Error(), "current directory")
+}
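
Illustrative sketch, not part of the vendored diff: on Go 1.19, exec.LookPath can return a path together with an ErrDot-style error for binaries found relative to the current directory, and the LookPath change above keeps execabs rejecting such results. The binary name "sh" below is only an example.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/execabs"
)

func main() {
	// With the change above, an ErrDot-style error on Go 1.19 no longer
	// short-circuits the relative-path check: a lookup that resolves into the
	// current directory is still refused.
	path, err := execabs.LookPath("sh")
	if err != nil {
		fmt.Println("refusing to use binary:", err)
		return
	}
	fmt.Println("resolved to absolute path:", path)
}
```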
diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go
index 602473cba..a25223b8f 100644
--- a/vendor/golang.org/x/sys/plan9/syscall.go
+++ b/vendor/golang.org/x/sys/plan9/syscall.go
@@ -113,5 +113,6 @@ func (tv *Timeval) Nano() int64 {
// use is a no-op, but the compiler cannot see that it is.
// Calling use(p) ensures that p is kept live until that point.
+//
//go:noescape
func use(p unsafe.Pointer)
diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go
index 723b1f400..d079d8116 100644
--- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go
+++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go
@@ -115,6 +115,7 @@ func Write(fd int, p []byte) (n int, err error) {
var ioSync int64
//sys fd2path(fd int, buf []byte) (err error)
+
func Fd2path(fd int) (path string, err error) {
var buf [512]byte
@@ -126,6 +127,7 @@ func Fd2path(fd int) (path string, err error) {
}
//sys pipe(p *[2]int32) (err error)
+
func Pipe(p []int) (err error) {
if len(p) != 2 {
return syscall.ErrorString("bad arg in system call")
@@ -180,6 +182,7 @@ func (w Waitmsg) ExitStatus() int {
}
//sys await(s []byte) (n int, err error)
+
func Await(w *Waitmsg) (err error) {
var buf [512]byte
var f [5][]byte
@@ -301,42 +304,49 @@ func Getgroups() (gids []int, err error) {
}
//sys open(path string, mode int) (fd int, err error)
+
func Open(path string, mode int) (fd int, err error) {
fixwd()
return open(path, mode)
}
//sys create(path string, mode int, perm uint32) (fd int, err error)
+
func Create(path string, mode int, perm uint32) (fd int, err error) {
fixwd()
return create(path, mode, perm)
}
//sys remove(path string) (err error)
+
func Remove(path string) error {
fixwd()
return remove(path)
}
//sys stat(path string, edir []byte) (n int, err error)
+
func Stat(path string, edir []byte) (n int, err error) {
fixwd()
return stat(path, edir)
}
//sys bind(name string, old string, flag int) (err error)
+
func Bind(name string, old string, flag int) (err error) {
fixwd()
return bind(name, old, flag)
}
//sys mount(fd int, afd int, old string, flag int, aname string) (err error)
+
func Mount(fd int, afd int, old string, flag int, aname string) (err error) {
fixwd()
return mount(fd, afd, old, flag, aname)
}
//sys wstat(path string, edir []byte) (err error)
+
func Wstat(path string, edir []byte) (err error) {
fixwd()
return wstat(path, edir)
diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 934af313c..15721a510 100644
--- a/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -8,7 +8,6 @@
package unix
import (
- "bytes"
"unsafe"
)
@@ -45,13 +44,7 @@ func NewIfreq(name string) (*Ifreq, error) {
// Name returns the interface name associated with the Ifreq.
func (ifr *Ifreq) Name() string {
- // BytePtrToString requires a NULL terminator or the program may crash. If
- // one is not present, just return the empty string.
- if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) {
- return ""
- }
-
- return BytePtrToString(&ifr.raw.Ifrn[0])
+ return ByteSliceToString(ifr.raw.Ifrn[:])
}
// According to netdevice(7), only AF_INET addresses are returned for numerous
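
Illustrative sketch (Linux only, not part of the diff): Name() now goes through ByteSliceToString, which stops at the first NUL or at the end of the fixed-size array, so a name without a trailing NUL no longer comes back as the empty string.

```go
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// NewIfreq copies the name into the fixed-size Ifrn array; Name() reads it
	// back via ByteSliceToString after the change above.
	ifr, err := unix.NewIfreq("lo")
	if err != nil {
		fmt.Println("NewIfreq:", err)
		return
	}
	fmt.Println("interface:", ifr.Name())
}
```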
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go
index f2a114fc2..ad22c33db 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) {
}
//sys utimes(path string, times *[2]Timeval) (err error)
+
func Utimes(path string, tv []Timeval) error {
if len(tv) != 2 {
return EINVAL
@@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error {
}
//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
+
func UtimesNano(path string, ts []Timespec) error {
if len(ts) != 2 {
return EINVAL
@@ -300,11 +302,13 @@ func direntNamlen(buf []byte) (uint64, bool) {
}
//sys getdirent(fd int, buf []byte) (n int, err error)
+
func Getdents(fd int, buf []byte) (n int, err error) {
return getdirent(fd, buf)
}
//sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error)
+
func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
var status _C_int
var r Pid_t
@@ -372,6 +376,7 @@ func (w WaitStatus) TrapCause() int { return -1 }
//sys fcntl(fd int, cmd int, arg int) (val int, err error)
//sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range
+
func Fsync(fd int) error {
return fsyncRange(fd, O_SYNC, 0, 0)
}
@@ -536,6 +541,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
//sys Getsystemcfg(label int) (n uint64)
//sys umount(target string) (err error)
+
func Unmount(target string, flags int) (err error) {
if flags != 0 {
// AIX doesn't have any flags for umount.
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 09a25c653..e5448cc93 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -504,6 +504,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error)
//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error)
//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error)
//sys Pathconf(path string, name int) (val int, err error)
@@ -572,7 +573,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
// Nfssvc
// Getfh
// Quotactl
-// Mount
// Csops
// Waitid
// Add_profil
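
Illustrative sketch (darwin only, not part of the diff) of the call shape of the newly exported Mount wrapper. The fsType and target directory are placeholders, and the call needs appropriate privileges to succeed.

```go
//go:build darwin

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	// Mount(fsType, dir, flags, data): data carries filesystem-specific
	// arguments and may be nil; flags 0 is used here just to show the shape.
	var data unsafe.Pointer
	if err := unix.Mount("devfs", "/tmp/devfs-example", 0, data); err != nil {
		fmt.Println("mount failed (expected without privileges):", err)
	}
}
```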
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index c61e27498..61c0d0de1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -125,11 +125,13 @@ func Pipe2(p []int, flags int) (err error) {
}
//sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error)
+
func pread(fd int, p []byte, offset int64) (n int, err error) {
return extpread(fd, p, 0, offset)
}
//sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error)
+
func pwrite(fd int, p []byte, offset int64) (n int, err error) {
return extpwrite(fd, p, 0, offset)
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index d251dafae..c8d203212 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -512,24 +512,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
//
// Server example:
//
-// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
-// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
-// Channel: 1,
-// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
-// })
-// _ = Listen(fd, 1)
-// nfd, sa, _ := Accept(fd)
-// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
-// Read(nfd, buf)
+// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
+// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
+// Channel: 1,
+// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
+// })
+// _ = Listen(fd, 1)
+// nfd, sa, _ := Accept(fd)
+// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
+// Read(nfd, buf)
//
// Client example:
//
-// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
-// _ = Connect(fd, &SockaddrRFCOMM{
-// Channel: 1,
-// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
-// })
-// Write(fd, []byte(`hello`))
+// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
+// _ = Connect(fd, &SockaddrRFCOMM{
+// Channel: 1,
+// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
+// })
+// Write(fd, []byte(`hello`))
type SockaddrRFCOMM struct {
// Addr represents a bluetooth address, byte ordering is little-endian.
Addr [6]uint8
@@ -556,12 +556,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) {
// The SockaddrCAN struct must be bound to the socket file descriptor
// using Bind before the CAN socket can be used.
//
-// // Read one raw CAN frame
-// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
-// addr := &SockaddrCAN{Ifindex: index}
-// Bind(fd, addr)
-// frame := make([]byte, 16)
-// Read(fd, frame)
+// // Read one raw CAN frame
+// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
+// addr := &SockaddrCAN{Ifindex: index}
+// Bind(fd, addr)
+// frame := make([]byte, 16)
+// Read(fd, frame)
//
// The full SocketCAN documentation can be found in the linux kernel
// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
@@ -632,13 +632,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
// Here is an example of using an AF_ALG socket with SHA1 hashing.
// The initial socket setup process is as follows:
//
-// // Open a socket to perform SHA1 hashing.
-// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
-// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
-// unix.Bind(fd, addr)
-// // Note: unix.Accept does not work at this time; must invoke accept()
-// // manually using unix.Syscall.
-// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
+// // Open a socket to perform SHA1 hashing.
+// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
+// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
+// unix.Bind(fd, addr)
+// // Note: unix.Accept does not work at this time; must invoke accept()
+// // manually using unix.Syscall.
+// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
//
// Once a file descriptor has been returned from Accept, it may be used to
// perform SHA1 hashing. The descriptor is not safe for concurrent use, but
@@ -647,39 +647,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
// When hashing a small byte slice or string, a single Write and Read may
// be used:
//
-// // Assume hashfd is already configured using the setup process.
-// hash := os.NewFile(hashfd, "sha1")
-// // Hash an input string and read the results. Each Write discards
-// // previous hash state. Read always reads the current state.
-// b := make([]byte, 20)
-// for i := 0; i < 2; i++ {
-// io.WriteString(hash, "Hello, world.")
-// hash.Read(b)
-// fmt.Println(hex.EncodeToString(b))
-// }
-// // Output:
-// // 2ae01472317d1935a84797ec1983ae243fc6aa28
-// // 2ae01472317d1935a84797ec1983ae243fc6aa28
+// // Assume hashfd is already configured using the setup process.
+// hash := os.NewFile(hashfd, "sha1")
+// // Hash an input string and read the results. Each Write discards
+// // previous hash state. Read always reads the current state.
+// b := make([]byte, 20)
+// for i := 0; i < 2; i++ {
+// io.WriteString(hash, "Hello, world.")
+// hash.Read(b)
+// fmt.Println(hex.EncodeToString(b))
+// }
+// // Output:
+// // 2ae01472317d1935a84797ec1983ae243fc6aa28
+// // 2ae01472317d1935a84797ec1983ae243fc6aa28
//
// For hashing larger byte slices, or byte streams such as those read from
// a file or socket, use Sendto with MSG_MORE to instruct the kernel to update
// the hash digest instead of creating a new one for a given chunk and finalizing it.
//
-// // Assume hashfd and addr are already configured using the setup process.
-// hash := os.NewFile(hashfd, "sha1")
-// // Hash the contents of a file.
-// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
-// b := make([]byte, 4096)
-// for {
-// n, err := f.Read(b)
-// if err == io.EOF {
-// break
-// }
-// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
-// }
-// hash.Read(b)
-// fmt.Println(hex.EncodeToString(b))
-// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
+// // Assume hashfd and addr are already configured using the setup process.
+// hash := os.NewFile(hashfd, "sha1")
+// // Hash the contents of a file.
+// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
+// b := make([]byte, 4096)
+// for {
+// n, err := f.Read(b)
+// if err == io.EOF {
+// break
+// }
+// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
+// }
+// hash.Read(b)
+// fmt.Println(hex.EncodeToString(b))
+// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
//
// For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html.
type SockaddrALG struct {
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index 15d637d63..78daceb33 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -81,6 +81,7 @@ func Pipe(p []int) (err error) {
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
func Pipe2(p []int, flags int) error {
if len(p) != 2 {
return EINVAL
@@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error {
}
//sys Getdents(fd int, buf []byte) (n int, err error)
+
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
n, err = Getdents(fd, buf)
if err != nil || basep == nil {
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 234fd4a5d..1b305fab1 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -5,7 +5,7 @@
// +build 386,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 58619b758..6bcdef5dd 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -5,7 +5,7 @@
// +build amd64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 3a64ff59d..e65df0f8d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -5,7 +5,7 @@
// +build arm,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index abe0b9257..c7021115a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -5,7 +5,7 @@
// +build arm64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index ebc5f3218..0d83a1cd4 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -5,7 +5,7 @@
// +build loong64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 14d7a8439..7f44a495b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -5,7 +5,7 @@
// +build mips,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 99e7c4ac0..2f92b4e48 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -5,7 +5,7 @@
// +build mips64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 496364c33..f5367a966 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -5,7 +5,7 @@
// +build mips64le,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 3e4083085..2e22337d7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -5,7 +5,7 @@
// +build mipsle,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 1151a7dfa..858c4f30f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -5,7 +5,7 @@
// +build ppc,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index ed17f249e..af2a7ba6e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -5,7 +5,7 @@
// +build ppc64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index d84a37c1a..eaa2eb8e2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -5,7 +5,7 @@
// +build ppc64le,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 5cafba83f..faaa9f063 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -5,7 +5,7 @@
// +build riscv64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 6d122da41..0d161f0b7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -5,7 +5,7 @@
// +build s390x,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 6bd19e51d..4fd497a3e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -5,7 +5,7 @@
// +build sparc64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 879376589..467deed76 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index 8da90cf0e..7e308a476 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mount(SB)
+
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index f47eedd5a..35938d34f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 4d26f7d01..b09e5bb0e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mount(SB)
+
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 531409256..4948362f2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build 386 && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index b02ab83db..f64345e0e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build amd64 && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 9e6871d2e..72469c79e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build arm && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index b732d1255..68f072283 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build arm64 && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 61fbb24f8..090ae46c6 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build loong64 && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 5310f71ea..03604cca1 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 219bbb126..fe57a7b26 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips64 && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index be9432da5..3f0db4da8 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips64le && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index d0155a42e..70ecd3b23 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mipsle && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 01c17bcc6..4e700120d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 944a9c3c7..34a57c699 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc64 && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 5d2c90e1c..6b84a4729 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc64le && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index e173cb515..c4a305fe2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build riscv64 && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index 6106715d5..a1f1e4c9e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build s390x && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index ca7b37b4b..df95ebf3a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -1,4 +1,4 @@
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build sparc64 && linux
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
index baf5fe650..2ed718ca0 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
@@ -94,10 +94,10 @@ type Statfs_t struct {
F_namemax uint32
F_owner uint32
F_ctime uint64
- F_fstypename [16]int8
- F_mntonname [90]int8
- F_mntfromname [90]int8
- F_mntfromspec [90]int8
+ F_fstypename [16]byte
+ F_mntonname [90]byte
+ F_mntfromname [90]byte
+ F_mntfromspec [90]byte
Pad_cgo_0 [2]byte
Mount_info [160]byte
}
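
Illustrative sketch (OpenBSD only, not part of the diff; the same field-type change is repeated for the other OpenBSD architectures below): with the string-carrying Statfs_t fields now declared as byte arrays, they can be converted with ByteSliceToString instead of copying []int8 element by element.

```go
//go:build openbsd

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statfs_t
	if err := unix.Statfs("/", &st); err != nil {
		fmt.Println("statfs:", err)
		return
	}
	// The fields are NUL-padded byte arrays; ByteSliceToString trims at the
	// first NUL.
	fmt.Println("fs type:   ", unix.ByteSliceToString(st.F_fstypename[:]))
	fmt.Println("mounted on:", unix.ByteSliceToString(st.F_mntonname[:]))
}
```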
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
index e21ae8ecf..b4fb97ebe 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
@@ -96,10 +96,10 @@ type Statfs_t struct {
F_namemax uint32
F_owner uint32
F_ctime uint64
- F_fstypename [16]int8
- F_mntonname [90]int8
- F_mntfromname [90]int8
- F_mntfromspec [90]int8
+ F_fstypename [16]byte
+ F_mntonname [90]byte
+ F_mntfromname [90]byte
+ F_mntfromspec [90]byte
_ [2]byte
Mount_info [160]byte
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
index f190651cd..2c4675040 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
@@ -98,10 +98,10 @@ type Statfs_t struct {
F_namemax uint32
F_owner uint32
F_ctime uint64
- F_fstypename [16]int8
- F_mntonname [90]int8
- F_mntfromname [90]int8
- F_mntfromspec [90]int8
+ F_fstypename [16]byte
+ F_mntonname [90]byte
+ F_mntfromname [90]byte
+ F_mntfromspec [90]byte
_ [2]byte
Mount_info [160]byte
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
index 84747c582..ddee04514 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
@@ -94,10 +94,10 @@ type Statfs_t struct {
F_namemax uint32
F_owner uint32
F_ctime uint64
- F_fstypename [16]int8
- F_mntonname [90]int8
- F_mntfromname [90]int8
- F_mntfromspec [90]int8
+ F_fstypename [16]byte
+ F_mntonname [90]byte
+ F_mntfromname [90]byte
+ F_mntfromspec [90]byte
_ [2]byte
Mount_info [160]byte
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
index ac5c8b637..eb13d4e8b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
@@ -94,10 +94,10 @@ type Statfs_t struct {
F_namemax uint32
F_owner uint32
F_ctime uint64
- F_fstypename [16]int8
- F_mntonname [90]int8
- F_mntfromname [90]int8
- F_mntfromspec [90]int8
+ F_fstypename [16]byte
+ F_mntonname [90]byte
+ F_mntfromname [90]byte
+ F_mntfromspec [90]byte
_ [2]byte
Mount_info [160]byte
}
diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go
index 855698bb2..75980fd44 100644
--- a/vendor/golang.org/x/sys/windows/exec_windows.go
+++ b/vendor/golang.org/x/sys/windows/exec_windows.go
@@ -15,11 +15,11 @@ import (
// in http://msdn.microsoft.com/en-us/library/ms880421.
// This function returns "" (2 double quotes) if s is empty.
// Alternatively, these transformations are done:
-// - every back slash (\) is doubled, but only if immediately
-// followed by double quote (");
-// - every double quote (") is escaped by back slash (\);
-// - finally, s is wrapped with double quotes (arg -> "arg"),
-// but only if there is space or tab inside s.
+// - every back slash (\) is doubled, but only if immediately
+// followed by double quote (");
+// - every double quote (") is escaped by back slash (\);
+// - finally, s is wrapped with double quotes (arg -> "arg"),
+// but only if there is space or tab inside s.
func EscapeArg(s string) string {
if len(s) == 0 {
return "\"\""
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
index 906325e09..6c8d97b6a 100644
--- a/vendor/golang.org/x/sys/windows/registry/key.go
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -20,7 +20,6 @@
// log.Fatal(err)
// }
// fmt.Printf("Windows system root is %q\n", s)
-//
package registry
import (
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index ce3075c45..636e5de60 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -623,7 +623,6 @@ var (
func getStdHandle(stdhandle uint32) (fd Handle) {
r, _ := GetStdHandle(stdhandle)
- CloseOnExec(r)
return r
}
diff --git a/vendor/golang.org/x/term/term.go b/vendor/golang.org/x/term/term.go
index d59270880..1a40d1012 100644
--- a/vendor/golang.org/x/term/term.go
+++ b/vendor/golang.org/x/term/term.go
@@ -7,11 +7,11 @@
//
// Putting a terminal into raw mode is the most common requirement:
//
-// oldState, err := term.MakeRaw(int(os.Stdin.Fd()))
-// if err != nil {
-// panic(err)
-// }
-// defer term.Restore(int(os.Stdin.Fd()), oldState)
+// oldState, err := term.MakeRaw(int(os.Stdin.Fd()))
+// if err != nil {
+// panic(err)
+// }
+// defer term.Restore(int(os.Stdin.Fd()), oldState)
//
// Note that on non-Unix systems os.Stdin.Fd() may not be 0.
package term
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
index 535ab8257..4b48a5899 100644
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
@@ -935,7 +935,7 @@ func (s *stRingBuffer) Add(a string) {
// next most recent, and so on. If such an element doesn't exist then ok is
// false.
func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
- if n >= s.size {
+ if n < 0 || n >= s.size {
return "", false
}
index := s.head - n
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
index df36e3a30..0173b6982 100644
--- a/vendor/gopkg.in/yaml.v3/decode.go
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t {
if p.event.typ != yaml_NO_EVENT {
return p.event.typ
}
- if !yaml_parser_parse(&p.parser, &p.event) {
+ // It's a curious choice from the underlying API to generally return a
+ // positive result on success, but in this case return true in an error
+ // scenario. This was the source of bugs in the past (issue #666).
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
p.fail()
}
return p.event.typ
@@ -320,6 +323,8 @@ type decoder struct {
decodeCount int
aliasCount int
aliasDepth int
+
+ mergedFields map[interface{}]bool
}
var (
@@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
}
}
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+
+ var mergeNode *Node
+
mapIsNew := false
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
@@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
}
for i := 0; i < l; i += 2 {
if isMerge(n.Content[i]) {
- d.merge(n.Content[i+1], out)
+ mergeNode = n.Content[i+1]
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.Content[i], k) {
+ if mergedFields != nil {
+ ki := k.Interface()
+ if mergedFields[ki] {
+ continue
+ }
+ mergedFields[ki] = true
+ }
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
@@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
}
}
}
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+
d.stringMapType = stringMapType
d.generalMapType = generalMapType
return true
@@ -844,7 +867,8 @@ func isStringMap(n *Node) bool {
}
l := len(n.Content)
for i := 0; i < l; i += 2 {
- if n.Content[i].ShortTag() != strTag {
+ shortTag := n.Content[i].ShortTag()
+ if shortTag != strTag && shortTag != mergeTag {
return false
}
}
@@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
- inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
@@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
d.prepare(n, field)
}
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+ var mergeNode *Node
var doneFields []bool
if d.uniqueKeys {
doneFields = make([]bool, len(sinfo.FieldsList))
@@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
for i := 0; i < l; i += 2 {
ni := n.Content[i]
if isMerge(ni) {
- d.merge(n.Content[i+1], out)
+ mergeNode = n.Content[i+1]
continue
}
if !d.unmarshal(ni, name) {
continue
}
- if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ sname := name.String()
+ if mergedFields != nil {
+ if mergedFields[sname] {
+ continue
+ }
+ mergedFields[sname] = true
+ }
+ if info, ok := sinfo.FieldsMap[sname]; ok {
if d.uniqueKeys {
if doneFields[info.Id] {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
@@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
}
}
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
return true
}
@@ -918,19 +956,29 @@ func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
-func (d *decoder) merge(n *Node, out reflect.Value) {
- switch n.Kind {
+func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
+ mergedFields := d.mergedFields
+ if mergedFields == nil {
+ d.mergedFields = make(map[interface{}]bool)
+ for i := 0; i < len(parent.Content); i += 2 {
+ k := reflect.New(ifaceType).Elem()
+ if d.unmarshal(parent.Content[i], k) {
+ d.mergedFields[k.Interface()] = true
+ }
+ }
+ }
+
+ switch merge.Kind {
case MappingNode:
- d.unmarshal(n, out)
+ d.unmarshal(merge, out)
case AliasNode:
- if n.Alias != nil && n.Alias.Kind != MappingNode {
+ if merge.Alias != nil && merge.Alias.Kind != MappingNode {
failWantMap()
}
- d.unmarshal(n, out)
+ d.unmarshal(merge, out)
case SequenceNode:
- // Step backwards as earlier nodes take precedence.
- for i := len(n.Content) - 1; i >= 0; i-- {
- ni := n.Content[i]
+ for i := 0; i < len(merge.Content); i++ {
+ ni := merge.Content[i]
if ni.Kind == AliasNode {
if ni.Alias != nil && ni.Alias.Kind != MappingNode {
failWantMap()
@@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) {
default:
failWantMap()
}
+
+ d.mergedFields = mergedFields
}
func isMerge(n *Node) bool {
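
Illustrative sketch (not part of the diff) of the merge-key semantics the reworked decoder preserves: keys set explicitly in a mapping win over keys pulled in through "<<", and earlier maps in a merge sequence take precedence over later ones. The YAML document below is made up for the example.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

const doc = `
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  host: dev.example.com
`

func main() {
	var out map[string]map[string]string
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	fmt.Println(out["development"]["adapter"]) // postgres, merged from defaults
	fmt.Println(out["development"]["host"])    // dev.example.com, explicit key wins
}
```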
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
index ac66fccc0..268558a0d 100644
--- a/vendor/gopkg.in/yaml.v3/parserc.go
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i
func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
+ if token == nil {
+ return false
+ }
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
@@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
}
token := peek_token(parser)
- if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+ if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
return
}
@@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
+ if token == nil {
+ return false
+ }
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
@@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev
func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
+ if token == nil {
+ return false
+ }
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
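
Illustrative sketch (not part of the diff): the nil-token guards above make malformed input surface as an ordinary decode error rather than a parser panic. The input below is simply an invalid document; the exact historical crashers are not reproduced here.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var v interface{}
	if err := yaml.Unmarshal([]byte("key: [unterminated"), &v); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(v)
}
```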
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f4eda9756..195c40405 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -11,7 +11,7 @@ github.com/Microsoft/go-winio/backuptar
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/pkg/security
github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.9.2
+# github.com/Microsoft/hcsshim v0.9.3
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/computestorage
github.com/Microsoft/hcsshim/internal/cow
@@ -21,11 +21,13 @@ github.com/Microsoft/hcsshim/internal/hcs/schema2
github.com/Microsoft/hcsshim/internal/hcserror
github.com/Microsoft/hcsshim/internal/hns
github.com/Microsoft/hcsshim/internal/interop
+github.com/Microsoft/hcsshim/internal/jobobject
github.com/Microsoft/hcsshim/internal/log
github.com/Microsoft/hcsshim/internal/logfields
github.com/Microsoft/hcsshim/internal/longpath
github.com/Microsoft/hcsshim/internal/mergemaps
github.com/Microsoft/hcsshim/internal/oc
+github.com/Microsoft/hcsshim/internal/queue
github.com/Microsoft/hcsshim/internal/safefile
github.com/Microsoft/hcsshim/internal/timeout
github.com/Microsoft/hcsshim/internal/vmcompute
@@ -63,7 +65,7 @@ github.com/container-orchestrated-devices/container-device-interface/pkg/cdi
github.com/container-orchestrated-devices/container-device-interface/specs-go
# github.com/containerd/cgroups v1.0.3
github.com/containerd/cgroups/stats/v1
-# github.com/containerd/containerd v1.6.4
+# github.com/containerd/containerd v1.6.6
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/userns
@@ -72,7 +74,7 @@ github.com/containerd/containerd/sys
# github.com/containerd/stargz-snapshotter/estargz v0.11.4
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containernetworking/cni v1.1.0
+# github.com/containernetworking/cni v1.1.1
## explicit
github.com/containernetworking/cni/libcni
github.com/containernetworking/cni/pkg/invoke
@@ -87,7 +89,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.1.1
## explicit
github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.26.1
+# github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c
## explicit
github.com/containers/buildah
github.com/containers/buildah/bind
@@ -109,15 +111,17 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.48.1-0.20220512112240-7536bf6ff9b1
+# github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9
## explicit
github.com/containers/common/libimage
+github.com/containers/common/libimage/define
github.com/containers/common/libimage/manifests
github.com/containers/common/libnetwork/cni
github.com/containers/common/libnetwork/etchosts
github.com/containers/common/libnetwork/internal/util
github.com/containers/common/libnetwork/netavark
github.com/containers/common/libnetwork/network
+github.com/containers/common/libnetwork/resolvconf
github.com/containers/common/libnetwork/types
github.com/containers/common/libnetwork/util
github.com/containers/common/pkg/apparmor
@@ -138,6 +142,7 @@ github.com/containers/common/pkg/netns
github.com/containers/common/pkg/parse
github.com/containers/common/pkg/report
github.com/containers/common/pkg/report/camelcase
+github.com/containers/common/pkg/resize
github.com/containers/common/pkg/retry
github.com/containers/common/pkg/seccomp
github.com/containers/common/pkg/secrets
@@ -155,7 +160,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.21.2-0.20220519193817-1e26896b8059
+# github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
## explicit
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -168,9 +173,11 @@ github.com/containers/image/v5/docker/policyconfiguration
github.com/containers/image/v5/docker/reference
github.com/containers/image/v5/image
github.com/containers/image/v5/internal/blobinfocache
+github.com/containers/image/v5/internal/image
github.com/containers/image/v5/internal/imagedestination
github.com/containers/image/v5/internal/imagesource
github.com/containers/image/v5/internal/iolimits
+github.com/containers/image/v5/internal/manifest
github.com/containers/image/v5/internal/pkg/platform
github.com/containers/image/v5/internal/private
github.com/containers/image/v5/internal/putblobdigest
@@ -208,7 +215,7 @@ github.com/containers/image/v5/types
github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
github.com/containers/libtrust
-# github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
+# github.com/containers/ocicrypt v1.1.5
## explicit
github.com/containers/ocicrypt
github.com/containers/ocicrypt/blockcipher
@@ -235,7 +242,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.41.1-0.20220511210719-cacc3325a9c8
+# github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
@@ -328,7 +335,7 @@ github.com/docker/distribution/registry/client/auth/challenge
github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
-# github.com/docker/docker v20.10.16+incompatible
+# github.com/docker/docker v20.10.17+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@@ -379,10 +386,8 @@ github.com/docker/go-plugins-helpers/volume
## explicit
github.com/docker/go-units
# github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
+## explicit
github.com/docker/libnetwork/ipamutils
-github.com/docker/libnetwork/resolvconf
-github.com/docker/libnetwork/resolvconf/dns
-github.com/docker/libnetwork/types
# github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4
## explicit
github.com/dtylman/scp
@@ -391,7 +396,7 @@ github.com/felixge/httpsnoop
# github.com/fsnotify/fsnotify v1.5.4
## explicit
github.com/fsnotify/fsnotify
-# github.com/fsouza/go-dockerclient v1.7.11
+# github.com/fsouza/go-dockerclient v1.8.1
github.com/fsouza/go-dockerclient
# github.com/ghodss/yaml v1.0.0
## explicit
@@ -439,18 +444,16 @@ github.com/hashicorp/errwrap
# github.com/hashicorp/go-multierror v1.1.1
## explicit
github.com/hashicorp/go-multierror
-# github.com/imdario/mergo v0.3.12
+# github.com/imdario/mergo v0.3.13
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
github.com/inconshreveable/mousetrap
-# github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee
-github.com/ishidawataru/sctp
# github.com/jinzhu/copier v0.3.5
github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit
github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.4
+# github.com/klauspost/compress v1.15.6
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -478,9 +481,9 @@ github.com/matttproud/golang_protobuf_extensions/pbutil
github.com/miekg/pkcs11
# github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/mistifyio/go-zfs
-# github.com/moby/sys/mount v0.2.0
+# github.com/moby/sys/mount v0.3.3
github.com/moby/sys/mount
-# github.com/moby/sys/mountinfo v0.6.1
+# github.com/moby/sys/mountinfo v0.6.2
github.com/moby/sys/mountinfo
# github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
## explicit
@@ -553,10 +556,13 @@ github.com/opencontainers/go-digest
## explicit
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.2
+# github.com/opencontainers/runc v1.1.3 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
## explicit
github.com/opencontainers/runc/libcontainer/apparmor
github.com/opencontainers/runc/libcontainer/cgroups
+github.com/opencontainers/runc/libcontainer/cgroups/fs
+github.com/opencontainers/runc/libcontainer/cgroups/fs2
+github.com/opencontainers/runc/libcontainer/cgroups/fscommon
github.com/opencontainers/runc/libcontainer/configs
github.com/opencontainers/runc/libcontainer/devices
github.com/opencontainers/runc/libcontainer/user
@@ -589,7 +595,6 @@ github.com/openshift/imagebuilder/strslice
github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
# github.com/pkg/errors v0.9.1
-## explicit
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
@@ -626,13 +631,13 @@ github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp
github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp
github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy
github.com/rootless-containers/rootlesskit/pkg/port/portutil
-# github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921
+# github.com/seccomp/libseccomp-golang v0.10.0
github.com/seccomp/libseccomp-golang
# github.com/sirupsen/logrus v1.8.1
## explicit
github.com/sirupsen/logrus
github.com/sirupsen/logrus/hooks/syslog
-# github.com/spf13/cobra v1.4.0
+# github.com/spf13/cobra v1.5.0
## explicit
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
@@ -640,11 +645,11 @@ github.com/spf13/cobra
github.com/spf13/pflag
# github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
github.com/stefanberger/go-pkcs11uri
-# github.com/stretchr/testify v1.7.1
+# github.com/stretchr/testify v1.8.0
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.7.0
+# github.com/sylabs/sif/v2 v2.7.1
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
@@ -669,7 +674,7 @@ github.com/ulikunitz/xz/lzma
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v7 v7.4.1
+# github.com/vbauerster/mpb/v7 v7.4.2
## explicit
github.com/vbauerster/mpb/v7
github.com/vbauerster/mpb/v7/cwriter
@@ -734,7 +739,7 @@ golang.org/x/net/trace
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
+# golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -743,7 +748,7 @@ golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+# golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
## explicit
golang.org/x/term
# golang.org/x/text v0.3.7
@@ -861,9 +866,10 @@ gopkg.in/square/go-jose.v2/json
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v1
# gopkg.in/yaml.v2 v2.4.0
-## explicit
gopkg.in/yaml.v2
-# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
+# gopkg.in/yaml.v3 v3.0.1
+## explicit
gopkg.in/yaml.v3
# sigs.k8s.io/yaml v1.3.0
sigs.k8s.io/yaml
+# github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc