-rw-r--r-- .cirrus.yml | 264
-rw-r--r-- .papr.yml | 121
-rw-r--r-- .papr_prepare.sh | 7
-rwxr-xr-x .tool/lint | 2
-rwxr-xr-x API.md | 288
-rw-r--r-- CONTRIBUTING.md | 23
-rw-r--r-- Dockerfile | 9
-rw-r--r-- Dockerfile.CentOS | 2
-rw-r--r-- Dockerfile.Fedora | 4
-rw-r--r-- Makefile | 32
-rw-r--r-- OWNERS | 5
-rw-r--r-- README.md | 39
-rw-r--r-- RELEASE_NOTES.md | 66
-rw-r--r-- changelog.txt | 314
-rw-r--r-- cmd/podman/build.go | 15
-rw-r--r-- cmd/podman/checkpoint.go | 16
-rw-r--r-- cmd/podman/commit.go | 2
-rw-r--r-- cmd/podman/common.go | 14
-rw-r--r-- cmd/podman/container.go | 3
-rw-r--r-- cmd/podman/containers_prune.go | 74
-rw-r--r-- cmd/podman/create.go | 78
-rw-r--r-- cmd/podman/create_cli.go | 11
-rw-r--r-- cmd/podman/exists.go | 120
-rw-r--r-- cmd/podman/generate.go | 23
-rw-r--r-- cmd/podman/generate_kube.go | 111
-rw-r--r-- cmd/podman/image.go | 4
-rw-r--r-- cmd/podman/images.go | 16
-rw-r--r-- cmd/podman/images_prune.go | 34
-rw-r--r-- cmd/podman/import.go | 2
-rw-r--r-- cmd/podman/inspect.go | 6
-rw-r--r-- cmd/podman/kill.go | 23
-rw-r--r-- cmd/podman/libpodruntime/runtime.go | 36
-rw-r--r-- cmd/podman/login.go | 67
-rw-r--r-- cmd/podman/logs.go | 17
-rw-r--r-- cmd/podman/main.go | 15
-rw-r--r-- cmd/podman/mount.go | 21
-rw-r--r-- cmd/podman/pause.go | 17
-rw-r--r-- cmd/podman/pod.go | 1
-rw-r--r-- cmd/podman/pod_create.go | 23
-rw-r--r-- cmd/podman/pod_stop.go | 11
-rw-r--r-- cmd/podman/ps.go | 7
-rw-r--r-- cmd/podman/pull.go | 10
-rw-r--r-- cmd/podman/push.go | 16
-rw-r--r-- cmd/podman/restart.go | 83
-rw-r--r-- cmd/podman/restore.go | 13
-rw-r--r-- cmd/podman/rm.go | 26
-rw-r--r-- cmd/podman/rmi.go | 44
-rw-r--r-- cmd/podman/run.go | 7
-rw-r--r-- cmd/podman/runlabel.go | 80
-rw-r--r-- cmd/podman/save.go | 2
-rw-r--r-- cmd/podman/search.go | 70
-rw-r--r-- cmd/podman/shared/container.go | 92
-rw-r--r-- cmd/podman/shared/funcs.go | 36
-rw-r--r-- cmd/podman/shared/funcs_test.go | 9
-rw-r--r-- cmd/podman/shared/parallel.go | 10
-rw-r--r-- cmd/podman/shared/pod.go | 37
-rw-r--r-- cmd/podman/shared/prune.go | 24
-rw-r--r-- cmd/podman/start.go | 14
-rw-r--r-- cmd/podman/stop.go | 30
-rw-r--r-- cmd/podman/trust.go | 293
-rw-r--r-- cmd/podman/unpause.go | 18
-rw-r--r-- cmd/podman/utils.go | 51
-rw-r--r-- cmd/podman/varlink/io.podman.varlink | 230
-rw-r--r-- cmd/podman/version.go | 33
-rw-r--r-- cmd/podman/volume.go | 26
-rw-r--r-- cmd/podman/volume_create.go | 97
-rw-r--r-- cmd/podman/volume_inspect.go | 63
-rw-r--r-- cmd/podman/volume_ls.go | 308
-rw-r--r-- cmd/podman/volume_prune.go | 86
-rw-r--r-- cmd/podman/volume_rm.go | 71
-rw-r--r-- commands.md | 6
-rw-r--r-- completions/bash/podman | 1296
-rw-r--r-- contrib/cirrus/README.md | 217
-rwxr-xr-x contrib/cirrus/build_vm_images.sh | 41
-rwxr-xr-x contrib/cirrus/integration_test.sh | 9
-rw-r--r-- contrib/cirrus/lib.sh | 114
-rwxr-xr-x contrib/cirrus/optional_system_test.sh | 24
-rw-r--r-- contrib/cirrus/packer/.gitignore | 7
-rw-r--r-- contrib/cirrus/packer/Makefile | 108
-rw-r--r-- contrib/cirrus/packer/README.md | 3
-rw-r--r-- contrib/cirrus/packer/centos_setup.sh | 2
-rw-r--r-- contrib/cirrus/packer/fah_base-setup.sh | 45
-rw-r--r-- contrib/cirrus/packer/fah_setup.sh | 23
-rw-r--r-- contrib/cirrus/packer/fedora_base-setup.sh | 27
-rw-r--r-- contrib/cirrus/packer/fedora_setup.sh | 6
-rw-r--r-- contrib/cirrus/packer/image-builder-image_base-setup.sh | 75
-rw-r--r-- contrib/cirrus/packer/libpod_base_images.yml | 193
-rw-r--r-- contrib/cirrus/packer/libpod_images.json | 130
-rw-r--r-- contrib/cirrus/packer/libpod_images.yml | 96
-rw-r--r-- contrib/cirrus/packer/make-user-data.sh | 20
l--------- contrib/cirrus/packer/prior_fedora_base-setup.sh | 1
-rw-r--r-- contrib/cirrus/packer/rhel_base-setup.sh | 52
-rw-r--r-- contrib/cirrus/packer/rhel_setup.sh | 40
-rw-r--r-- contrib/cirrus/packer/ubuntu_setup.sh | 13
-rwxr-xr-x contrib/cirrus/setup_environment.sh | 20
-rwxr-xr-x contrib/cirrus/success.sh | 16
-rwxr-xr-x contrib/cirrus/system_test.sh (renamed from contrib/cirrus/verify_source.sh) | 13
-rwxr-xr-x contrib/cirrus/unit_test.sh | 13
-rw-r--r-- contrib/gate/Dockerfile | 69
-rw-r--r-- contrib/gate/README.md | 4
-rwxr-xr-x contrib/gate/entrypoint.sh | 15
-rw-r--r-- contrib/python/podman/Makefile | 8
-rw-r--r-- contrib/python/podman/podman/libs/_containers_attach.py | 8
-rw-r--r-- contrib/python/podman/podman/libs/containers.py | 83
-rw-r--r-- contrib/python/podman/podman/libs/images.py | 7
-rw-r--r-- contrib/python/podman/test/test_containers.py | 2
-rw-r--r-- contrib/python/podman/test/test_images.py | 2
-rwxr-xr-x contrib/python/podman/test/test_runner.sh | 1
-rw-r--r-- contrib/python/pypodman/Makefile | 6
-rw-r--r-- contrib/python/pypodman/docs/man1/pypodman.1 | 2
-rw-r--r-- contrib/python/pypodman/pypodman/lib/__init__.py | 9
-rw-r--r-- contrib/python/pypodman/pypodman/lib/action_base.py | 30
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/__init__.py | 4
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/_create_args.py | 61
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/commit_action.py | 27
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/create_action.py | 2
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/history_action.py | 10
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/images_action.py | 6
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/info_action.py | 4
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/inspect_action.py | 23
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/kill_action.py | 12
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pause_action.py | 4
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/create_parser.py | 9
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/kill_parser.py | 14
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/pause_parser.py | 6
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py | 15
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/remove_parser.py | 13
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/restart_parser.py | 6
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/start_parser.py | 6
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/stop_parser.py | 6
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod/unpause_parser.py | 6
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/pod_action.py | 2
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/port_action.py | 6
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/ps_action.py | 19
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/push_action.py | 10
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/restart_action.py | 4
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/rm_action.py | 12
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/rmi_action.py | 12
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/run_action.py | 2
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/search_action.py | 16
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/start_action.py | 71
-rw-r--r-- contrib/python/pypodman/pypodman/lib/actions/version_action.py | 35
-rw-r--r-- contrib/python/pypodman/pypodman/lib/parser_actions.py | 133
-rw-r--r-- contrib/python/pypodman/pypodman/lib/podman_parser.py | 29
-rw-r--r-- contrib/spec/podman.spec.in | 12
-rw-r--r-- docs/libpod.conf.5.md | 12
-rw-r--r-- docs/podman-container-checkpoint.1.md | 19
-rw-r--r-- docs/podman-container-exists.1.md | 40
-rw-r--r-- docs/podman-container-prune.1.md | 31
-rw-r--r-- docs/podman-container-restore.1.md | 16
-rw-r--r-- docs/podman-container-runlabel.1.md | 4
-rw-r--r-- docs/podman-container.1.md | 3
-rw-r--r-- docs/podman-create.1.md | 38
-rw-r--r-- docs/podman-generate-kube.1.md | 151
-rw-r--r-- docs/podman-generate.1.md | 19
-rw-r--r-- docs/podman-image-exists.1.md | 40
-rw-r--r-- docs/podman-image-prune.1.md | 32
-rw-r--r-- docs/podman-image-trust.1.md | 81
-rw-r--r-- docs/podman-image.1.md | 3
-rw-r--r-- docs/podman-images.1.md | 44
-rw-r--r-- docs/podman-login.1.md | 8
-rw-r--r-- docs/podman-pod-create.1.md | 9
-rw-r--r-- docs/podman-pod-exists.1.md | 40
-rw-r--r-- docs/podman-pod-stop.1.md | 34
-rw-r--r-- docs/podman-ps.1.md | 9
-rw-r--r-- docs/podman-pull.1.md | 4
-rw-r--r-- docs/podman-push.1.md | 4
-rw-r--r-- docs/podman-rm.1.md | 19
-rw-r--r-- docs/podman-rmi.1.md | 18
-rw-r--r-- docs/podman-run.1.md | 36
-rw-r--r-- docs/podman-search.1.md | 4
-rw-r--r-- docs/podman-version.1.md | 23
-rw-r--r-- docs/podman-volume-create.1.md | 48
-rw-r--r-- docs/podman-volume-inspect.1.md | 45
-rw-r--r-- docs/podman-volume-ls.1.md | 49
-rw-r--r-- docs/podman-volume-prune.1.md | 38
-rw-r--r-- docs/podman-volume-rm.1.md | 45
-rw-r--r-- docs/podman-volume.1.md | 23
-rw-r--r-- docs/podman.1.md | 35
-rw-r--r-- docs/tutorials/podman_tutorial.md | 101
-rwxr-xr-x hack/get_ci_vm.sh | 85
-rw-r--r-- install.md | 2
-rw-r--r-- libpod/boltdb_state.go | 463
-rw-r--r-- libpod/boltdb_state_internal.go | 177
-rw-r--r-- libpod/common/common.go | 23
-rw-r--r-- libpod/common/docker_registry_options.go | 35
-rw-r--r-- libpod/common/output_interfaces.go | 1
-rw-r--r-- libpod/common_test.go | 5
-rw-r--r-- libpod/container.go | 34
-rw-r--r-- libpod/container_api.go | 87
-rw-r--r-- libpod/container_easyjson.go | 13
-rw-r--r-- libpod/container_graph_test.go | 4
-rw-r--r-- libpod/container_inspect.go | 16
-rw-r--r-- libpod/container_internal.go | 343
-rw-r--r-- libpod/container_internal_linux.go | 405
-rw-r--r-- libpod/container_internal_unsupported.go | 4
-rw-r--r-- libpod/errors.go | 14
-rw-r--r-- libpod/image/docker_registry_options.go | 5
-rw-r--r-- libpod/image/errors.go | 15
-rw-r--r-- libpod/image/image.go | 34
-rw-r--r-- libpod/image/image_test.go | 8
-rw-r--r-- libpod/image/prune.go | 26
-rw-r--r-- libpod/image/pull.go | 33
-rw-r--r-- libpod/image/utils.go | 22
-rw-r--r-- libpod/in_memory_state.go | 180
-rw-r--r-- libpod/info.go | 2
-rw-r--r-- libpod/kube.go | 440
-rw-r--r-- libpod/mounts_linux.go | 18
-rw-r--r-- libpod/networking_linux.go | 11
-rw-r--r-- libpod/oci.go | 50
-rw-r--r-- libpod/oci_linux.go | 19
-rw-r--r-- libpod/oci_unsupported.go | 2
-rw-r--r-- libpod/options.go | 172
-rw-r--r-- libpod/pod.go | 4
-rw-r--r-- libpod/pod_api.go | 17
-rw-r--r-- libpod/pod_easyjson.go | 128
-rw-r--r-- libpod/pod_internal.go | 2
-rw-r--r-- libpod/runtime.go | 202
-rw-r--r-- libpod/runtime_ctr.go | 41
-rw-r--r-- libpod/runtime_img.go | 35
-rw-r--r-- libpod/runtime_pod_infra_linux.go | 10
-rw-r--r-- libpod/runtime_volume.go | 107
-rw-r--r-- libpod/runtime_volume_linux.go | 132
-rw-r--r-- libpod/state.go | 49
-rw-r--r-- libpod/state_test.go | 7
-rw-r--r-- libpod/testdata/config.toml | 2
-rw-r--r-- libpod/util.go | 69
-rw-r--r-- libpod/util_linux.go | 24
-rw-r--r-- libpod/volume.go | 63
-rw-r--r-- libpod/volume_internal.go | 29
-rw-r--r-- pkg/inspect/inspect.go | 1
-rw-r--r-- pkg/lookup/lookup.go | 18
-rw-r--r-- pkg/namespaces/namespaces.go | 7
-rw-r--r-- pkg/registries/registries.go | 37
-rw-r--r-- pkg/resolvconf/resolvconf.go | 12
-rw-r--r-- pkg/rootless/rootless_linux.go | 40
-rw-r--r-- pkg/secrets/secrets.go | 9
-rw-r--r-- pkg/spec/createconfig.go | 12
-rw-r--r-- pkg/spec/spec.go | 3
-rw-r--r-- pkg/trust/trust.go | 250
-rw-r--r-- pkg/util/utils.go | 101
-rw-r--r-- pkg/varlinkapi/containers.go | 49
-rw-r--r-- pkg/varlinkapi/containers_create.go | 9
-rw-r--r-- pkg/varlinkapi/images.go | 144
-rw-r--r-- pkg/varlinkapi/mount.go | 49
-rw-r--r-- pkg/varlinkapi/pods.go | 24
-rw-r--r-- test/README.md | 61
-rw-r--r-- test/bin2img/bin2img.go | 5
-rw-r--r-- test/e2e/attach_test.go | 5
-rw-r--r-- test/e2e/checkpoint_test.go | 172
-rw-r--r-- test/e2e/commit_test.go | 5
-rw-r--r-- test/e2e/create_staticip_test.go | 88
-rw-r--r-- test/e2e/create_test.go | 95
-rw-r--r-- test/e2e/diff_test.go | 5
-rw-r--r-- test/e2e/exec_test.go | 5
-rw-r--r-- test/e2e/exists_test.go | 117
-rw-r--r-- test/e2e/export_test.go | 5
-rw-r--r-- test/e2e/generate_kube_test.go | 106
-rw-r--r-- test/e2e/history_test.go | 5
-rw-r--r-- test/e2e/images_test.go | 9
-rw-r--r-- test/e2e/import_test.go | 5
-rw-r--r-- test/e2e/info_test.go | 5
-rw-r--r-- test/e2e/inspect_test.go | 5
-rw-r--r-- test/e2e/kill_test.go | 5
-rw-r--r-- test/e2e/libpod_suite_test.go | 488
-rw-r--r-- test/e2e/load_test.go | 9
-rw-r--r-- test/e2e/logs_test.go | 5
-rw-r--r-- test/e2e/mount_test.go | 5
-rw-r--r-- test/e2e/namespace_test.go | 5
-rw-r--r-- test/e2e/pause_test.go | 24
-rw-r--r-- test/e2e/pod_create_test.go | 44
-rw-r--r-- test/e2e/pod_infra_container_test.go | 5
-rw-r--r-- test/e2e/pod_inspect_test.go | 5
-rw-r--r-- test/e2e/pod_kill_test.go | 5
-rw-r--r-- test/e2e/pod_pause_test.go | 5
-rw-r--r-- test/e2e/pod_pod_namespaces.go | 5
-rw-r--r-- test/e2e/pod_ps_test.go | 5
-rw-r--r-- test/e2e/pod_restart_test.go | 5
-rw-r--r-- test/e2e/pod_rm_test.go | 5
-rw-r--r-- test/e2e/pod_start_test.go | 5
-rw-r--r-- test/e2e/pod_stats_test.go | 5
-rw-r--r-- test/e2e/pod_stop_test.go | 5
-rw-r--r-- test/e2e/pod_top_test.go | 5
-rw-r--r-- test/e2e/port_test.go | 5
-rw-r--r-- test/e2e/prune_test.go | 88
-rw-r--r-- test/e2e/ps_test.go | 5
-rw-r--r-- test/e2e/pull_test.go | 13
-rw-r--r-- test/e2e/push_test.go | 45
-rw-r--r-- test/e2e/refresh_test.go | 13
-rw-r--r-- test/e2e/restart_test.go | 7
-rw-r--r-- test/e2e/rm_test.go | 5
-rw-r--r-- test/e2e/rmi_test.go | 26
-rw-r--r-- test/e2e/rootless_test.go | 33
-rw-r--r-- test/e2e/run_cgroup_parent_test.go | 11
-rw-r--r-- test/e2e/run_cleanup_test.go | 9
-rw-r--r-- test/e2e/run_cpu_test.go | 5
-rw-r--r-- test/e2e/run_device_test.go | 5
-rw-r--r-- test/e2e/run_dns_test.go | 5
-rw-r--r-- test/e2e/run_entrypoint_test.go | 5
-rw-r--r-- test/e2e/run_exit_test.go | 5
-rw-r--r-- test/e2e/run_memory_test.go | 5
-rw-r--r-- test/e2e/run_networking_test.go | 44
-rw-r--r-- test/e2e/run_ns_test.go | 11
-rw-r--r-- test/e2e/run_passwd_test.go | 5
-rw-r--r-- test/e2e/run_privileged_test.go | 17
-rw-r--r-- test/e2e/run_restart_test.go | 7
-rw-r--r-- test/e2e/run_selinux_test.go | 5
-rw-r--r-- test/e2e/run_signal_test.go | 23
-rw-r--r-- test/e2e/run_staticip_test.go | 16
-rw-r--r-- test/e2e/run_test.go | 77
-rw-r--r-- test/e2e/run_userns_test.go | 5
-rw-r--r-- test/e2e/runlabel_test.go | 5
-rw-r--r-- test/e2e/save_test.go | 5
-rw-r--r-- test/e2e/search_test.go | 19
-rw-r--r-- test/e2e/start_test.go | 31
-rw-r--r-- test/e2e/stats_test.go | 5
-rw-r--r-- test/e2e/stop_test.go | 19
-rw-r--r-- test/e2e/systemd_test.go | 81
-rw-r--r-- test/e2e/tag_test.go | 5
-rw-r--r-- test/e2e/top_test.go | 5
-rw-r--r-- test/e2e/trust_test.go | 72
-rw-r--r-- test/e2e/version_test.go | 5
-rw-r--r-- test/e2e/volume_create_test.go | 60
-rw-r--r-- test/e2e/volume_inspect_test.go | 77
-rw-r--r-- test/e2e/volume_ls_test.go | 84
-rw-r--r-- test/e2e/volume_prune_test.go | 64
-rw-r--r-- test/e2e/volume_rm_test.go | 91
-rw-r--r-- test/e2e/wait_test.go | 5
-rw-r--r-- test/goecho/goecho.go | 29
-rw-r--r-- test/install/Dockerfile.Fedora | 4
-rw-r--r-- test/system/libpod_suite_test.go | 217
-rw-r--r-- test/system/version_test.go | 51
-rw-r--r-- test/utils/common_function_test.go | 150
-rw-r--r-- test/utils/podmansession_test.go | 90
-rw-r--r-- test/utils/podmantest_test.go | 74
-rw-r--r-- test/utils/utils.go | 432
-rw-r--r-- test/utils/utils_suite_test.go | 52
-rw-r--r-- troubleshooting.md | 60
-rw-r--r-- vendor.conf | 11
-rw-r--r-- vendor/github.com/containers/buildah/README.md | 2
-rw-r--r-- vendor/github.com/containers/buildah/buildah.go | 7
-rw-r--r-- vendor/github.com/containers/buildah/chroot/run.go | 45
-rw-r--r-- vendor/github.com/containers/buildah/commit.go | 62
-rw-r--r-- vendor/github.com/containers/buildah/common.go | 30
-rw-r--r-- vendor/github.com/containers/buildah/config.go | 34
-rw-r--r-- vendor/github.com/containers/buildah/delete.go | 3
-rw-r--r-- vendor/github.com/containers/buildah/docker/types.go | 5
-rw-r--r-- vendor/github.com/containers/buildah/image.go | 105
-rw-r--r-- vendor/github.com/containers/buildah/imagebuildah/build.go | 268
-rw-r--r-- vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go | 140
-rw-r--r-- vendor/github.com/containers/buildah/imagebuildah/util.go | 25
-rw-r--r-- vendor/github.com/containers/buildah/info.go | 207
-rw-r--r-- vendor/github.com/containers/buildah/new.go | 82
-rw-r--r-- vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go | 517
-rw-r--r-- vendor/github.com/containers/buildah/pkg/cli/common.go | 18
-rw-r--r-- vendor/github.com/containers/buildah/pkg/parse/parse.go | 2
-rw-r--r-- vendor/github.com/containers/buildah/pull.go | 20
-rw-r--r-- vendor/github.com/containers/buildah/run.go | 195
-rw-r--r-- vendor/github.com/containers/buildah/unshare/unshare.go | 4
-rw-r--r-- vendor/github.com/containers/buildah/util.go | 8
-rw-r--r-- vendor/github.com/containers/buildah/util/util.go | 66
-rw-r--r-- vendor/github.com/containers/buildah/vendor.conf | 11
-rw-r--r-- vendor/github.com/containers/image/copy/copy.go | 101
-rw-r--r-- vendor/github.com/containers/image/directory/directory_dest.go | 27
-rw-r--r-- vendor/github.com/containers/image/directory/directory_src.go | 4
-rw-r--r-- vendor/github.com/containers/image/docker/cache.go | 23
-rw-r--r-- vendor/github.com/containers/image/docker/docker_client.go | 144
-rw-r--r-- vendor/github.com/containers/image/docker/docker_image_dest.go | 159
-rw-r--r-- vendor/github.com/containers/image/docker/docker_image_src.go | 5
-rw-r--r-- vendor/github.com/containers/image/docker/tarfile/dest.go | 37
-rw-r--r-- vendor/github.com/containers/image/docker/tarfile/src.go | 4
-rw-r--r-- vendor/github.com/containers/image/image/docker_schema2.go | 7
-rw-r--r-- vendor/github.com/containers/image/image/oci.go | 3
-rw-r--r-- vendor/github.com/containers/image/oci/archive/oci_dest.go | 31
-rw-r--r-- vendor/github.com/containers/image/oci/archive/oci_src.go | 8
-rw-r--r-- vendor/github.com/containers/image/oci/layout/oci_dest.go | 33
-rw-r--r-- vendor/github.com/containers/image/oci/layout/oci_src.go | 6
-rw-r--r-- vendor/github.com/containers/image/openshift/openshift.go | 34
-rw-r--r-- vendor/github.com/containers/image/ostree/ostree_dest.go | 36
-rw-r--r-- vendor/github.com/containers/image/ostree/ostree_src.go | 6
-rw-r--r-- vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go | 329
-rw-r--r-- vendor/github.com/containers/image/pkg/blobinfocache/default.go | 63
-rw-r--r-- vendor/github.com/containers/image/pkg/blobinfocache/memory.go | 123
-rw-r--r-- vendor/github.com/containers/image/pkg/blobinfocache/none.go | 47
-rw-r--r-- vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go | 108
-rw-r--r-- vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go | 86
-rw-r--r-- vendor/github.com/containers/image/storage/storage_image.go | 110
-rw-r--r-- vendor/github.com/containers/image/tarball/tarball_src.go | 5
-rw-r--r-- vendor/github.com/containers/image/types/types.go | 128
-rw-r--r-- vendor/github.com/containers/image/vendor.conf | 3
-rw-r--r-- vendor/github.com/containers/storage/README.md | 2
-rw-r--r-- vendor/github.com/containers/storage/containers.go | 10
-rw-r--r-- vendor/github.com/containers/storage/containers_ffjson.go | 1
-rw-r--r-- vendor/github.com/containers/storage/drivers/aufs/aufs.go | 24
-rw-r--r-- vendor/github.com/containers/storage/drivers/btrfs/btrfs.go | 3
-rw-r--r-- vendor/github.com/containers/storage/drivers/copy/copy.go | 277
-rw-r--r-- vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 15
-rw-r--r-- vendor/github.com/containers/storage/drivers/devmapper/driver.go | 5
-rw-r--r-- vendor/github.com/containers/storage/drivers/driver.go | 1
-rw-r--r-- vendor/github.com/containers/storage/drivers/fsdiff.go | 5
-rw-r--r-- vendor/github.com/containers/storage/drivers/overlay/overlay.go | 25
-rw-r--r-- vendor/github.com/containers/storage/drivers/vfs/copy_linux.go | 7
-rw-r--r-- vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go | 9
-rw-r--r-- vendor/github.com/containers/storage/drivers/vfs/driver.go | 10
-rw-r--r-- vendor/github.com/containers/storage/drivers/windows/windows.go | 3
-rw-r--r-- vendor/github.com/containers/storage/drivers/zfs/zfs.go | 9
-rw-r--r-- vendor/github.com/containers/storage/layers.go | 17
-rw-r--r-- vendor/github.com/containers/storage/layers_ffjson.go | 2
-rw-r--r-- vendor/github.com/containers/storage/pkg/archive/archive.go | 5
-rw-r--r-- vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | 2
-rw-r--r-- vendor/github.com/containers/storage/pkg/idtools/parser.go | 56
-rw-r--r-- vendor/github.com/containers/storage/store.go | 118
-rw-r--r-- vendor/github.com/containers/storage/vendor.conf | 6
-rw-r--r-- vendor/github.com/google/shlex/COPYING | 202
-rw-r--r-- vendor/github.com/google/shlex/README | 2
-rw-r--r-- vendor/github.com/google/shlex/shlex.go | 416
-rw-r--r-- vendor/github.com/opencontainers/selinux/go-selinux/label/label.go | 18
-rw-r--r-- vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go | 13
-rw-r--r-- vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go | 17
-rw-r--r-- vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go | 13
-rw-r--r-- vendor/github.com/openshift/imagebuilder/builder.go | 39
-rw-r--r-- vendor/github.com/openshift/imagebuilder/dispatchers.go | 23
-rw-r--r-- vendor/github.com/openshift/imagebuilder/evaluator.go | 3
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go | 94
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h | 12
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go | 96
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go | 15
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go | 72
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go | 110
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go | 4
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go | 1
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go | 0
-rw-r--r-- vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go | 0
-rw-r--r-- version/version.go | 2
471 files changed, 17584 insertions, 4203 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index f78205a49..7afd8f0b3 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -5,48 +5,148 @@
# and storage.
gcp_credentials: ENCRYPTED[885c6e4297dd8d6f67593c42b810353af0c505a7a670e2c6fd830c56e86bbb2debcc3c18f942d0d46ab36b63521061d4]
-# Default VM to use for testing, unless values overriden by specific tasks (below)
-gce_instance:
- image_project: "libpod-218412"
- zone: "us-central1-a" # Required by Cirrus for the time being
- cpu: 2
- memory: "4Gb"
- disk: 40
-
-# Main collection of env. varss to set for all scripts. All others
-# are cooked in by $SCRIPT_BASE/setup_environment.sh
+# Default timeout for each task
+timeout_in: 120m
+
+# Main collection of env. vars to set for all tasks and scripts.
env:
- FEDORA_CNI_COMMIT: "412b6d31280682bb4fab4446f113c22ff1886554"
- CNI_COMMIT: "7480240de9749f9a0a5c8614b17f1f03e0c06ab9"
- CRIO_COMMIT: "662dbb31b5d4f5ed54511a47cde7190c61c28677"
- CRIU_COMMIT: "584cbe4643c3fc7dc901ff08bf923ca0fe7326f9"
- RUNC_COMMIT: "78ef28e63bec2ee4c139b5e3e0d691eb9bdc748d"
+ ####
+ #### Global variables used for all tasks
+ ####
# File to update in home-dir with task-specific env. var values
ENVLIB: ".bash_profile"
# Overrides default location (/tmp/cirrus) for repo clone
- CIRRUS_WORKING_DIR: "/go/src/github.com/containers/libpod"
+ CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/libpod"
# Required so $ENVLIB gets loaded
CIRRUS_SHELL: "/bin/bash"
# Save a little typing (path relative to $CIRRUS_WORKING_DIR)
SCRIPT_BASE: "./contrib/cirrus"
PACKER_BASE: "./contrib/cirrus/packer"
+ CIRRUS_CLONE_DEPTH: 200
+
+ ####
+ #### Variables for composing new cache-images (used in PR testing) from
+ #### base-images (pre-existing in GCE)
+ ####
+ # Git commits to use while building dependencies into cache-images
+ FEDORA_CNI_COMMIT: "412b6d31280682bb4fab4446f113c22ff1886554"
+ CNI_COMMIT: "7480240de9749f9a0a5c8614b17f1f03e0c06ab9"
+ CRIO_COMMIT: "7a283c391abb7bd25086a8ff91dbb36ebdd24466"
+ CRIU_COMMIT: "c74b83cd49c00589c0c0468ba5fe685b67fdbd0a"
+ RUNC_COMMIT: "25f3f893c86d07426df93b7aa172f33fdf093fbd"
+ # CSV of cache-image names to build (see $PACKER_BASE/libpod_images.json)
+ PACKER_BUILDS: "ubuntu-18,fedora-29,fedora-28" # TODO: fah-29,rhel-7,centos-7
+ # Version of packer to use
+ PACKER_VER: "1.3.1"
+ # Google-maintained base-image names
+ UBUNTU_BASE_IMAGE: "ubuntu-1804-bionic-v20181203a"
+ CENTOS_BASE_IMAGE: "centos-7-v20181113"
+ # Manually produced base-image names (see $SCRIPT_BASE/README.md)
+ FEDORA_BASE_IMAGE: "fedora-cloud-base-29-1-2-1541789245"
+ PRIOR_FEDORA_BASE_IMAGE: "fedora-cloud-base-28-1-1-1544474897"
+ FAH_BASE_IMAGE: "fedora-atomichost-29-20181025-1-1541787861"
+ # RHEL image must be imported, google bills extra for their native image.
+ RHEL_BASE_IMAGE: "rhel-guest-image-7-6-210-x86-64-qcow2-1541783972"
+
+ ####
+ #### Credentials and other secret-sauces, decrypted at runtime when authorized.
+ ####
+ # Freenode IRC credentials for posting status messages
IRCID: ENCRYPTED[e87bba62a8e924dc70bdb2b66b16f6ab4a60d2870e6e5534ae9e2b0076f483c71c84091c655ca239101e6816c5ec0883]
+ # Command to register a RHEL VM to install/update packages
+ RHSM_COMMAND: ENCRYPTED[5caa5ff8c5370c3d25c7a1a28168501ab0fa2e5e3b627926f6eaba02b3fed965a7638a6151657809661f8c905c7dc187]
+ # Needed to build GCE images, within a GCE VM
+ SERVICE_ACCOUNT: ENCRYPTED[99e9a0b1c23f8dd29e83dfdf164f064cfd17afd9b895ca3b5e4c41170bd4290a8366fe2ad8e7a210b9f751711d1d002a]
+ # User ID for cirrus to ssh into VMs
+ GCE_SSH_USERNAME: cirrus-ci
+  # Name where this repository's cloud resources are located
+ GCP_PROJECT_ID: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
+
+ # Space separated list of environment variables to unset before testing
+ UNSET_ENV_VARS: >-
+ GCP_PROJECT_ID GCE_SSH_USERNAME SERVICE_ACCOUNT RHSM_COMMAND BUILT_IMAGE_SUFFIX
+ IRCID RHEL_BASE_IMAGE FAH_BASE_IMAGE FEDORA_BASE_IMAGE CENTOS_BASE_IMAGE
+ UBUNTU_BASE_IMAGE PACKER_VER PACKER_BUILDS RUNC_COMMIT CRIU_COMMIT
+ CRIO_COMMIT CNI_COMMIT FEDORA_CNI_COMMIT PACKER_BASE SCRIPT_BASE
+ CIRRUS_SHELL CIRRUS_WORKING_DIR ENVLIB BUILT_IMAGE_SUFFIX CIRRUS_CI
+ CI_NODE_INDEX CI_NODE_TOTAL CIRRUS_BASE_BRANCH CIRRUS_BASE_SHA
+ CIRRUS_BRANCH CIRRUS_BUILD_ID CIRRUS_CHANGE_IN_REPO CIRRUS_CLONE_DEPTH
+ CIRRUS_COMMIT_MESSAGE CIRRUS_CHANGE_MESSAGE CIRRUS_REPO_CLONE_HOST
+ CIRRUS_DEFAULT_BRANCH CIRRUS_PR CIRRUS_TAG CIRRUS_OS CIRRUS_TASK_NAME
+ CIRRUS_TASK_ID CIRRUS_REPO_NAME CIRRUS_REPO_OWNER CIRRUS_REPO_FULL_NAME
+ CIRRUS_REPO_CLONE_URL CIRRUS_SHELL CIRRUS_USER_COLLABORATOR CIRRUS_USER_PERMISSION
+ CIRRUS_WORKING_DIR CIRRUS_HTTP_CACHE_HOST PACKER_BUILDS BUILT_IMAGE_SUFFIX
+ XDG_DATA_DIRS XDG_RUNTIME_DIR XDG_SESSION_ID
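+  # (Assumption, for illustration only) the setup scripts are expected to
+  # consume this list roughly as:  for v in $UNSET_ENV_VARS; do unset $v; done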
+
+
+# Every *_task runs in parallel in separate VMs. The name prefix is only for reference
+# in the WebUI, and will be followed by matrix details. This task gates all others with
+# quick format, lint, and unit tests on the standard platform.
+gating_task:
-# Every *_task runs in parallel in separate VMs. The name prefix only for reference
-# in WebUI, and will be followed by matrix details. This task does all the
-# per-pr unit/integration testing.
-full_vm_testing_task:
+ env:
+ CIRRUS_WORKING_DIR: "/usr/src/libpod"
+
+ # Runs within Cirrus's "community cluster"
+ container:
+ image: "quay.io/libpod/gate:latest"
+ cpu: 4
+ memory: 12
+
+ gate_script:
+ - '/usr/local/bin/entrypoint.sh validate'
+ - '/usr/local/bin/entrypoint.sh lint'
+
+
+build_each_commit_task:
+
+ depends_on:
+ - "gating"
+
+ # $CIRRUS_BASE_BRANCH is only set when testing a PR
+ only_if: $CIRRUS_BRANCH != 'master'
gce_instance:
- # Generate multiple 'test' tasks, covering all possible
- # 'matrix' combinations. All run in parallel.
+ image_project: "libpod-218412"
+ zone: "us-central1-a" # Required by Cirrus for the time being
+ cpu: 2
+ memory: "4Gb"
+ disk: 40
matrix:
- # Images are generated separetly, from build_images_task (below)
- image_name: "ubuntu-1804-bionic-v20180911-libpod-63a86a18"
- # TODO: Make these work (also build_images_task below)
- #image_name: "rhel-server-ec2-7-5-165-1-libpod-fce09afe"
- #image_name: "centos-7-v20180911-libpod-fce09afe"
- #image_name: "fedora-cloud-base-28-1-1-7-libpod-fce09afe"
+ image_name: "fedora-29-libpod-0c954a67"
+
+ timeout_in: 20m
+
+ script:
+ - $SCRIPT_BASE/setup_environment.sh
+ - git fetch --depth $CIRRUS_CLONE_DEPTH origin $CIRRUS_BASE_BRANCH
+ - env GOPATH=/var/tmp/go/ make build-all-new-commits GIT_BASE_BRANCH=origin/$CIRRUS_BASE_BRANCH
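+    # (Assumption) the build-all-new-commits target is expected to rebuild the
+    # tree once per commit between origin/$CIRRUS_BASE_BRANCH and HEAD, so a PR
+    # containing an individually broken commit fails this task.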
+
+# This task does the unit and integration testing for every platform
+testing_task:
+
+ depends_on:
+ - "gating"
+ - "build_each_commit"
+
+ gce_instance:
+ image_project: "libpod-218412"
+ zone: "us-central1-a" # Required by Cirrus for the time being
+ cpu: 2
+ memory: "4Gb"
+ disk: 200 # see https://developers.google.com/compute/docs/disks#performance
+ # Generate multiple parallel tasks, covering all possible
+ # 'matrix' combinations.
+ matrix:
+ # Images are generated separately, from build_images_task (below)
+ image_name: "ubuntu-18-libpod-86d821ea"
+ image_name: "fedora-28-libpod-86d821ea"
+ image_name: "fedora-29-libpod-86d821ea"
+
+ # TODO: tests fail
+ # image_name: "rhel-7-something-something"
+ # image_name: "centos-7-something-something"
+ # image_name: "fah-29-libpod-5070733157859328"
timeout_in: 120m
@@ -55,70 +155,74 @@ full_vm_testing_task:
setup_environment_script: $SCRIPT_BASE/setup_environment.sh
# ...or lists of strings
- verify_source_script:
- - whoami # root!
- - $SCRIPT_BASE/verify_source.sh
-
- unit_test_script: $SCRIPT_BASE/unit_test.sh
+ unit_test_script:
+ - go version
+ - $SCRIPT_BASE/unit_test.sh
integration_test_script: $SCRIPT_BASE/integration_test.sh
- optional_system_test_script: $SCRIPT_BASE/optional_system_test.sh
- success_script: $SCRIPT_BASE/success.sh
+# Because system tests are stored within the repository, it is sometimes
+# necessary to execute them within a PR to validate changes.
+optional_testing_task:
+ # Only run system tests in PRs (not on merge) if magic string is present
+ # in the PR description. Post-merge system testing is assumed to happen
+ # later from OS distribution's build systems.
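+  # (Per the regex below, the magic string is the literal text
+  # '*** CIRRUS: SYSTEM TEST ***' anywhere in the PR description.)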
+ only_if: >-
+ $CIRRUS_BRANCH != 'master' &&
+ $CIRRUS_CHANGE_MESSAGE =~ '.*\*\*\*\s*CIRRUS:\s*SYSTEM\s*TEST\s*\*\*\*.*'
-# This task build new images for future PR testing, but only after a PR merge.
-# These images save needing to install/setup the same environment to test every
-# PR. The 'active' image for testing is selected by the 'image_name' items in
-# task above. Currently this requires manually updating them, but this could
-# be automated (see comment at end).
+ gce_instance:
+ image_project: "libpod-218412"
+ matrix:
+ image_name: "ubuntu-18-libpod-86d821ea"
+ image_name: "fedora-28-libpod-86d821ea"
+ image_name: "fedora-29-libpod-86d821ea"
+ # TODO: Make these work (also build_images_task below)
+ #image_name: "rhel-server-ec2-7-5-165-1-libpod-fce09afe"
+ #image_name: "centos-7-v20180911-libpod-fce09afe"
+
+ timeout_in: 60m
+
+ setup_environment_script: $SCRIPT_BASE/setup_environment.sh
+ system_test_script: $SCRIPT_BASE/system_test.sh
-build_vm_images_task:
- # Only produce new images after a PR merge
- only_if: $CIRRUS_BRANCH == 'master'
+
+# Build new cache-images for future PR testing, but only after a PR merge.
+# The cache-images save the install/setup time needed to test every PR. The 'active' images
+# are selected by the 'image_name' items in the tasks above. Currently this requires
+# manually updating the names, but this could be automated (see comment below).
+cache_images_task:
+ # Only produce new cache-images after a PR merge, and if a magic string
+ # is present in the most recent commit-message.
+ only_if: >-
+ $CIRRUS_BRANCH == 'master' &&
+ $CIRRUS_CHANGE_MESSAGE =~ '.*\*\*\*\s*CIRRUS:\s*REBUILD\s*IMAGES\s*\*\*\*.*'
# Require tests to pass first.
depends_on:
- - test # i.e. 'test_task'
-
- env:
- # CSV of packer builder names to enable (see $PACKER_BASE/libpod_images.json)
- PACKER_BUILDS: "ubuntu-18"
- # TODO: Make these work (also full_vm_testing_task above)
- # PACKER_BUILDS: "rhel-7,centos-7,fedora-28,ubuntu-18"
- CENTOS_BASE_IMAGE: "centos-7-v20180911"
- RHEL_BASE_IMAGE: "rhel-server-ec2-7-5-165-1"
- FEDORA_BASE_IMAGE: "fedora-cloud-base-28-1-1-7"
- UBUNTU_BASE_IMAGE: "ubuntu-1804-bionic-v20180911"
- # low-level base VM image name inputs to packer
-
- # Command to register a RHEL VM
- RHSM_COMMAND: ENCRYPTED[5caa5ff8c5370c3d25c7a1a28168501ab0fa2e5e3b627926f6eaba02b3fed965a7638a6151657809661f8c905c7dc187]
- # Additional environment variables needed to build GCE images, within a GCE VM
- SERVICE_ACCOUNT: ENCRYPTED[99e9a0b1c23f8dd29e83dfdf164f064cfd17afd9b895ca3b5e4c41170bd4290a8366fe2ad8e7a210b9f751711d1d002a]
- GCE_SSH_USERNAME: ENCRYPTED[a7706b9e4b8bbb47f76358df7407f4fffa2e8552531190cc0b3315180c4b50588f560c4f85731e99cb5f43a396778277]
- GCP_PROJECT_ID: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
- # Version of packer to use
- PACKER_VER: "1.3.1"
+ - "gating"
+ - "testing"
# VMs created by packer are not cleaned up by cirrus
auto_cancellation: $CI != "true"
gce_instance:
- image_name: "image-builder-image" # Simply CentOS 7 + packer dependencies
+ image_project: "libpod-218412"
+ zone: "us-central1-a" # Required by Cirrus for the time being
+ cpu: 4
+ memory: "4Gb"
+ disk: 200
+ image_name: "image-builder-image-1541772081" # Simply CentOS 7 + packer dependencies
# Additional permissions for building GCE images, within a GCE VM
scopes:
- compute
- devstorage.full_control
- # Doesn't need many local resources to run
- cpu: 2
- memory: "2Gb"
- disk: 20
environment_script: $SCRIPT_BASE/setup_environment.sh
build_vm_images_script: $SCRIPT_BASE/build_vm_images.sh
- # TODO,Continuous Delivery: Automaticly open a libpod PR after using 'sed' to replace
+ # TODO,Continuous Delivery: Automatically open a libpod PR after using 'sed' to replace
# the image_names with the new (just build) images. That will
# cause a new round of testing to happen (via the PR) using
# the new images. When all is good, the PR may be manually
@@ -128,3 +232,23 @@ build_vm_images_task:
# - clone_podman_release_branch.sh
# - modify_cirrus_yaml_image_names.sh
# - commit_and_create_upstream_pr.sh
+
+
+# Post message to IRC if everything passed
+success_task:
+
+ depends_on: # ignores any dependent task conditions
+ - "gating"
+ - "testing"
+ - "optional_testing"
+ - "cache_images"
+
+ env:
+ CIRRUS_WORKING_DIR: "/usr/src/libpod"
+
+ container:
+ image: "quay.io/libpod/gate:latest"
+ cpu: 1
+ memory: 1
+
+ success_script: $SCRIPT_BASE/success.sh
diff --git a/.papr.yml b/.papr.yml
index 26b527a36..ed20c6039 100644
--- a/.papr.yml
+++ b/.papr.yml
@@ -22,33 +22,33 @@ context: "FAH28 - Containerized (Podman in Podman)"
---
- host:
- distro: centos/7/atomic/smoketested
- specs:
- ram: 8192
- cpus: 4
- extra-repos:
- - name: epel
- metalink: https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
- gpgcheck: 0
- - name: cri-o
- baseurl: https://cbs.centos.org/repos/virt7-container-common-candidate/$basearch/os
- gpgcheck: 0
-
- required: true
+host:
+ distro: centos/7/atomic/smoketested
+ specs:
+ ram: 8192
+ cpus: 4
+extra-repos:
+ - name: epel
+ metalink: https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
+ gpgcheck: 0
+ - name: cri-o
+ baseurl: https://cbs.centos.org/repos/virt7-container-common-candidate/$basearch/os
+ gpgcheck: 0
- timeout: 90m
+required: true
- tests:
- - CONTAINER_RUNTIME="docker" sh .papr_prepare.sh
+timeout: 90m
- artifacts:
- - build.log
+tests:
+ - CONTAINER_RUNTIME="docker" sh .papr_prepare.sh
- context: "CAH 7-smoketested - Containerized (Podman in Docker)"
+artifacts:
+ - build.log
----
+context: "CAH 7-smoketested - Containerized (Podman in Docker)"
+#---
+#
#host:
# distro: centos/7/cloud
# specs:
@@ -95,82 +95,3 @@ context: "FAH28 - Containerized (Podman in Podman)"
#context: "CentOS 7 Cloud"
#
#---
-
-host:
- distro: fedora/28/cloud
- specs:
- ram: 8192
- cpus: 4
-packages:
- - btrfs-progs-devel
- - glib2-devel
- - glibc-devel
- - glibc-static
- - git
- - go-md2man
- - gpgme-devel
- - libassuan-devel
- - libgpg-error-devel
- - libseccomp-devel
- - libselinux-devel
- - ostree-devel
- - pkgconfig
- - make
- - nc
- - go-compilers-golang-compiler
- - podman
- - python3-varlink
- - python3-dateutil
- - python3-psutil
- - https://kojipkgs.fedoraproject.org//packages/runc/1.0.0/55.dev.git578fe65.fc28/x86_64/runc-1.0.0-55.dev.git578fe65.fc28.x86_64.rpm
-
-tests:
- - sed 's/^expand-check.*/expand-check=0/g' -i /etc/selinux/semanage.conf
- - yum -y reinstall container-selinux
- - sh .papr.sh -b -i -t -p
-
-required: false
-
-timeout: 90m
-context: "Fedora 28 Cloud"
-
----
-
-host:
- distro: fedora/29/cloud/pungi
- specs:
- ram: 8192
- cpus: 4
-packages:
- - btrfs-progs-devel
- - glib2-devel
- - glibc-devel
- - glibc-static
- - git
- - go-md2man
- - gpgme-devel
- - libassuan-devel
- - libgpg-error-devel
- - libseccomp-devel
- - libselinux-devel
- - ostree-devel
- - pkgconfig
- - make
- - nc
- - go-compilers-golang-compiler
- - podman
- - python3-varlink
- - python3-dateutil
- - python3-psutil
- - container-selinux
- - https://kojipkgs.fedoraproject.org//packages/runc/1.0.0/54.dev.git00dc700.fc28/x86_64/runc-1.0.0-54.dev.git00dc700.fc28.x86_64.rpm
-
-tests:
- - sed 's/^expand-check.*/expand-check=0/g' -i /etc/selinux/semanage.conf
- - dnf -y reinstall container-selinux
- - sh .papr.sh -b -i -t
-
-required: false
-
-timeout: 90m
-context: "Fedora 29 Cloud"
diff --git a/.papr_prepare.sh b/.papr_prepare.sh
index e0657dcd2..5d7d21530 100644
--- a/.papr_prepare.sh
+++ b/.papr_prepare.sh
@@ -10,6 +10,13 @@ if [[ ${DIST} != "Fedora" ]]; then
PYTHON=python
fi
+# Since CRIU 3.11 has been pushed to Fedora 28 the checkpoint/restore
+# test cases are actually run. As CRIU uses iptables to lock and unlock
+# the network during checkpoint and restore it needs the following two
+# modules loaded.
+modprobe ip6table_nat || :
+modprobe iptable_nat || :
+
# Build the test image
${CONTAINER_RUNTIME} build -t ${IMAGE} -f Dockerfile.${DIST} . 2>build.log
diff --git a/.tool/lint b/.tool/lint
index b7006c8fd..f7bf81c1d 100755
--- a/.tool/lint
+++ b/.tool/lint
@@ -40,6 +40,8 @@ ${LINTER} \
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$'\
--exclude='duplicate of.*_test.go.*\(dupl\)$'\
--exclude='cmd\/client\/.*\.go.*\(dupl\)$'\
+ --exclude='libpod\/.*_easyjson.go:.*'\
+ --exclude='.* other occurrence\(s\) of "(container|host|tmpfs|unknown)" found in: .*\(goconst\)$'\
--exclude='vendor\/.*'\
--exclude='podman\/.*'\
--exclude='server\/seccomp\/.*\.go.*$'\
diff --git a/API.md b/API.md
index 34d401aca..7bc866d98 100755
--- a/API.md
+++ b/API.md
@@ -9,6 +9,14 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func Commit(name: string, image_name: string, changes: []string, author: string, message: string, pause: bool, manifestType: string) string](#Commit)
+[func ContainerCheckpoint(name: string, keep: bool, leaveRunning: bool, tcpEstablished: bool) string](#ContainerCheckpoint)
+
+[func ContainerExists(name: string) int](#ContainerExists)
+
+[func ContainerRestore(name: string, keep: bool, tcpEstablished: bool) string](#ContainerRestore)
+
+[func ContainerRunlabel(runlabel: Runlabel) ](#ContainerRunlabel)
+
[func CreateContainer(create: Create) string](#CreateContainer)
[func CreateImage() NotImplemented](#CreateImage)
@@ -43,6 +51,8 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func HistoryImage(name: string) ImageHistory](#HistoryImage)
+[func ImageExists(name: string) int](#ImageExists)
+
[func ImportImage(source: string, reference: string, message: string, changes: []string) string](#ImportImage)
[func InspectContainer(name: string) string](#InspectContainer)
@@ -57,6 +67,10 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func ListContainerChanges(name: string) ContainerChanges](#ListContainerChanges)
+[func ListContainerMounts() []string](#ListContainerMounts)
+
+[func ListContainerPorts(name: string) NotImplemented](#ListContainerPorts)
+
[func ListContainerProcesses(name: string, opts: []string) []string](#ListContainerProcesses)
[func ListContainers() ListContainerData](#ListContainers)
@@ -65,15 +79,17 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func ListPods() ListPodData](#ListPods)
+[func MountContainer(name: string) string](#MountContainer)
+
[func PauseContainer(name: string) string](#PauseContainer)
[func PausePod(name: string) string](#PausePod)
[func Ping() StringResponse](#Ping)
-[func PullImage(name: string) string](#PullImage)
+[func PullImage(name: string, certDir: string, creds: string, signaturePolicy: string, tlsVerify: bool) string](#PullImage)
-[func PushImage(name: string, tag: string, tlsverify: bool) string](#PushImage)
+[func PushImage(name: string, tag: string, tlsverify: bool, signaturePolicy: string, creds: string, certDir: string, compress: bool, format: string, removeSignatures: bool, signBy: string) string](#PushImage)
[func RemoveContainer(name: string, force: bool) string](#RemoveContainer)
@@ -97,12 +113,14 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func StopContainer(name: string, timeout: int) string](#StopContainer)
-[func StopPod(name: string) string](#StopPod)
+[func StopPod(name: string, timeout: int) string](#StopPod)
[func TagImage(name: string, tagged: string) string](#TagImage)
[func TopPod() NotImplemented](#TopPod)
+[func UnmountContainer(name: string, force: bool) ](#UnmountContainer)
+
[func UnpauseContainer(name: string) string](#UnpauseContainer)
[func UnpausePod(name: string) string](#UnpausePod)
@@ -165,6 +183,8 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[type PodmanInfo](#PodmanInfo)
+[type Runlabel](#Runlabel)
+
[type Sockets](#Sockets)
[type StringResponse](#StringResponse)
@@ -211,6 +231,37 @@ attributes: _CMD, ENTRYPOINT, ENV, EXPOSE, LABEL, ONBUILD, STOPSIGNAL, USER, VOL
container while it is being committed, pass a _true_ bool for the pause argument. If the container cannot
be found by the ID or name provided, a (ContainerNotFound)[#ContainerNotFound] error will be returned; otherwise,
the resulting image's ID will be returned as a string.
+### <a name="ContainerCheckpoint"></a>func ContainerCheckpoint
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ContainerCheckpoint(name: [string](https://godoc.org/builtin#string), keep: [bool](https://godoc.org/builtin#bool), leaveRunning: [bool](https://godoc.org/builtin#bool), tcpEstablished: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
+ContainerCheckpoint performs a checkpoint on a container by its name or full/partial container
+ID. On successful checkpoint, the ID of the checkpointed container is returned.
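+#### Example
+A possible invocation (the reply field name and ID shown here are illustrative, not taken from a real run):
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ContainerCheckpoint '{"name": "foobar", "keep": false, "leaveRunning": false, "tcpEstablished": false}'
+{
+ "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+}
+~~~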
+### <a name="ContainerExists"></a>func ContainerExists
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ContainerExists(name: [string](https://godoc.org/builtin#string)) [int](https://godoc.org/builtin#int)</div>
+ContainerExists takes a full or partial container ID or name and returns an int indicating
+whether the container exists in local storage. A result of 0 means the container does
+exist; whereas a result of 1 means it could not be found.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ContainerExists '{"name": "flamboyant_payne"}'{
+ "exists": 0
+}
+~~~
+### <a name="ContainerRestore"></a>func ContainerRestore
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ContainerRestore(name: [string](https://godoc.org/builtin#string), keep: [bool](https://godoc.org/builtin#bool), tcpEstablished: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
+ContainerRestore restores a container that has been checkpointed. The container to be restored can
+be identified by its name or full/partial container ID. A successful restore will result in the return
+of the container's ID.
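+#### Example
+A possible invocation (the reply field name and ID shown here are illustrative, not taken from a real run):
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ContainerRestore '{"name": "foobar", "keep": false, "tcpEstablished": false}'
+{
+ "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+}
+~~~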
+### <a name="ContainerRunlabel"></a>func ContainerRunlabel
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ContainerRunlabel(runlabel: [Runlabel](#Runlabel)) </div>
+ContainerRunlabel executes a command as described by a given container image label.
### <a name="CreateContainer"></a>func CreateContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -253,12 +304,36 @@ $ varlink call unix:/run/podman/io.podman/io.podman.CreatePod '{"create": {"name
method DeleteStoppedContainers() [[]string](#[]string)</div>
DeleteStoppedContainers will delete all containers that are not running. It will return a list the deleted
container IDs. See also [RemoveContainer](RemoveContainer).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteStoppedContainers
+{
+ "containers": [
+ "451410b931d00def8aa9b4f8084e4d4a39e5e04ea61f358cf53a5cf95afcdcee",
+ "8b60f754a3e01389494a9581ade97d35c2765b6e2f19acd2d3040c82a32d1bc0",
+ "cf2e99d4d3cad6073df199ed32bbe64b124f3e1aba6d78821aa8460e70d30084",
+ "db901a329587312366e5ecff583d08f0875b4b79294322df67d90fc6eed08fc1"
+ ]
+}
+~~~
### <a name="DeleteUnusedImages"></a>func DeleteUnusedImages
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
method DeleteUnusedImages() [[]string](#[]string)</div>
DeleteUnusedImages deletes any images not associated with a container. The IDs of the deleted images are returned
in a string array.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteUnusedImages
+{
+ "images": [
+ "166ea6588079559c724c15223f52927f514f73dd5c5cf2ae2d143e3b2e6e9b52",
+ "da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
+ "3ef70f7291f47dfe2b82931a993e16f5a44a0e7a68034c3e0e086d77f5829adc",
+ "59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690"
+ ]
+}
+~~~
### <a name="ExportContainer"></a>func ExportContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -267,6 +342,13 @@ ExportContainer creates an image from a container. It takes the name or ID of a
path representing the target tarfile. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)
error will be returned.
The return value is the written tarfile.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ExportContainer '{"name": "flamboyant_payne", "path": "/tmp/payne.tar" }'
+{
+ "tarfile": "/tmp/payne.tar"
+}
+~~~
### <a name="ExportImage"></a>func ExportImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -358,6 +440,32 @@ method GetPod(name: [string](https://godoc.org/builtin#string)) [ListPodData](#L
GetPod takes a name or ID of a pod and returns single [ListPodData](#ListPodData)
structure. A [PodNotFound](#PodNotFound) error will be returned if the pod cannot be found.
See also [ListPods](ListPods).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.GetPod '{"name": "foobar"}'
+{
+ "pod": {
+ "cgroup": "machine.slice",
+ "containersinfo": [
+ {
+ "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45",
+ "name": "1840835294cf-infra",
+ "status": "running"
+ },
+ {
+ "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6",
+ "name": "upbeat_murdock",
+ "status": "running"
+ }
+ ],
+ "createdat": "2018-12-07 13:10:15.014139258 -0600 CST",
+ "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f",
+ "name": "foobar",
+ "numberofcontainers": "2",
+ "status": "Running"
+ }
+}
+~~~
### <a name="GetPodStats"></a>func GetPodStats
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -403,6 +511,20 @@ method HistoryImage(name: [string](https://godoc.org/builtin#string)) [ImageHist
HistoryImage takes the name or ID of an image and returns information about its history and layers. The returned
history is in the form of an array of ImageHistory structures. If the image cannot be found, an
[ImageNotFound](#ImageNotFound) error is returned.
+### <a name="ImageExists"></a>func ImageExists
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ImageExists(name: [string](https://godoc.org/builtin#string)) [int](https://godoc.org/builtin#int)</div>
+ImageExists takes a full or partial image ID or name and returns an int indicating whether
+the image exists in local storage. An int result of 0 means the image does exist in
+local storage; whereas 1 indicates the image does not exist in local storage.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ImageExists '{"name": "imageddoesntexist"}'
+{
+ "exists": 1
+}
+~~~
### <a name="ImportImage"></a>func ImportImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -447,12 +569,42 @@ Containers in a pod are killed independently. If there is an error killing one c
will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
If the pod was killed with no errors, the pod ID is returned.
See also [StopPod](StopPod).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.KillPod '{"name": "foobar", "signal": 15}'
+{
+ "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+}
+~~~
### <a name="ListContainerChanges"></a>func ListContainerChanges
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
method ListContainerChanges(name: [string](https://godoc.org/builtin#string)) [ContainerChanges](#ContainerChanges)</div>
ListContainerChanges takes a name or ID of a container and returns changes between the container and
its base image. It returns a struct of changed, deleted, and added path names.
+### <a name="ListContainerMounts"></a>func ListContainerMounts
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ListContainerMounts() [[]string](#[]string)</div>
+ListContainerMounts gathers all the mounted container mount points and returns them as an array
+of strings
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ListContainerMounts
+{
+ "mounts": [
+ "/var/lib/containers/storage/overlay/b215fb622c65ba3b06c6d2341be80b76a9de7ae415ce419e65228873d4f0dcc8/merged",
+ "/var/lib/containers/storage/overlay/5eaf806073f79c0ed9a695180ad598e34f963f7407da1d2ccf3560bdab49b26f/merged",
+ "/var/lib/containers/storage/overlay/1ecb6b1dbb251737c7a24a31869096839c3719d8b250bf075f75172ddcc701e1/merged",
+ "/var/lib/containers/storage/overlay/7137b28a3c422165fe920cba851f2f8da271c6b5908672c451ebda03ad3919e2/merged"
+ ]
+}
+~~~
+### <a name="ListContainerPorts"></a>func ListContainerPorts
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ListContainerPorts(name: [string](https://godoc.org/builtin#string)) [NotImplemented](#NotImplemented)</div>
+This function is not implemented yet.
### <a name="ListContainerProcesses"></a>func ListContainerProcesses
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -491,6 +643,61 @@ an image currently in storage. See also [InspectImage](InspectImage).
method ListPods() [ListPodData](#ListPodData)</div>
ListPods returns a list of pods in no particular order. They are
returned as an array of ListPodData structs. See also [GetPod](#GetPod).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ListPods
+{
+ "pods": [
+ {
+ "cgroup": "machine.slice",
+ "containersinfo": [
+ {
+ "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45",
+ "name": "1840835294cf-infra",
+ "status": "running"
+ },
+ {
+ "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6",
+ "name": "upbeat_murdock",
+ "status": "running"
+ }
+ ],
+ "createdat": "2018-12-07 13:10:15.014139258 -0600 CST",
+ "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f",
+ "name": "foobar",
+ "numberofcontainers": "2",
+ "status": "Running"
+ },
+ {
+ "cgroup": "machine.slice",
+ "containersinfo": [
+ {
+ "id": "1ca4b7bbba14a75ba00072d4b705c77f3df87db0109afaa44d50cb37c04a477e",
+ "name": "784306f655c6-infra",
+ "status": "running"
+ }
+ ],
+ "createdat": "2018-12-07 13:09:57.105112457 -0600 CST",
+ "id": "784306f655c6200aea321dd430ba685e9b2cc1f7d7528a72f3ff74ffb29485a2",
+ "name": "nostalgic_pike",
+ "numberofcontainers": "1",
+ "status": "Running"
+ }
+ ]
+}
+~~~
+### <a name="MountContainer"></a>func MountContainer
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method MountContainer(name: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
+MountContainer mounts a container by name or full/partial ID. Upon a successful mount, the destination
+mount is returned as a string.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.MountContainer '{"name": "jolly_shannon"}'{
+ "path": "/var/lib/containers/storage/overlay/419eeb04e783ea159149ced67d9fcfc15211084d65e894792a96bedfae0470ca/merged"
+}
+~~~
### <a name="PauseContainer"></a>func PauseContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -508,6 +715,13 @@ Containers in a pod are paused independently. If there is an error pausing one c
will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
If the pod was paused with no errors, the pod ID is returned.
See also [UnpausePod](#UnpausePod).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.PausePod '{"name": "foobar"}'
+{
+ "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+}
+~~~
### <a name="Ping"></a>func Ping
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -525,7 +739,7 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.Ping
### <a name="PullImage"></a>func PullImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method PullImage(name: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
+method PullImage(name: [string](https://godoc.org/builtin#string), certDir: [string](https://godoc.org/builtin#string), creds: [string](https://godoc.org/builtin#string), signaturePolicy: [string](https://godoc.org/builtin#string), tlsVerify: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
PullImage pulls an image from a repository to local storage. After the pull is successful, the ID of the image
is returned.
#### Example
@@ -538,9 +752,10 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.PullImage '{"name": "regi
### <a name="PushImage"></a>func PushImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method PushImage(name: [string](https://godoc.org/builtin#string), tag: [string](https://godoc.org/builtin#string), tlsverify: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
+method PushImage(name: [string](https://godoc.org/builtin#string), tag: [string](https://godoc.org/builtin#string), tlsverify: [bool](https://godoc.org/builtin#bool), signaturePolicy: [string](https://godoc.org/builtin#string), creds: [string](https://godoc.org/builtin#string), certDir: [string](https://godoc.org/builtin#string), compress: [bool](https://godoc.org/builtin#bool), format: [string](https://godoc.org/builtin#string), removeSignatures: [bool](https://godoc.org/builtin#bool), signBy: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
PushImage takes three input arguments: the name or ID of an image, the fully-qualified destination name of the image,
-and a boolean as to whether tls-verify should be used. It will return an [ImageNotFound](#ImageNotFound) error if
+and a boolean controlling whether tls-verify should be used (false disables TLS verification; leaving it unset keeps the default behavior).
+It will return an [ImageNotFound](#ImageNotFound) error if
the image cannot be found in local storage; otherwise the ID of the image will be returned on success.
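An illustrative call might look like the following; the image name, destination, and `tlsverify` value are placeholders, and the remaining optional parameters are simply omitted. On success the image ID is returned, as noted above.
~~~
$ varlink call -m unix:/run/podman/io.podman/io.podman.PushImage '{"name": "alpine", "tag": "quay.io/example/alpine:latest", "tlsverify": true}'
~~~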
### <a name="RemoveContainer"></a>func RemoveContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -671,8 +886,8 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.StopContainer '{"name": "
### <a name="StopPod"></a>func StopPod
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method StopPod(name: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
-StopPod stops containers in a pod. It takes the name or ID of a pod.
+method StopPod(name: [string](https://godoc.org/builtin#string), timeout: [int](https://godoc.org/builtin#int)) [string](https://godoc.org/builtin#string)</div>
+StopPod stops containers in a pod. It takes the name or ID of a pod and a timeout.
If the pod cannot be found, a [PodNotFound](#PodNotFound) error will be returned instead.
Containers in a pod are stopped independently. If there is an error stopping one container, the ID of those containers
will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
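A minimal sketch of a call, following the PausePod example above; the pod name and timeout value are illustrative, and the output shape is assumed to mirror PausePod:
~~~
$ varlink call -m unix:/run/podman/io.podman/io.podman.StopPod '{"name": "foobar", "timeout": 10}'
{
  "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
}
~~~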
@@ -696,6 +911,16 @@ be found, an [ImageNotFound](#ImageNotFound) error will be returned; otherwise,
method TopPod() [NotImplemented](#NotImplemented)</div>
This method has not been implemented yet.
+### <a name="UnmountContainer"></a>func UnmountContainer
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method UnmountContainer(name: [string](https://godoc.org/builtin#string), force: [bool](https://godoc.org/builtin#bool)) </div>
+UnmountContainer unmounts a container by its name or full/partial container ID.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.UnmountContainer '{"name": "jolly_shannon", "force": false}'
+{}
+~~~
### <a name="UnpauseContainer"></a>func UnpauseContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -713,6 +938,13 @@ Containers in a pod are unpaused independently. If there is an error unpausing o
will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
If the pod was unpaused with no errors, the pod ID is returned.
See also [PausePod](#PausePod).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.UnpausePod '{"name": "foobar"}'
+{
+ "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+}
+~~~
### <a name="UpdateContainer"></a>func UpdateContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -1267,9 +1499,8 @@ reason [string](https://godoc.org/builtin#string)
### <a name="PodCreate"></a>type PodCreate
PodCreate is an input structure for creating pods.
-It emulates options to podman pod create, however
-changing pause image name and pause container
-is not currently supported
+It emulates options to podman pod create. The infraCommand and
+infraImage options are not currently supported.
name [string](https://godoc.org/builtin#string)
@@ -1280,6 +1511,12 @@ labels [map[string]](#map[string])
share [[]string](#[]string)
infra [bool](https://godoc.org/builtin#bool)
+
+infraCommand [string](https://godoc.org/builtin#string)
+
+infraImage [string](https://godoc.org/builtin#string)
+
+publish [[]string](#[]string)
### <a name="PodmanInfo"></a>type PodmanInfo
PodmanInfo describes the Podman host and build
@@ -1293,6 +1530,33 @@ insecure_registries [[]string](#[]string)
store [InfoStore](#InfoStore)
podman [InfoPodmanBinary](#InfoPodmanBinary)
+### <a name="Runlabel"></a>type Runlabel
+
+Runlabel describes the required input for container runlabel
+
+image [string](https://godoc.org/builtin#string)
+
+authfile [string](https://godoc.org/builtin#string)
+
+certDir [string](https://godoc.org/builtin#string)
+
+creds [string](https://godoc.org/builtin#string)
+
+display [bool](https://godoc.org/builtin#bool)
+
+name [string](https://godoc.org/builtin#string)
+
+pull [bool](https://godoc.org/builtin#bool)
+
+signaturePolicyPath [string](https://godoc.org/builtin#string)
+
+tlsVerify [bool](https://godoc.org/builtin#bool)
+
+label [string](https://godoc.org/builtin#string)
+
+extraArgs [[]string](#[]string)
+
+opts [map[string]](#map[string])
### <a name="Sockets"></a>type Sockets
Sockets describes sockets location for a container
@@ -1336,7 +1600,7 @@ ImageNotFound means the image could not be found by the provided name or ID in l
NoContainerRunning means none of the containers requested are running in a command that requires a running container.
### <a name="NoContainersInPod"></a>type NoContainersInPod
-NoContainersInPod means a pod has no containers on which to perform operation. It contains
+NoContainersInPod means a pod has no containers on which to perform the operation. It contains
the pod ID.
### <a name="PodContainerError"></a>type PodContainerError
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c4e208894..32ed94ad4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -180,6 +180,29 @@ Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
+### Go Format and lint
+
+All code changes must pass ``make validate`` and ``make lint``, as
+executed in a standard container. The container image for this
+purpose is provided at ``quay.io/libpod/gate:latest``. If you need to
+change the image itself, it can also be built locally from the
+repository root with the command:
+
+```
+sudo podman build -t quay.io/libpod/gate:latest -f contrib/gate/Dockerfile .
+```
+
+***N.B.:*** **don't miss the dot (.) at the end; it's important**
+
+The container executes `make` by default, on a copy of the repository.
+This avoids changing or leaving build artifacts in your working directory.
+Execution does not require any special permissions from the host. However,
+the repository root must be bind-mounted into the container at
+`/usr/src/libpod`. For example, running `make lint` is done (from
+the repository root) with the command:
+
+``sudo podman run -it --rm -v $PWD:/usr/src/libpod:ro --security-opt label=disable quay.io/libpod/gate:latest lint``
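By the same pattern, `make validate` can presumably be run by passing `validate` as the target (this assumes the gate image forwards its argument to `make`, as it does for `lint`):

``sudo podman run -it --rm -v $PWD:/usr/src/libpod:ro --security-opt label=disable quay.io/libpod/gate:latest validate``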
+
### Integration Tests
Our primary means of performing integration testing for libpod is with the
diff --git a/Dockerfile b/Dockerfile
index 70d1a7629..c227207bd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.10
+FROM golang:1.11
RUN echo 'deb http://httpredir.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list
@@ -52,7 +52,7 @@ ADD . /go/src/github.com/containers/libpod
RUN set -x && cd /go/src/github.com/containers/libpod && make install.libseccomp.sudo
# Install runc
-ENV RUNC_COMMIT 78ef28e63bec2ee4c139b5e3e0d691eb9bdc748d
+ENV RUNC_COMMIT 96ec2177ae841256168fcf76954f7177af9446eb
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -64,7 +64,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install conmon
-ENV CRIO_COMMIT 662dbb31b5d4f5ed54511a47cde7190c61c28677
+ENV CRIO_COMMIT 7a283c391abb7bd25086a8ff91dbb36ebdd24466
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/kubernetes-sigs/cri-o.git "$GOPATH/src/github.com/kubernetes-sigs/cri-o.git" \
@@ -112,8 +112,7 @@ RUN set -x \
&& go get -u github.com/mailru/easyjson/... \
&& install -D -m 755 "$GOPATH"/bin/easyjson /usr/bin/
-# Install criu
-ENV CRIU_COMMIT 584cbe4643c3fc7dc901ff08bf923ca0fe7326f9
+# Install latest stable criu version
RUN set -x \
&& cd /tmp \
&& git clone https://github.com/checkpoint-restore/criu.git \
diff --git a/Dockerfile.CentOS b/Dockerfile.CentOS
index 67b7ddce1..3e14e59db 100644
--- a/Dockerfile.CentOS
+++ b/Dockerfile.CentOS
@@ -68,7 +68,7 @@ RUN set -x \
&& install -D -m 755 "$GOPATH"/bin/easyjson /usr/bin/
# Install conmon
-ENV CRIO_COMMIT 662dbb31b5d4f5ed54511a47cde7190c61c28677
+ENV CRIO_COMMIT 7a283c391abb7bd25086a8ff91dbb36ebdd24466
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/kubernetes-sigs/cri-o.git "$GOPATH/src/github.com/kubernetes-sigs/cri-o.git" \
diff --git a/Dockerfile.Fedora b/Dockerfile.Fedora
index 38cd599d4..2080b597b 100644
--- a/Dockerfile.Fedora
+++ b/Dockerfile.Fedora
@@ -17,7 +17,7 @@ RUN dnf -y install btrfs-progs-devel \
libseccomp-devel \
libselinux-devel \
skopeo-containers \
- https://kojipkgs.fedoraproject.org//packages/runc/1.0.0/55.dev.git578fe65.fc28/x86_64/runc-1.0.0-55.dev.git578fe65.fc28.x86_64.rpm \
+ runc \
make \
ostree-devel \
python \
@@ -72,7 +72,7 @@ RUN set -x \
&& install -D -m 755 "$GOPATH"/bin/easyjson /usr/bin/
# Install conmon
-ENV CRIO_COMMIT 662dbb31b5d4f5ed54511a47cde7190c61c28677
+ENV CRIO_COMMIT 7a283c391abb7bd25086a8ff91dbb36ebdd24466
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/kubernetes-sigs/cri-o.git "$GOPATH/src/github.com/kubernetes-sigs/cri-o.git" \
diff --git a/Makefile b/Makefile
index ae1b263ad..1f24e32ea 100644
--- a/Makefile
+++ b/Makefile
@@ -1,10 +1,11 @@
GO ?= go
DESTDIR ?= /
-EPOCH_TEST_COMMIT ?= 733cfe96819e1dc044e982b5321b3c902d1a47c6
+EPOCH_TEST_COMMIT ?= e1732a5213147e3c0b7bf60b55a332c3720ecb4b
HEAD ?= HEAD
CHANGELOG_BASE ?= HEAD~
CHANGELOG_TARGET ?= HEAD
PROJECT := github.com/containers/libpod
+GIT_BASE_BRANCH ?= origin/master
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN ?= $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
LIBPOD_IMAGE ?= libpod_dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
@@ -31,7 +32,7 @@ BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
OCIUMOUNTINSTALLDIR=$(PREFIX)/share/oci-umount/oci-umount.d
SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z)
-PACKAGES ?= $(shell $(GO) list -tags "${BUILDTAGS}" ./... | grep -v github.com/containers/libpod/vendor | grep -v e2e)
+PACKAGES ?= $(shell $(GO) list -tags "${BUILDTAGS}" ./... | grep -v github.com/containers/libpod/vendor | grep -v e2e | grep -v system )
COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}")
@@ -104,8 +105,11 @@ test/copyimg/copyimg: .gopathok $(wildcard test/copyimg/*.go)
test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go)
$(GO) build -ldflags '$(LDFLAGS)' -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/checkseccomp
+test/goecho/goecho: .gopathok $(wildcard test/goecho/*.go)
+ $(GO) build -ldflags '$(LDFLAGS)' -o $@ $(PROJECT)/test/goecho
+
podman: .gopathok $(PODMAN_VARLINK_DEPENDENCIES)
- $(GO) build -i -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o bin/$@ $(PROJECT)/cmd/podman
+ $(GO) build -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o bin/$@ $(PROJECT)/cmd/podman
local-cross: $(CROSS_BUILD_TARGETS)
@@ -113,7 +117,7 @@ bin/podman.cross.%: .gopathok
TARGET="$*"; \
GOOS="$${TARGET%%.*}" \
GOARCH="$${TARGET##*.}" \
- $(GO) build -i -ldflags '$(LDFLAGS_PODMAN)' -tags '$(BUILDTAGS_CROSS)' -o "$@" $(PROJECT)/cmd/podman
+ $(GO) build -ldflags '$(LDFLAGS_PODMAN)' -tags '$(BUILDTAGS_CROSS)' -o "$@" $(PROJECT)/cmd/podman
python:
ifdef HAS_PYTHON3
@@ -130,6 +134,7 @@ clean:
test/bin2img/bin2img \
test/checkseccomp/checkseccomp \
test/copyimg/copyimg \
+ test/goecho/goecho \
test/testdata/redis-image \
cmd/podman/varlink/iopodman.go \
libpod/container_ffjson.go \
@@ -161,19 +166,26 @@ integration.centos:
DIST=CentOS sh .papr_prepare.sh
shell: libpodimage
- ${CONTAINER_RUNTIME} run --tmpfs -e STORAGE_OPTIONS="--storage-driver=vfs" -e CGROUP_MANAGER=cgroupfs -e TESTFLAGS -e TRAVIS -it --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} sh
+ ${CONTAINER_RUNTIME} run -e STORAGE_OPTIONS="--storage-driver=vfs" -e CGROUP_MANAGER=cgroupfs -e TESTFLAGS -e TRAVIS -it --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} sh
testunit: libpodimage
${CONTAINER_RUNTIME} run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e CGROUP_MANAGER=cgroupfs -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} make localunit
-localunit: varlink_generate
+localunit: test/goecho/goecho varlink_generate
$(GO) test -tags "$(BUILDTAGS)" -cover $(PACKAGES)
+ $(MAKE) -C contrib/cirrus/packer test
ginkgo:
ginkgo -v -tags "$(BUILDTAGS)" -cover -flakeAttempts 3 -progress -trace -noColor test/e2e/.
localintegration: varlink_generate test-binaries clientintegration ginkgo
+localsystem: .install.ginkgo .install.gomega
+ ginkgo -v -noColor test/system/
+
+system.test-binary: .install.ginkgo .install.gomega
+ $(GO) test -c ./test/system
+
clientintegration:
$(MAKE) -C contrib/python/podman integration
$(MAKE) -C contrib/python/pypodman integration
@@ -183,7 +195,7 @@ vagrant-check:
binaries: varlink_generate easyjson_generate podman
-test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp
+test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp test/goecho/goecho
MANPAGES_MD ?= $(wildcard docs/*.md pkg/*/docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)
@@ -283,7 +295,7 @@ install.tools: .install.gitvalidation .install.gometalinter .install.md2man .ins
if [ ! -x "$(GOBIN)/gometalinter" ]; then \
$(GO) get -u github.com/alecthomas/gometalinter; \
cd $(FIRST_GOPATH)/src/github.com/alecthomas/gometalinter; \
- git checkout 23261fa046586808612c61da7a81d75a658e0814; \
+ git checkout e8d801238da6f0dfd14078d68f9b53fa50a7eeb5; \
$(GO) install github.com/alecthomas/gometalinter; \
$(GOBIN)/gometalinter --install; \
fi
@@ -334,6 +346,10 @@ API.md: cmd/podman/varlink/io.podman.varlink
validate: gofmt .gitvalidation
+build-all-new-commits:
+ # Validate that all the commits build on top of $(GIT_BASE_BRANCH)
+ git rebase $(GIT_BASE_BRANCH) -x make
+
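As a usage sketch, the target rebases the current branch onto the base branch and runs `make` after each commit; the explicit variable assignment below simply restates the default and is shown only for illustration:

```
make build-all-new-commits GIT_BASE_BRANCH=origin/master
```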
.PHONY: \
.gopathok \
binaries \
diff --git a/OWNERS b/OWNERS
index 00f8bb1c3..a03622b2e 100644
--- a/OWNERS
+++ b/OWNERS
@@ -3,9 +3,14 @@ approvers:
- baude
- mrunalp
- rhatdan
+ - TomSweeneyRedHat
+ - umohnani8
+ - giuseppe
+ - vrothberg
reviewers:
- mheon
- baude
+ - mrunalp
- rhatdan
- TomSweeneyRedHat
- umohnani8
diff --git a/README.md b/README.md
index 157e94d7c..865900c62 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,15 @@
![PODMAN logo](logo/podman-logo-source.svg)
-# libpod - library for running OCI-based containers in Pods
-### Latest Version: 0.10.1.3
-### Status: Active Development
+# Library and tool for running OCI-based containers in Pods
-## What is the scope of this project?
+Libpod provides a library for applications looking to use the Container Pod concept,
+popularized by Kubernetes. Libpod also contains the `podman` tool for managing
+Pods, Containers, and Container Images.
-libpod provides a library for applications looking to use the Container Pod concept popularized by Kubernetes.
-libpod also contains a tool called podman for managing Pods, Containers, and Container Images.
+* [Latest Version: 0.12.1](https://github.com/containers/libpod/releases/latest)
+* [Continuous Integration:](contrib/cirrus/README.md) [![Build Status](https://api.cirrus-ci.com/github/containers/libpod.svg)](https://cirrus-ci.com/github/containers/libpod/master)
+
+## Overview and scope
At a high level, the scope of libpod and podman is the following:
@@ -17,11 +19,22 @@ At a high level, the scope of libpod and podman is the following:
* Full management of container lifecycle
* Support for pods to manage groups of containers together
* Resource isolation of containers and pods.
+* Integration with CRI-O to share containers and backend code.
+
+## Roadmap
+
+1. Python frontend for Varlink API
+1. Integrate libpod into CRI-O to replace its existing container management backend
+1. Further work on the podman pod command
+1. Further improvements on rootless containers
+1. In-memory locking to replace file locks
-## What is not in scope for this project?
+## Out of scope
-* Signing and pushing images to various image storages. See [Skopeo](https://github.com/containers/skopeo/).
-* Container Runtimes daemons for working with Kubernetes CRIs. See [CRI-O](https://github.com/kubernetes-sigs/cri-o). We are working to integrate libpod into CRI-O to share containers and backend code with Podman.
+* Signing and pushing images to various image storages.
+ See [Skopeo](https://github.com/containers/skopeo/).
+* Container Runtimes daemons for working with Kubernetes CRIs.
+ See [CRI-O](https://github.com/kubernetes-sigs/cri-o).
## OCI Projects Plans
@@ -66,14 +79,6 @@ Release notes for recent Podman versions
**[Contributing](CONTRIBUTING.md)**
Information about contributing to this project.
-## Current Roadmap
-
-1. Python frontend for Varlink API
-1. Integrate libpod into CRI-O to replace its existing container management backend
-1. Further work on the podman pod command
-1. Further improvements on rootless containers
-1. In-memory locking to replace file locks
-
[spec-hooks]: https://github.com/opencontainers/runtime-spec/blob/v2.0.1/config.md#posix-platform-hooks
## Buildah and Podman relationship
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 9cdf3faae..2d7ca6cbf 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,71 @@
# Release Notes
+## 0.12.1.2
+### Bugfixes
+- Fixed a bug where an empty path for named volumes could make it impossible to create containers
+- Fixed a bug where containers using another container's network namespace would not also use the other container's /etc/hosts and /etc/resolv.conf
+- Fixed a bug where containers with `--rm` which failed to start were not removed
+- Fixed a potential race condition attempting to read `/etc/passwd` inside containers
+
+## 0.12.1.1
+### Features
+- Added the `podman generate kube` command to generate Kubernetes Pod and Service YAML for Podman containers and pods
+- The `podman pod stop` command now accepts a `--timeout` flag to set the timeout for stopping containers in the pod
+
+### Bugfixes
+- Fixed a bug where rootless Podman would fail to start if the default OCI hooks directory is not present
+
+## 0.12.1
+### Features
+- Rootless Podman now creates the storage.conf, libpod.conf, and mounts.conf configuration files automatically in `~/.config/containers/` for ease of reconfiguration
+- The `podman pod create` command can expose ports in the pod's network namespace, allowing public services to be created in pods
+- The `podman container checkpoint` command can now keep containers running after they are checkpointed with the `--leave-running` flag
+- The `podman container checkpoint` and `podman container restore` commands now support the `--tcp-established` flag to checkpoint and restore containers with active TCP connections
+- The `podman version` command now has a `--format` flag to produce machine-readable output
+- Added the `podman container exists`, `podman pod exists`, and `podman image exists` commands to easily check for a container/pod/image, respectively, by name or ID
+- The `podman ps --pod` flag now has a short alias, `-p`
+- The `podman rmi` and `podman rm` commands now have a `--prune` flag to prune unused images and containers, respectively
+- The `podman ps` command now has a `--sync` flag to force a sync of Podman's state against the OCI runtime, resolving some state desync errors
+- Added the `podman volume` set of commands for creating and managing local-only named volumes
+
+### Bugfixes
+- Fixed a breaking change in rootless Podman where a change in default paths caused Podman to be unable to function on systems upgraded from 0.10.x or earlier
+- Fixed a bug where `podman exec` without `-t` would still use a terminal if the container was created with `-t`
+- Fixed a bug where container root propagation was not being properly adjusted if volumes with root propagation set were mounted into the container
+- Fixed a bug where `podman exec` could hold the container lock longer than necessary waiting for an exited container
+- Fixed a bug where rootless containers using `slirp4netns` for networking were reporting using `bridge` networking in `podman inspect`
+- Fixed a bug where `podman container restore -a` was attempting to restore all containers, including created and running ones. It will now only attempt to restore stopped and exited containers
+- Fixed a bug where rootless Podman detached containers were not being properly cleaned up
+- Fixed a bug where privileged containers were being mounted with incorrect (too restrictive) mount options such as `nodev`
+- Fixed a bug where `podman stop` would throw an error attempting to stop a container that had already stopped
+- Fixed a bug where `NOTIFY_SOCKET` was not properly being passed into Podman containers
+- Fixed a bug where `/dev/shm` was not properly mounted in rootless containers
+- Fixed a bug where rootless Podman would set up the CNI plugins for networking (despite not using them in rootless mode), potentially causing `inotify` related errors
+- Fixed a bug where Podman would error on numeric GIDs that do not exist in the container's `/etc/group`
+- Fixed a bug where containers in pods or created with `--net=container` were not mounting `/etc/resolv.conf` and `/etc/hosts`
+
+### Misc
+- `podman build` now defaults the `--force-rm` flag to `true`
+- Improved `podman runlabel` support for labels featuring arguments with whitespace
+- Containers without a network namespace will now use the host's `resolv.conf`
+- The `slirp4netns` network mode can now be used with containers running as root. It may be useful for container-in-container scenarios where the outer container does not have host networking set
+- Podman now uses `inotify` to wait for container exit files to be created, instead of polling. If `inotify` cannot be used, Podman will fall back to polling to check if the file has been created
+- The `podman logs` command now uses improved short-options handling, allowing its flags to be combined if desired (for example, `podman logs -lf` instead of `podman logs -l -f`)
+- Hardcoded OCI hooks directories used by Podman are now deprecated; they should instead be set in the `libpod.conf` configuration file as an array via `hooks_dir`
+
+## 0.11.1.1
+### Bugfixes
+- Fixed a bug where Podman was not correctly adding firewall rules for containers, preventing them from accessing the network
+- Fixed a bug where full error messages were being lost when creating containers with user namespaces
+- Fixed a bug where container state was not properly updated if a failure occurred during network setup, which could cause mounts to be left behind when the container was removed
+- Fixed a bug where `podman exec` could time out on slower systems by increasing the relevant timeout
+
+### Misc
+- `podman rm -f` now removes paused containers. As such, `podman rm -af` completing successfully guarantees all Podman containers have been removed
+- Added a field to `podman info` to show if Podman is being run as rootless
+- Made a small output format change to `podman images` - image sizes now feature a space between number and unit (e.g. `123 MB` instead of `123MB`)
+- Vendored an updated version of `containers/storage` to fix several bugs reported upstream
+
## 0.11.1
### Features
- Added `--all` and `--latest` flags to `podman checkpoint` and `podman restore`
diff --git a/changelog.txt b/changelog.txt
index ace41f1d9..b0680a02c 100644
--- a/changelog.txt
+++ b/changelog.txt
@@ -1,3 +1,317 @@
+- Changelog for v0.12.1.2 (2018-12-13)
+ * Add release notes for 0.12.1.2
+ * runlabel should sub podman for docker|/usr/bin/docker
+ * condition fixed for adding volume to boltdb.
+ * e2e: add tests for systemd
+ * Add test for sharing resolv and hosts with netns
+ * Makefile tweaks to fix make shell
+ * failed containers with --rm should remove themselves
+ * Fix documentation links and flow
+ * Set Socket label for container
+ * Containers sharing a netns should share resolv/hosts
+ * Prevent a second lookup of user for image volumes
+ * fix typo in kubernetes
+ * No need to use `-i` in go build (with go 1.10 and above)
+ * rootless: fix restart when using fuse-overlayfs
+ * Cirrus: Update base-image build docs
+ * Add capabilities to generate kube
+ * disable F29 tests on PAPR
+ * Ensure storage options are properly initialized
+ * add more example usage to varlink endpoints
+ * Update for API change
+ * Vendor buildah after merging mtrmac/blob-info-caching-on-top-of-contents-caching
+ * Vendor c/image after merging c/image#536
+ * Bump gitvalidation epoch
+ * Bump to v0.12.2-dev
+
+- Changelog for v0.12.1.1 (2018-12-07)
+ * Update release notes for v0.12.1.1
+ * Fix errors where OCI hooks directory does not exist
+ * add timeout to pod stop
+ * Remove manual handling of insecure registries in (podman search)
+ * Fix reporting the registries.conf path on error
+ * Remove manual handling of insecure registries in doPullImage
+ * Remove the forceSecure parameter on the pull call stack
+ * Remove manual handling of insecure registries in PushImageToReference
+ * Factor out the registries.conf location code in pkg/registries
+ * Remove the forceSecure parameter of Image.PushImageTo*
+ * Minimally update for the DockerInsecureSkipTLSVerify type change
+ * Bump gitvalidation epoch
+ * Bump to v0.12.2-dev
+ * Fix build on non-Linux
+ * Remove some unused data structures and code
+ * Vendor buildah after merging https://github.com/containers/buildah/pull/1214
+ * Update containers/image to 63a1cbdc5e6537056695cf0d627c0a33b334df53
+ * Cirrus: Document and codify base-image production
+ * Cirrus: Use Makefile for image-building
+ * Refactor BooleanAction to mimic golang interface
+ * generate kube
+
+- Changelog for v0.12.1 (2018-12-06)
+ * Update release notes for 0.12.1
+ * bind mount /etc/resolv.conf|hosts in pods
+ * Remove --sync flag from `podman rm`
+ * Add locking to Sync() on containers
+ * Add --sync flag to podman ps
+ * Add --sync option to podman rm
+ * Tests for podman volume commands
+ * Add "podman volume" command
+ * tutorial: add ostree dependency
+ * Pick registry to login from full image name as well
+ * Add ability to prune containers and images
+ * Invert tlsverify default in API
+ * set .54 version for f28 due to memory error
+ * Vendor in latest containers/storage
+ * pkg/lookup: Return ID-only pointers on ErrNo*Entries
+ * test for rmi with children
+ * libpod/container_internal_linux: Allow gids that aren't in the group file
+ * Don't initialize CNI when running as rootless
+ * correct algorithm for deleting all images
+ * Use runtime lockDir in BoltDB state
+ * test: update runc again
+ * vendor: update containers/storage
+ * create pod on the fly
+ * libpod/container_internal: Deprecate implicit hook directories
+ * Revert changes to GetDefaultStoreOptions
+ * Fix libpod static dir selection when graphroot changed
+ * podman pod exists
+ * Adding more varlink endpoints
+ * Ensure directory where we will make database exists
+ * Fix typo
+ * rootless: raise error if newuidmap/newgidmap are not installed
+ * Add better descriptions for validation errors in DB
+ * Fix gofmt and lint
+ * Make locks dir in unit tests
+ * Do not initialize locks dir in BoltDB
+ * Move rootless storage config into libpod
+ * Set default paths from DB if not explicitly overridden
+ * Add a struct indicating if some Runtime fields were set
+ * Make DB config validation an explicit step
+ * Move DB configuration up in runtime setup
+ * Add ability to retrieve runtime configuration from DB
+ * Add short-option handling to logs
+ * tests: always install runc on Ubuntu
+ * cirrus: update ubuntu image
+ * cirrus: make apt noninteractive
+ * Dockerfile, .cirrus.yml: update runc commit
+ * rootless: propagate XDG_RUNTIME_DIR to the OCI runtime
+ * Update ubuntu VM image w/ newer runc
+ * add pod short option to ps
+ * Add create test with --mount flag
+ * Only include container SizeRootFs when requested
+ * /dev/shm should be mounted even in rootless mode.
+ * disable checkpoint tests on f29
+ * test, rootless: specify USER env variable
+ * Revert "downgrade runc due a rootless bug"
+ * Fix completions to work with podman run command
+ * hide kube command for now
+ * pypod create/run: ignore args for container command
+ * Add support for --all in pypodman ps command
+ * Fixes #1867
+ * tests: fix NOTIFY_SOCKET test
+ * Fix golang formatting issues
+ * oci: propagate NOTIFY_SOCKET on runtime start
+ * test: fix test for NOTIFY_SOCKET
+ * Add test to ensure stopping a stopped container works
+ * Stopping a stopped container is not an error for Podman
+ * Disable mount options when running --privileged
+ * Vendor in latest containers/storage
+ * util: use fsnotify to wait for file
+ * vendor: update selinux
+ * rootless: store only subset of storage.conf
+ * rootless: fix cleanup
+ * network: allow slirp4netns mode also for root containers
+ * Added more checkpoint/restore test cases
+ * Fix podman container restore -a
+ * Update bash completion for checkpoint/restore
+ * Add '--tcp-established' to checkpoint/restore man page
+ * Added tcp-established to checkpoint/restore
+ * Remove unused CRIU_COMMIT variable
+ * Point CRIU_COMMIT to CRIU release 3.11
+ * Updated CRIO_COMMIT to pull in new conmon for CRIU
+ * Use also a struct to pass options to Restore()
+ * _split_token(): handle None
+ * Use host's resolv.conf if no network namespace enabled
+ * rootless: add new netmode "slirp4netns"
+ * tests: change return type for PodmanAsUser to PodmanTestIntegration
+ * test: cleanup CNI network used by the tests
+ * exec: don't wait for pidfile when the runtime exited
+ * Remove mount options relatime from podman run --mount with shared
+ * Update test case name to podman run with --mount flag
+ * Add some tests for --ip flag with run and create command
+ * Add history and namespaceoptions to image inspect
+ * add podman container|image exists
+ * set root propagation based on volume properties
+ * Actually set version for podman module / pypodman
+ * implement --format for version command
+ * podman_tutorial.md typos: arguement -> argument; missing 'a'
+ * Load NAT modules to fix tests involving CRIU
+ * Vendor in latest containers/buildah
+ * Update checkpoint/restore man pages
+ * Added option to keep containers running after checkpointing
+ * Use a struct to pass options to Checkpoint()
+ * exec: always make explicit the tty value
+ * Allow users to expose ports from the pod to the host
+ * Improve speed of containers.list()
+ * output libpod container to kubernetes yaml
+ * rootless: create empty mounts.conf if it doesn't exist
+ * registries: check user registries file only in rootless mode
+ * rootless: create storage.conf when it doesn't exist
+ * rootless: create libpod.conf when it doesn't exist
+ * Don't use $HOST and $USER variables for remote
+ * Implement pypodman start command
+ * runlabel: use shlex for splitting commands
+ * Add a rule to compile system test in Makefile
+ * Fix no-new-privileges test
+ * The system test write with ginkgo
+ * Separate common used test functions and structs to test/utils
+ * Add version command to pypodman
+ * Bump gitvalidation epoch
+ * Bump to v0.11.2-dev
+ * Cirrus: Add documentation for system-testing
+ * Cirrus: Simplify optional system-test script
+ * Cirrus: Reveal magic, parallel system-testing
+ * libpod should know if the network is disabled
+ * Lint: Silence few given goconst lint warnings
+ * Lint: Extract constant unknownPackage
+ * Lint: Tests: add missing assertions
+ * Lint: Do not ignore errors from docker run command when selinux enabled
+ * Lint: InspectImage varlink api should return errors that occurred
+ * Lint: Exclude autogenerated files from lint test
+ * Lint: Update metalinter dependency
+ * Set --force-rm for podman build to true by default
+ * Vendor in latest containers/storage
+
+- Changelog for v0.11.1.1 (2018-11-15)
+ * Vendor in containers/storage
+ * Add release notes for 0.11.1.1
+ * Increase pidWaitTimeout to 60s
+ * Cirrus: Add master branch testing status badge
+ * rootless: call IsRootless just once
+ * Bump golang to v1.10 in install.md
+ * Standardized container image for gofmt and lint
+ * Make list of approvers same as reviewers
+ * vendor: update ostree-go
+ * vendor.conf: fix typo
+ * Cleanup podman spec to not show git checkout is dirty
+ * Add space between num & unit in images output
+ * Update troubleshooting guide to deal with rootless path
+ * troubleshooting.md: add a recipe for rootless ping
+ * remove $-prefix from (most) shell examples
+ * docs: Fix duplicated entry for pod-container-unmount
+ * Better document rootless containers
+ * info: add rootless field
+ * Accurately update state if prepare() partially fails
+ * Do not hide errors when creating container with UserNSRoot
+ * rm -f now removes a paused container
+ * correct assignment of networkStatus
+ * podman_tutorial: cni build path has changed
+ * Bump gitvalidation epoch
+ * Bump to v0.11.2-dev
+ * Cirrus: Ignore any error from the IRC messenger
+ * rootless: default to fuse-overlayfs when available
+
+- Changelog for v0.11.1 (2018-11-08)
+ * Update release notes for 0.11.1
+ * update seccomp.json
+ * Touch up --log* options and daemons in man pages
+ * Fix run --hostname test that started failing post-merge
+ * move defer'd function declaration ahead of prepare error return
+ * Don't fail if /etc/passwd or /etc/group does not exists
+ * Print error status code if we fail to parse it
+ * Properly set Running state when starting containers
+ * Fix misspelling
+ * Retrieve container PID from conmon
+ * If a container ceases to exist in runc, set exit status
+ * EXPERIMENTAL: Do not call out to runc for sync
+ * Actually save changes from post-stop sync
+ * rootless: mount /sys/fs/cgroup/systemd from the host
+ * rootless: don't bind mount /sys/fs/cgroup/systemd in systemd mode
+ * Add hostname to /etc/hosts
+ * Temporarily fix the Python tests to fix some PRs
+ * Remove conmon cgroup before pod cgroup for cgroupfs
+ * Fix cleanup for "Pause a bunch of running containers"
+ * --interactive shall keep STDIN attached even when not explicitly called out
+ * Do never override podman with docker
+ * Make kill, pause, and unpause parallel.
+ * Fix long image name handling
+ * Make restart parallel and add --all
+ * Add ChangeAction to parse sub-options from --change
+ * replace quay.io/baude to quay.io/libpod
+ * Change humanize to use MB vs MiB.
+ * allow ppc64le to pass libpod integration tests
+ * Cirrus-CI: Add option to run system-tests
+ * Cirrus: Skip rebuilding images unless instructed
+ * Cirrus: Disable image build job abort on push
+ * Cirrus: Add a readme
+ * Ubuntu VM image build: try update twice
+ * Cirrus: Enable updating F28 image
+ * rootless: do not add an additional /run to runroot
+ * rootless: avoid hang on failed slirp4netns
+ * Fix setting of version information
+ * runtime: do not allow runroot longer than 50 characters
+ * attach: fix attach when cuid is too long
+ * truncate command output in ps by default
+ * Update the runc commit used for testing
+ * make various changes to ps output
+ * Sync default config with libpod.conf
+ * Use two spaces to pad PS fields
+ * unmount: fix error logic
+ * get user and group information using securejoin and runc's user library
+ * CONTRIBUTING.md: add section about describing changes
+ * Change to exported name in ParseDevice
+ * Vendor in latest containers/storage
+ * fix bug in rm -fa parallel deletes
+ * Ensure test container in running state
+ * Add tests for selinux labels
+ * Add --max-workers and heuristics for parallel operations
+ * Increase security and performance when looking up groups
+ * run prepare in parallel
+ * downgrade runc due a rootless bug
+ * runlabel: run any command
+ * Eat our own dogfood
+ * vendor: update containers/storage
+ * Add support for /usr/local installation
+ * create: fix writing cidfile when using rootless
+ * Explain the device format in man pages
+ * read conmon output and convert to json in two steps
+ * Cirrus: Use images w/ buildah fix
+ * Add --all and --latest to checkpoint/restore
+ * Use the newly added getAllOrLatestContainers() function
+ * Use the new checkAllAndLatest() function
+ * Also factor out getAllOrLatestContainers() function
+ * Add checkAllAndLatest() function
+ * Downgrade code to support python3.4
+ * Allow containers/storage to handle on SELinux labeling
+ * Use more reliable check for rootless for firewall init
+ * Vendor in latest containers/storage opencontainers/selinux
+ * Make podman ps fast
+ * Support auth file environment variable in podman build
+ * fix environment variable parsing
+ * tests: use existing CRIU version check
+ * Use the CRIU version check in checkpoint/restore
+ * Add helper function to read out CRIU version
+ * vendor in go-criu and dependencies
+ * oci: cleanup process status
+ * Handle http/https in registry given to login/out
+ * re-enable f29 testing
+ * correct stats err with non-running containers
+ * Use restoreArtifacts to save time in integration tests
+ * Make rm faster
+ * Fix man page to show info on storage
+ * Move rootless directory handling to the libpod/pkg/util directory
+ * Fix podman port -l
+ * Fix trivial missing markup in manpage
+ * Cirrus: Install CRIU in test images
+ * Cirrus: Use different CNI_COMMIT for Fedora
+ * Fix Cirrus/Packer VM image building
+ * Revert "Cirrus: Enable debugging delay on non-zero exit"
+ * Cirrus: IRC message when cirrus testing successful
+ * cirrus: Add simple IRC messenger
+ * fix NOTIFY_SOCKET in e2e tests
+ * Bump gitvalidation epoch
+ * Bump to v0.10.2-dev
+
- Changelog for v0.10.1.3 (2018-10-17)
* Update release notes for 0.10.1.3
* Vendor in new new buildah/ci
diff --git a/cmd/podman/build.go b/cmd/podman/build.go
index 14bf226f9..880cb892f 100644
--- a/cmd/podman/build.go
+++ b/cmd/podman/build.go
@@ -1,6 +1,11 @@
package main
import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
"github.com/containers/buildah"
"github.com/containers/buildah/imagebuildah"
buildahcli "github.com/containers/buildah/pkg/cli"
@@ -10,15 +15,15 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
)
var (
layerFlags = []cli.Flag{
cli.BoolTFlag{
+ Name: "force-rm",
+ Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful. (default true)",
+ },
+ cli.BoolTFlag{
Name: "layers",
Usage: "cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. ",
},
@@ -230,7 +235,7 @@ func buildCmd(c *cli.Context) error {
Layers: layers,
NoCache: c.Bool("no-cache"),
RemoveIntermediateCtrs: c.BoolT("rm"),
- ForceRmIntermediateCtrs: c.Bool("force-rm"),
+ ForceRmIntermediateCtrs: c.BoolT("force-rm"),
}
if c.Bool("quiet") {
diff --git a/cmd/podman/checkpoint.go b/cmd/podman/checkpoint.go
index bf280920d..824c97662 100644
--- a/cmd/podman/checkpoint.go
+++ b/cmd/podman/checkpoint.go
@@ -24,6 +24,14 @@ var (
Usage: "keep all temporary checkpoint files",
},
cli.BoolFlag{
+ Name: "leave-running, R",
+ Usage: "leave the container running after writing checkpoint to disk",
+ },
+ cli.BoolFlag{
+ Name: "tcp-established",
+ Usage: "checkpoint a container with established TCP connections",
+ },
+ cli.BoolFlag{
Name: "all, a",
Usage: "checkpoint all running containers",
},
@@ -50,7 +58,11 @@ func checkpointCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- keep := c.Bool("keep")
+ options := libpod.ContainerCheckpointOptions{
+ Keep: c.Bool("keep"),
+ KeepRunning: c.Bool("leave-running"),
+ TCPEstablished: c.Bool("tcp-established"),
+ }
if err := checkAllAndLatest(c); err != nil {
return err
@@ -59,7 +71,7 @@ func checkpointCmd(c *cli.Context) error {
containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
for _, ctr := range containers {
- if err = ctr.Checkpoint(context.TODO(), keep); err != nil {
+ if err = ctr.Checkpoint(context.TODO(), options); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
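A hedged usage sketch of the new checkpoint flags; the container name is hypothetical, and root privileges are typically required for checkpointing:

```
$ sudo podman container checkpoint --leave-running --tcp-established my_container
```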
diff --git a/cmd/podman/commit.go b/cmd/podman/commit.go
index b09c6b0d9..02ede4f73 100644
--- a/cmd/podman/commit.go
+++ b/cmd/podman/commit.go
@@ -95,7 +95,7 @@ func commitCmd(c *cli.Context) error {
for _, change := range c.StringSlice("change") {
splitChange := strings.Split(strings.ToUpper(change), "=")
if !util.StringInSlice(splitChange[0], libpod.ChangeCmds) {
- return errors.Errorf("invalid syntax for --change ", change)
+ return errors.Errorf("invalid syntax for --change: %s", change)
}
}
}
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index f9e746b28..8404a29b8 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/fatih/camelcase"
"github.com/pkg/errors"
@@ -161,6 +162,13 @@ func getContext() context.Context {
return context.TODO()
}
+func getDefaultNetwork() string {
+ if rootless.IsRootless() {
+ return "slirp4netns"
+ }
+ return "bridge"
+}
+
// Common flags shared between commands
var createFlags = []cli.Flag{
cli.StringSliceFlag{
@@ -372,7 +380,7 @@ var createFlags = []cli.Flag{
cli.StringFlag{
Name: "net, network",
Usage: "Connect a container to a network",
- Value: "bridge",
+ Value: getDefaultNetwork(),
},
cli.BoolFlag{
Name: "oom-kill-disable",
@@ -414,6 +422,10 @@ var createFlags = []cli.Flag{
Name: "read-only",
Usage: "Make containers root filesystem read-only",
},
+ cli.StringFlag{
+ Name: "restart",
+ Usage: "Restart is not supported. Please use a systemd unit file for restart",
+ },
cli.BoolFlag{
Name: "rm",
Usage: "Remove container (and pod if created) after exit",
diff --git a/cmd/podman/container.go b/cmd/podman/container.go
index ff634278f..b0232c874 100644
--- a/cmd/podman/container.go
+++ b/cmd/podman/container.go
@@ -9,6 +9,7 @@ var (
attachCommand,
checkpointCommand,
cleanupCommand,
+ containerExistsCommand,
commitCommand,
createCommand,
diffCommand,
@@ -21,7 +22,7 @@ var (
mountCommand,
pauseCommand,
portCommand,
- // pruneCommand,
+ pruneContainersCommand,
refreshCommand,
restartCommand,
restoreCommand,
diff --git a/cmd/podman/containers_prune.go b/cmd/podman/containers_prune.go
new file mode 100644
index 000000000..92604e82f
--- /dev/null
+++ b/cmd/podman/containers_prune.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/libpod"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/urfave/cli"
+)
+
+var (
+ pruneContainersDescription = `
+ podman container prune
+
+ Removes all exited containers
+`
+
+ pruneContainersCommand = cli.Command{
+ Name: "prune",
+ Usage: "Remove all stopped containers",
+ Description: pruneContainersDescription,
+ Action: pruneContainersCmd,
+ OnUsageError: usageErrorHandler,
+ }
+)
+
+func pruneContainersCmd(c *cli.Context) error {
+ var (
+ deleteFuncs []shared.ParallelWorkerInput
+ )
+
+ ctx := getContext()
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ filter := func(c *libpod.Container) bool {
+ state, _ := c.State()
+ if state == libpod.ContainerStateStopped || (state == libpod.ContainerStateExited && err == nil && c.PodID() == "") {
+ return true
+ }
+ return false
+ }
+ delContainers, err := runtime.GetContainers(filter)
+ if err != nil {
+ return err
+ }
+ if len(delContainers) < 1 {
+ return nil
+ }
+ for _, container := range delContainers {
+ con := container
+ f := func() error {
+ return runtime.RemoveContainer(ctx, con, c.Bool("force"))
+ }
+
+ deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
+ ContainerID: con.ID(),
+ ParallelFunc: f,
+ })
+ }
+ maxWorkers := shared.Parallelize("rm")
+ if c.GlobalIsSet("max-workers") {
+ maxWorkers = c.GlobalInt("max-workers")
+ }
+ logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+ // Run the parallel funcs
+ deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
+ return printParallelOutput(deleteErrors, errCount)
+}
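Per the filter above, the command removes stopped containers as well as exited containers that are not part of a pod. A minimal invocation:

```
$ podman container prune
```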
diff --git a/cmd/podman/create.go b/cmd/podman/create.go
index 9f6825c95..870eb28d6 100644
--- a/cmd/podman/create.go
+++ b/cmd/podman/create.go
@@ -11,6 +11,7 @@ import (
"syscall"
"github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
ann "github.com/containers/libpod/pkg/annotations"
@@ -66,7 +67,7 @@ func createCmd(c *cli.Context) error {
rootless.SetSkipStorageSetup(true)
}
- runtime, err := libpodruntime.GetContainerRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(c)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
@@ -128,7 +129,7 @@ func createContainer(c *cli.Context, runtime *libpod.Runtime) (*libpod.Container
var data *inspect.ImageData = nil
if rootfs == "" && !rootless.SkipStorageSetup() {
- newImage, err := runtime.ImageRuntime().New(ctx, c.Args()[0], rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{}, false, false)
+ newImage, err := runtime.ImageRuntime().New(ctx, c.Args()[0], rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{}, false)
if err != nil {
return nil, nil, err
}
@@ -375,8 +376,8 @@ func configureEntrypoint(c *cli.Context, data *inspect.ImageData) []string {
return entrypoint
}
-func configurePod(c *cli.Context, runtime *libpod.Runtime, namespaces map[string]string) (map[string]string, error) {
- pod, err := runtime.LookupPod(c.String("pod"))
+func configurePod(c *cli.Context, runtime *libpod.Runtime, namespaces map[string]string, podName string) (map[string]string, error) {
+ pod, err := runtime.LookupPod(podName)
if err != nil {
return namespaces, err
}
@@ -409,7 +410,12 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
inputCommand, command []string
memoryLimit, memoryReservation, memorySwap, memoryKernel int64
blkioWeight uint16
+ namespaces map[string]string
)
+ if c.IsSet("restart") {
+ return nil, errors.Errorf("--restart option is not supported.\nUse systemd unit files for restarting containers")
+ }
+
idmappings, err := util.ParseIDMapping(c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidname"), c.String("subgidname"))
if err != nil {
return nil, err
@@ -492,12 +498,21 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
return nil, errors.Errorf("--cpu-quota and --cpus cannot be set together")
}
+ // EXPOSED PORTS
+ var portBindings map[nat.Port][]nat.PortBinding
+ if data != nil {
+ portBindings, err = cc.ExposedPorts(c.StringSlice("expose"), c.StringSlice("publish"), c.Bool("publish-all"), data.ContainerConfig.ExposedPorts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
// Kernel Namespaces
// TODO Fix handling of namespace from pod
// Instead of integrating here, should be done in libpod
// However, that also involves setting up security opts
// when the pod's namespace is integrated
- namespaces := map[string]string{
+ namespaces = map[string]string{
"pid": c.String("pid"),
"net": c.String("net"),
"ipc": c.String("ipc"),
@@ -505,8 +520,41 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
"uts": c.String("uts"),
}
+ originalPodName := c.String("pod")
+ podName := strings.Replace(originalPodName, "new:", "", 1)
+ // after we strip out :new, make sure there is something left for a pod name
+ if len(podName) < 1 && c.IsSet("pod") {
+ return nil, errors.Errorf("new pod name must be at least one character")
+ }
if c.IsSet("pod") {
- namespaces, err = configurePod(c, runtime, namespaces)
+ if strings.HasPrefix(originalPodName, "new:") {
+ // pod does not exist; lets make it
+ var podOptions []libpod.PodCreateOption
+ podOptions = append(podOptions, libpod.WithPodName(podName), libpod.WithInfraContainer(), libpod.WithPodCgroups())
+ if len(portBindings) > 0 {
+ ociPortBindings, err := cc.NatToOCIPortBindings(portBindings)
+ if err != nil {
+ return nil, err
+ }
+ podOptions = append(podOptions, libpod.WithInfraContainerPorts(ociPortBindings))
+ }
+
+ podNsOptions, err := shared.GetNamespaceOptions(strings.Split(DefaultKernelNamespaces, ","))
+ if err != nil {
+ return nil, err
+ }
+ podOptions = append(podOptions, podNsOptions...)
+ // make pod
+ pod, err := runtime.NewPod(ctx, podOptions...)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("pod %s created by new container request", pod.ID())
+
+ // The container now cannot have port bindings; so we reset the map
+ portBindings = make(map[nat.Port][]nat.PortBinding)
+ }
+ namespaces, err = configurePod(c, runtime, namespaces, podName)
if err != nil {
return nil, err
}
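This is what enables creating a pod on the fly from `podman run`/`podman create`. A sketch of such an invocation (image name and port mapping are illustrative); note that the published port is attached to the pod's infra container rather than to the new container itself:

```
$ podman run -d --pod new:mypod -p 8080:80 nginx
```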
@@ -535,7 +583,7 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
// Make sure if network is set to container namespace, port binding is not also being asked for
netMode := ns.NetworkMode(namespaces["net"])
if netMode.IsContainer() {
- if len(c.StringSlice("publish")) > 0 || c.Bool("publish-all") {
+ if len(portBindings) > 0 {
return nil, errors.Errorf("cannot set port bindings on an existing container network namespace")
}
}
@@ -644,15 +692,6 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
return nil, errors.Errorf("No command specified on command line or as CMD or ENTRYPOINT in this image")
}
- // EXPOSED PORTS
- var portBindings map[nat.Port][]nat.PortBinding
- if data != nil {
- portBindings, err = cc.ExposedPorts(c.StringSlice("expose"), c.StringSlice("publish"), c.Bool("publish-all"), data.ContainerConfig.ExposedPorts)
- if err != nil {
- return nil, err
- }
- }
-
// SHM Size
shmSize, err := units.FromHumanSize(c.String("shm-size"))
if err != nil {
@@ -670,6 +709,11 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
if util.StringInSlice(".", c.StringSlice("dns-search")) && len(c.StringSlice("dns-search")) > 1 {
return nil, errors.Errorf("cannot pass additional search domains when also specifying '.'")
}
+ if !netMode.IsPrivate() {
+ if c.IsSet("dns-search") || c.IsSet("dns") || c.IsSet("dns-opt") {
+ return nil, errors.Errorf("specifying DNS flags when network mode is shared with the host or another container is not allowed")
+ }
+ }
// Validate domains are good
for _, dom := range c.StringSlice("dns-search") {
@@ -741,7 +785,7 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
NetMode: netMode,
UtsMode: utsMode,
PidMode: pidMode,
- Pod: c.String("pod"),
+ Pod: podName,
Privileged: c.Bool("privileged"),
Publish: c.StringSlice("publish"),
PublishAll: c.Bool("publish-all"),
diff --git a/cmd/podman/create_cli.go b/cmd/podman/create_cli.go
index 218e9b806..b3a30d185 100644
--- a/cmd/podman/create_cli.go
+++ b/cmd/podman/create_cli.go
@@ -201,12 +201,13 @@ func parseVolumesFrom(volumesFrom []string) error {
}
func validateVolumeHostDir(hostDir string) error {
- if !filepath.IsAbs(hostDir) {
- return errors.Errorf("invalid host path, must be an absolute path %q", hostDir)
- }
- if _, err := os.Stat(hostDir); err != nil {
- return errors.Wrapf(err, "error checking path %q", hostDir)
+ if filepath.IsAbs(hostDir) {
+ if _, err := os.Stat(hostDir); err != nil {
+ return errors.Wrapf(err, "error checking path %q", hostDir)
+ }
}
+ // If hostDir is not an absolute path, that means the user wants to create a
+ // named volume. This will be done later on in the code.
return nil
}
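With this relaxation, a non-absolute source in `-v` is treated as a named volume rather than rejected. A hedged sketch (volume and image names are illustrative):

```
$ podman volume create mydata
$ podman run -v mydata:/data alpine ls /data
```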
diff --git a/cmd/podman/exists.go b/cmd/podman/exists.go
new file mode 100644
index 000000000..2e2559ec7
--- /dev/null
+++ b/cmd/podman/exists.go
@@ -0,0 +1,120 @@
+package main
+
+import (
+ "os"
+
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/pkg/errors"
+ "github.com/urfave/cli"
+)
+
+var (
+ imageExistsDescription = `
+ podman image exists
+
+ Check if an image exists in local storage
+`
+
+ imageExistsCommand = cli.Command{
+ Name: "exists",
+ Usage: "Check if an image exists in local storage",
+ Description: imageExistsDescription,
+ Action: imageExistsCmd,
+ ArgsUsage: "IMAGE-NAME",
+ OnUsageError: usageErrorHandler,
+ }
+)
+
+var (
+ containerExistsDescription = `
+ podman container exists
+
+ Check if a container exists in local storage
+`
+
+ containerExistsCommand = cli.Command{
+ Name: "exists",
+ Usage: "Check if a container exists in local storage",
+ Description: containerExistsDescription,
+ Action: containerExistsCmd,
+ ArgsUsage: "CONTAINER-NAME",
+ OnUsageError: usageErrorHandler,
+ }
+)
+
+var (
+ podExistsDescription = `
+ podman pod exists
+
+ Check if a pod exists in local storage
+`
+
+ podExistsCommand = cli.Command{
+ Name: "exists",
+ Usage: "Check if a pod exists in local storage",
+ Description: podExistsDescription,
+ Action: podExistsCmd,
+ ArgsUsage: "POD-NAME",
+ OnUsageError: usageErrorHandler,
+ }
+)
+
+func imageExistsCmd(c *cli.Context) error {
+ args := c.Args()
+ if len(args) > 1 || len(args) < 1 {
+ return errors.New("you may only check for the existence of one image at a time")
+ }
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+ if _, err := runtime.ImageRuntime().NewFromLocal(args[0]); err != nil {
+ if errors.Cause(err) == image.ErrNoSuchImage {
+ os.Exit(1)
+ }
+ return err
+ }
+ return nil
+}
+
+func containerExistsCmd(c *cli.Context) error {
+ args := c.Args()
+ if len(args) > 1 || len(args) < 1 {
+ return errors.New("you may only check for the existence of one container at a time")
+ }
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+ if _, err := runtime.LookupContainer(args[0]); err != nil {
+ if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ os.Exit(1)
+ }
+ return err
+ }
+ return nil
+}
+
+func podExistsCmd(c *cli.Context) error {
+ args := c.Args()
+ if len(args) > 1 || len(args) < 1 {
+ return errors.New("you may only check for the existence of one pod at a time")
+ }
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ if _, err := runtime.LookupPod(args[0]); err != nil {
+ if errors.Cause(err) == libpod.ErrNoSuchPod {
+ os.Exit(1)
+ }
+ return err
+ }
+ return nil
+}
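All three `exists` commands signal their result purely through the exit status: 0 when the object is found, 1 when it is not. An illustrative check (container name hypothetical):

```
$ podman container exists my_container
$ echo $?
0
```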
diff --git a/cmd/podman/generate.go b/cmd/podman/generate.go
new file mode 100644
index 000000000..765d0ee70
--- /dev/null
+++ b/cmd/podman/generate.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+ "github.com/urfave/cli"
+)
+
+var (
+ generateSubCommands = []cli.Command{
+ containerKubeCommand,
+ }
+
+ generateDescription = "generate structured data based on containers and pods"
+ kubeCommand = cli.Command{
+ Name: "generate",
+ Usage: "generate structured data",
+ Description: generateDescription,
+ ArgsUsage: "",
+ Subcommands: generateSubCommands,
+ UseShortOptionHandling: true,
+ OnUsageError: usageErrorHandler,
+ Hidden: true,
+ }
+)
diff --git a/cmd/podman/generate_kube.go b/cmd/podman/generate_kube.go
new file mode 100644
index 000000000..8f2f0de32
--- /dev/null
+++ b/cmd/podman/generate_kube.go
@@ -0,0 +1,111 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/rootless"
+ podmanVersion "github.com/containers/libpod/version"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+ "github.com/urfave/cli"
+ "k8s.io/api/core/v1"
+)
+
+var (
+ containerKubeFlags = []cli.Flag{
+ cli.BoolFlag{
+ Name: "service, s",
+ Usage: "Generate YAML for a Kubernetes service object",
+ },
+ }
+ containerKubeDescription = "Generate Kubernetes Pod YAML"
+ containerKubeCommand = cli.Command{
+ Name: "kube",
+ Usage: "Generate Kubernetes pod YAML for a container or pod",
+ Description: containerKubeDescription,
+ Flags: sortFlags(containerKubeFlags),
+ Action: generateKubeYAMLCmd,
+ ArgsUsage: "CONTAINER|POD-NAME",
+ UseShortOptionHandling: true,
+ OnUsageError: usageErrorHandler,
+ }
+)
+
+// generateKubeYAMLCmd generates Kubernetes pod YAML for a container or pod
+func generateKubeYAMLCmd(c *cli.Context) error {
+ var (
+ podYAML *v1.Pod
+ container *libpod.Container
+ err error
+ output []byte
+ pod *libpod.Pod
+ marshalledPod []byte
+ marshalledService []byte
+ servicePorts []v1.ServicePort
+ )
+
+ if rootless.IsRootless() {
+ return errors.Wrapf(libpod.ErrNotImplemented, "rootless users")
+ }
+ args := c.Args()
+ if len(args) > 1 || (len(args) < 1 && !c.Bool("latest")) {
+ return errors.Errorf("you must provide one container|pod ID or name or --latest")
+ }
+
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ // Get the container in question
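+ // If the name does not resolve to a container, fall back to looking it up as a pod.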
+ container, err = runtime.LookupContainer(args[0])
+ if err != nil {
+ pod, err = runtime.LookupPod(args[0])
+ if err != nil {
+ return err
+ }
+ podYAML, servicePorts, err = pod.GenerateForKube()
+ } else {
+ if len(container.Dependencies()) > 0 {
+ return errors.Wrapf(libpod.ErrNotImplemented, "containers with dependencies")
+ }
+ podYAML, err = container.GenerateForKube()
+ }
+ if err != nil {
+ return err
+ }
+
+ if c.Bool("service") {
+ serviceYAML := libpod.GenerateKubeServiceFromV1Pod(podYAML, servicePorts)
+ marshalledService, err = yaml.Marshal(serviceYAML)
+ if err != nil {
+ return err
+ }
+ }
+ // Marshal the results
+ marshalledPod, err = yaml.Marshal(podYAML)
+ if err != nil {
+ return err
+ }
+
+ header := `# Generation of Kubernetes YAML is still under development!
+#
+# Save the output of this file and use kubectl create -f to import
+# it into Kubernetes.
+#
+# Created with podman-%s
+`
+ output = append(output, []byte(fmt.Sprintf(header, podmanVersion.Version))...)
+ output = append(output, marshalledPod...)
+ if c.Bool("service") {
+ output = append(output, []byte("---\n")...)
+ output = append(output, marshalledService...)
+ }
+ // Output the v1.Pod with the v1.Container
+ fmt.Println(string(output))
+
+ return nil
+}
diff --git a/cmd/podman/image.go b/cmd/podman/image.go
index e67f61799..e978b9cf5 100644
--- a/cmd/podman/image.go
+++ b/cmd/podman/image.go
@@ -9,15 +9,17 @@ var (
buildCommand,
historyCommand,
importCommand,
+ imageExistsCommand,
inspectCommand,
loadCommand,
lsImagesCommand,
- // pruneCommand,
+ pruneImagesCommand,
pullCommand,
pushCommand,
rmImageCommand,
saveCommand,
tagCommand,
+ trustCommand,
}
imageDescription = "Manage images"
diff --git a/cmd/podman/images.go b/cmd/podman/images.go
index a8955e49e..a1aeb6042 100644
--- a/cmd/podman/images.go
+++ b/cmd/podman/images.go
@@ -6,8 +6,7 @@ import (
"sort"
"strings"
"time"
-
- "github.com/sirupsen/logrus"
+ "unicode"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/libpodruntime"
@@ -16,6 +15,7 @@ import (
"github.com/docker/go-units"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@@ -280,7 +280,9 @@ func getImagesTemplateOutput(ctx context.Context, runtime *libpod.Runtime, image
if !opts.noTrunc {
imageID = shortID(img.ID())
}
+
// get all specified repo:tag pairs and print them separately
+ outer:
for repo, tags := range image.ReposToMap(img.Names()) {
for _, tag := range tags {
size, err := img.Size(ctx)
@@ -289,6 +291,8 @@ func getImagesTemplateOutput(ctx context.Context, runtime *libpod.Runtime, image
sizeStr = err.Error()
} else {
sizeStr = units.HumanSizeWithPrecision(float64(*size), 3)
+ lastNumIdx := strings.LastIndexFunc(sizeStr, unicode.IsNumber)
+ sizeStr = sizeStr[:lastNumIdx+1] + " " + sizeStr[lastNumIdx+1:]
}
params := imagesTemplateParams{
Repository: repo,
@@ -300,6 +304,10 @@ func getImagesTemplateOutput(ctx context.Context, runtime *libpod.Runtime, image
Size: sizeStr,
}
imagesOutput = append(imagesOutput, params)
+ if opts.quiet { // Show only one image ID when quiet
+ break outer
+ }
+
}
}
}
@@ -374,13 +382,13 @@ func CreateFilterFuncs(ctx context.Context, r *libpod.Runtime, c *cli.Context, i
case "before":
before, err := r.ImageRuntime().NewFromLocal(splitFilter[1])
if err != nil {
- return nil, errors.Wrapf(err, "unable to find image % in local stores", splitFilter[1])
+ return nil, errors.Wrapf(err, "unable to find image %s in local stores", splitFilter[1])
}
filterFuncs = append(filterFuncs, image.CreatedBeforeFilter(before.Created()))
case "after":
after, err := r.ImageRuntime().NewFromLocal(splitFilter[1])
if err != nil {
- return nil, errors.Wrapf(err, "unable to find image % in local stores", splitFilter[1])
+ return nil, errors.Wrapf(err, "unable to find image %s in local stores", splitFilter[1])
}
filterFuncs = append(filterFuncs, image.CreatedAfterFilter(after.Created()))
case "dangling":
diff --git a/cmd/podman/images_prune.go b/cmd/podman/images_prune.go
new file mode 100644
index 000000000..cb72a498f
--- /dev/null
+++ b/cmd/podman/images_prune.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/shared"
+ "github.com/pkg/errors"
+ "github.com/urfave/cli"
+)
+
+var (
+ pruneImagesDescription = `
+ podman image prune
+
+ Removes all unnamed and unused images from local storage
+`
+
+ pruneImagesCommand = cli.Command{
+ Name: "prune",
+ Usage: "Remove unused images",
+ Description: pruneImagesDescription,
+ Action: pruneImagesCmd,
+ OnUsageError: usageErrorHandler,
+ }
+)
+
+func pruneImagesCmd(c *cli.Context) error {
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ return shared.Prune(runtime.ImageRuntime())
+}
diff --git a/cmd/podman/import.go b/cmd/podman/import.go
index be516e4fa..144354fa6 100644
--- a/cmd/podman/import.go
+++ b/cmd/podman/import.go
@@ -139,7 +139,7 @@ func downloadFromURL(source string) (string, error) {
_, err = io.Copy(outFile, response.Body)
if err != nil {
- return "", errors.Wrapf(err, "error saving %q to %q", source, outFile)
+ return "", errors.Wrapf(err, "error saving %s to %s", source, outFile.Name())
}
return outFile.Name(), nil
diff --git a/cmd/podman/inspect.go b/cmd/podman/inspect.go
index bd9e8c13c..6ffcde55f 100644
--- a/cmd/podman/inspect.go
+++ b/cmd/podman/inspect.go
@@ -119,7 +119,7 @@ func iterateInput(ctx context.Context, c *cli.Context, args []string, runtime *l
}
libpodInspectData, err := ctr.Inspect(c.Bool("size"))
if err != nil {
- inspectError = errors.Wrapf(err, "error getting libpod container inspect data %q", ctr.ID)
+ inspectError = errors.Wrapf(err, "error getting libpod container inspect data %s", ctr.ID())
break
}
data, err = shared.GetCtrInspectInfo(ctr, libpodInspectData)
@@ -154,12 +154,12 @@ func iterateInput(ctx context.Context, c *cli.Context, args []string, runtime *l
} else {
libpodInspectData, err := ctr.Inspect(c.Bool("size"))
if err != nil {
- inspectError = errors.Wrapf(err, "error getting libpod container inspect data %q", ctr.ID)
+ inspectError = errors.Wrapf(err, "error getting libpod container inspect data %s", ctr.ID())
break
}
data, err = shared.GetCtrInspectInfo(ctr, libpodInspectData)
if err != nil {
- inspectError = errors.Wrapf(err, "error parsing container data %q", ctr.ID)
+ inspectError = errors.Wrapf(err, "error parsing container data %s", ctr.ID())
break
}
}
diff --git a/cmd/podman/kill.go b/cmd/podman/kill.go
index 27882aeee..cfe4b4218 100644
--- a/cmd/podman/kill.go
+++ b/cmd/podman/kill.go
@@ -43,7 +43,6 @@ var (
// killCmd kills one or more containers with a signal
func killCmd(c *cli.Context) error {
var (
- lastError error
killFuncs []shared.ParallelWorkerInput
killSignal uint = uint(syscall.SIGTERM)
)
@@ -75,8 +74,12 @@ func killCmd(c *cli.Context) error {
containers, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
if err != nil {
- return err
+ if len(containers) == 0 {
+ return err
+ }
+ fmt.Println(err.Error())
}
+
for _, ctr := range containers {
con := ctr
f := func() error {
@@ -95,18 +98,6 @@ func killCmd(c *cli.Context) error {
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
- killErrors := shared.ParallelExecuteWorkerPool(maxWorkers, killFuncs)
-
- for cid, result := range killErrors {
- if result != nil {
- if len(killErrors) > 1 {
- fmt.Println(result.Error())
- }
- lastError = result
- continue
- }
- fmt.Println(cid)
- }
-
- return lastError
+ killErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, killFuncs)
+ return printParallelOutput(killErrors, errCount)
}
diff --git a/cmd/podman/libpodruntime/runtime.go b/cmd/podman/libpodruntime/runtime.go
index a4b3581be..d7a0dd931 100644
--- a/cmd/podman/libpodruntime/runtime.go
+++ b/cmd/podman/libpodruntime/runtime.go
@@ -11,31 +11,22 @@ import (
// GetRuntime generates a new libpod runtime configured by command line options
func GetRuntime(c *cli.Context) (*libpod.Runtime, error) {
- storageOpts, err := util.GetDefaultStoreOptions()
- if err != nil {
- return nil, err
- }
- return GetRuntimeWithStorageOpts(c, &storageOpts)
-}
+ storageOpts := new(storage.StoreOptions)
+ options := []libpod.RuntimeOption{}
-// GetContainerRuntime generates a new libpod runtime configured by command line options for containers
-func GetContainerRuntime(c *cli.Context) (*libpod.Runtime, error) {
- mappings, err := util.ParseIDMapping(c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidmap"), c.String("subgidmap"))
+ _, volumePath, err := util.GetDefaultStoreOptions()
if err != nil {
return nil, err
}
- storageOpts, err := util.GetDefaultStoreOptions()
- if err != nil {
- return nil, err
- }
- storageOpts.UIDMap = mappings.UIDMap
- storageOpts.GIDMap = mappings.GIDMap
- return GetRuntimeWithStorageOpts(c, &storageOpts)
-}
-// GetRuntime generates a new libpod runtime configured by command line options
-func GetRuntimeWithStorageOpts(c *cli.Context, storageOpts *storage.StoreOptions) (*libpod.Runtime, error) {
- options := []libpod.RuntimeOption{}
+ if c.IsSet("uidmap") || c.IsSet("gidmap") || c.IsSet("subuidmap") || c.IsSet("subgidmap") {
+ mappings, err := util.ParseIDMapping(c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidmap"), c.String("subgidmap"))
+ if err != nil {
+ return nil, err
+ }
+ storageOpts.UIDMap = mappings.UIDMap
+ storageOpts.GIDMap = mappings.GIDMap
+ }
if c.GlobalIsSet("root") {
storageOpts.GraphRoot = c.GlobalString("root")
@@ -90,8 +81,8 @@ func GetRuntimeWithStorageOpts(c *cli.Context, storageOpts *storage.StoreOptions
if c.GlobalIsSet("default-mounts-file") {
options = append(options, libpod.WithDefaultMountsFile(c.GlobalString("default-mounts-file")))
}
- if c.GlobalIsSet("hooks-dir-path") {
- options = append(options, libpod.WithHooksDir(c.GlobalString("hooks-dir-path")))
+ if c.GlobalIsSet("hooks-dir") {
+ options = append(options, libpod.WithHooksDir(c.GlobalStringSlice("hooks-dir")...))
}
// TODO flag to set CNI plugins dir?
@@ -104,6 +95,7 @@ func GetRuntimeWithStorageOpts(c *cli.Context, storageOpts *storage.StoreOptions
if c.IsSet("infra-command") {
options = append(options, libpod.WithDefaultInfraCommand(c.String("infra-command")))
}
+ options = append(options, libpod.WithVolumePath(volumePath))
if c.IsSet("config") {
return libpod.NewRuntimeFromConfig(c.String("config"), options...)
}
diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index aa26d1466..4452651f8 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -2,13 +2,13 @@ package main
import (
"bufio"
- "context"
"fmt"
"os"
"strings"
"github.com/containers/image/docker"
"github.com/containers/image/pkg/docker/config"
+ "github.com/containers/image/types"
"github.com/containers/libpod/libpod/common"
"github.com/pkg/errors"
"github.com/urfave/cli"
@@ -34,6 +34,10 @@ var (
Usage: "Pathname of a directory containing TLS certificates and keys used to connect to the registry",
},
cli.BoolTFlag{
+ Name: "get-login",
+ Usage: "Return the current login user for the registry",
+ },
+ cli.BoolTFlag{
Name: "tls-verify",
Usage: "Require HTTPS and verify certificates when contacting registries (default: true)",
},
@@ -60,27 +64,65 @@ func loginCmd(c *cli.Context) error {
if len(args) == 0 {
return errors.Errorf("registry must be given")
}
- server := scrubServer(args[0])
+ server := registryFromFullName(scrubServer(args[0]))
authfile := getAuthFile(c.String("authfile"))
sc := common.GetSystemContext("", authfile, false)
+ if c.IsSet("get-login") {
+ user, err := config.GetUserLoggedIn(sc, server)
+
+ if err != nil {
+ return errors.Wrapf(err, "unable to check for login user")
+ }
+
+ if user == "" {
+ return errors.Errorf("not logged into %s", server)
+ }
+
+ fmt.Printf("%s\n", user)
+ return nil
+ }
+
// username of user logged in to server (if one exists)
- userFromAuthFile, err := config.GetUserLoggedIn(sc, server)
+ userFromAuthFile, passFromAuthFile, err := config.GetAuthentication(sc, server)
if err != nil {
return errors.Wrapf(err, "error getting logged-in user")
}
- username, password, err := getUserAndPass(c.String("username"), c.String("password"), userFromAuthFile)
+
+ ctx := getContext()
+
+ var (
+ username string
+ password string
+ )
+
+ if userFromAuthFile != "" {
+ username = userFromAuthFile
+ password = passFromAuthFile
+ fmt.Println("Authenticating with existing credentials...")
+ if err := docker.CheckAuth(ctx, sc, username, password, server); err == nil {
+ fmt.Println("Existing credentials are valid. Already logged in to", server)
+ return nil
+ }
+ fmt.Println("Existing credentials are invalid, please enter valid username and password")
+ }
+
+ username, password, err = getUserAndPass(c.String("username"), c.String("password"), userFromAuthFile)
if err != nil {
return errors.Wrapf(err, "error getting username and password")
}
- sc.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify")
+
+ if c.IsSet("tls-verify") {
+ sc.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ }
if c.String("cert-dir") != "" {
sc.DockerCertPath = c.String("cert-dir")
}
- if err = docker.CheckAuth(context.TODO(), sc, username, password, server); err == nil {
- if err := config.SetAuthentication(sc, server, username, password); err != nil {
+ if err = docker.CheckAuth(ctx, sc, username, password, server); err == nil {
+ // Write the new credentials to the authfile
+ if err = config.SetAuthentication(sc, server, username, password); err != nil {
return err
}
}
@@ -126,3 +168,14 @@ func getUserAndPass(username, password, userFromAuthFile string) (string, string
}
return strings.TrimSpace(username), password, err
}
+
+// registryFromFullName gets the registry from the input. If the input is of the form
+// quay.io/myuser/myimage, it will parse it and just return quay.io
+// If the input has no registry component, it is returned unchanged.
+func registryFromFullName(input string) string {
+ split := strings.Split(input, "/")
+ if len(split) > 1 {
+ return split[0]
+ }
+ return split[0]
+}
diff --git a/cmd/podman/logs.go b/cmd/podman/logs.go
index 84aca5e61..75947c34e 100644
--- a/cmd/podman/logs.go
+++ b/cmd/podman/logs.go
@@ -40,14 +40,15 @@ var (
logsDescription = "The podman logs command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution" +
"order when combined with podman run (i.e. your run may not have generated any logs at the time you execute podman logs"
logsCommand = cli.Command{
- Name: "logs",
- Usage: "Fetch the logs of a container",
- Description: logsDescription,
- Flags: sortFlags(logsFlags),
- Action: logsCmd,
- ArgsUsage: "CONTAINER",
- SkipArgReorder: true,
- OnUsageError: usageErrorHandler,
+ Name: "logs",
+ Usage: "Fetch the logs of a container",
+ Description: logsDescription,
+ Flags: sortFlags(logsFlags),
+ Action: logsCmd,
+ ArgsUsage: "CONTAINER",
+ SkipArgReorder: true,
+ OnUsageError: usageErrorHandler,
+ UseShortOptionHandling: true,
}
)
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 38eac4504..2db6c5dec 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -2,13 +2,13 @@ package main
import (
"fmt"
+ "log/syslog"
"os"
"os/exec"
"runtime/pprof"
"syscall"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/hooks"
_ "github.com/containers/libpod/pkg/hooks/0.1.0"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/version"
@@ -17,7 +17,6 @@ import (
"github.com/sirupsen/logrus"
lsyslog "github.com/sirupsen/logrus/hooks/syslog"
"github.com/urfave/cli"
- "log/syslog"
)
// This is populated by the Makefile from the VERSION file
@@ -35,8 +34,10 @@ var cmdsNotRequiringRootless = map[string]bool{
// If this change, please also update libpod.refreshRootless()
"login": true,
"logout": true,
+ "mount": true,
"kill": true,
"pause": true,
+ "restart": true,
"run": true,
"unpause": true,
"search": true,
@@ -77,6 +78,7 @@ func main() {
infoCommand,
inspectCommand,
killCommand,
+ kubeCommand,
loadCommand,
loginCommand,
logoutCommand,
@@ -102,6 +104,7 @@ func main() {
umountCommand,
unpauseCommand,
versionCommand,
+ volumeCommand,
waitCommand,
}
@@ -205,11 +208,9 @@ func main() {
Usage: "path to default mounts file",
Hidden: true,
},
- cli.StringFlag{
- Name: "hooks-dir-path",
- Usage: "set the OCI hooks directory path",
- Value: hooks.DefaultDir,
- Hidden: true,
+ cli.StringSliceFlag{
+ Name: "hooks-dir",
+ Usage: "set the OCI hooks directory path (may be set multiple times)",
},
cli.IntFlag{
Name: "max-workers",
diff --git a/cmd/podman/mount.go b/cmd/podman/mount.go
index 585f506cd..c91115597 100644
--- a/cmd/podman/mount.go
+++ b/cmd/podman/mount.go
@@ -3,9 +3,11 @@ package main
import (
js "encoding/json"
"fmt"
+ "os"
of "github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -52,6 +54,9 @@ func mountCmd(c *cli.Context) error {
if err := validateFlags(c, mountFlags); err != nil {
return err
}
+ if os.Geteuid() != 0 {
+ rootless.SetSkipStorageSetup(true)
+ }
runtime, err := libpodruntime.GetRuntime(c)
if err != nil {
@@ -59,6 +64,22 @@ func mountCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
+ if os.Geteuid() != 0 {
+ if driver := runtime.GetConfig().StorageConfig.GraphDriverName; driver != "vfs" {
+ // Refuse to mount with any graph driver other than vfs when we are creating the user
+ // namespace as part of the mount command.
+ return fmt.Errorf("cannot mount using driver %s in rootless mode", driver)
+ }
+
+ became, ret, err := rootless.BecomeRootInUserNS()
+ if err != nil {
+ return err
+ }
+ if became {
+ os.Exit(ret)
+ }
+ }
+
formats := map[string]bool{
"": true,
of.JSONString: true,
diff --git a/cmd/podman/pause.go b/cmd/podman/pause.go
index 1e1585216..fcb2f3cb8 100644
--- a/cmd/podman/pause.go
+++ b/cmd/podman/pause.go
@@ -1,7 +1,6 @@
package main
import (
- "fmt"
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
@@ -37,7 +36,6 @@ var (
func pauseCmd(c *cli.Context) error {
var (
- lastError error
pauseContainers []*libpod.Container
pauseFuncs []shared.ParallelWorkerInput
)
@@ -90,17 +88,6 @@ func pauseCmd(c *cli.Context) error {
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
- pauseErrors := shared.ParallelExecuteWorkerPool(maxWorkers, pauseFuncs)
-
- for cid, result := range pauseErrors {
- if result != nil {
- if len(pauseErrors) > 1 {
- fmt.Println(result.Error())
- }
- lastError = result
- continue
- }
- fmt.Println(cid)
- }
- return lastError
+ pauseErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, pauseFuncs)
+ return printParallelOutput(pauseErrors, errCount)
}
diff --git a/cmd/podman/pod.go b/cmd/podman/pod.go
index 0c6ec5e8c..a30361134 100644
--- a/cmd/podman/pod.go
+++ b/cmd/podman/pod.go
@@ -11,6 +11,7 @@ Pods are a group of one or more containers sharing the same network, pid and ipc
`
podSubCommands = []cli.Command{
podCreateCommand,
+ podExistsCommand,
podInspectCommand,
podKillCommand,
podPauseCommand,
diff --git a/cmd/podman/pod_create.go b/cmd/podman/pod_create.go
index 63fa6b294..967ce7610 100644
--- a/cmd/podman/pod_create.go
+++ b/cmd/podman/pod_create.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -58,6 +59,10 @@ var podCreateFlags = []cli.Flag{
Name: "pod-id-file",
Usage: "Write the pod ID to the file",
},
+ cli.StringSliceFlag{
+ Name: "publish, p",
+ Usage: "Publish a container's port, or a range of ports, to the host (default [])",
+ },
cli.StringFlag{
Name: "share",
Usage: "A comma delimited list of kernel namespaces the pod will share",
@@ -102,6 +107,16 @@ func podCreateCmd(c *cli.Context) error {
defer podIdFile.Close()
defer podIdFile.Sync()
}
+
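+ // Publishing ports requires the infra container and is not supported for rootless pods.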
+ if len(c.StringSlice("publish")) > 0 {
+ if !c.BoolT("infra") {
+ return errors.Errorf("you must have an infra container to publish port bindings to the host")
+ }
+ if rootless.IsRootless() {
+ return errors.Errorf("rootless networking does not allow port binding to the host")
+ }
+ }
+
if !c.BoolT("infra") && c.IsSet("share") && c.String("share") != "none" && c.String("share") != "" {
return errors.Errorf("You cannot share kernel namespaces on the pod level without an infra container")
}
@@ -131,6 +146,14 @@ func podCreateCmd(c *cli.Context) error {
options = append(options, nsOptions...)
}
+ if len(c.StringSlice("publish")) > 0 {
+ portBindings, err := shared.CreatePortBindings(c.StringSlice("publish"))
+ if err != nil {
+ return err
+ }
+ options = append(options, libpod.WithInfraContainerPorts(portBindings))
+
+ }
// always have containers use pod cgroups
// User Opt out is not yet supported
options = append(options, libpod.WithPodCgroups())
diff --git a/cmd/podman/pod_stop.go b/cmd/podman/pod_stop.go
index 14114aa11..d49ba8a00 100644
--- a/cmd/podman/pod_stop.go
+++ b/cmd/podman/pod_stop.go
@@ -2,7 +2,6 @@ package main
import (
"fmt"
-
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -16,6 +15,10 @@ var (
Usage: "stop all running pods",
},
LatestPodFlag,
+ cli.UintFlag{
+ Name: "timeout, time, t",
+ Usage: "Seconds to wait for pod stop before killing the container",
+ },
}
podStopDescription = `
podman pod stop
@@ -35,6 +38,7 @@ var (
)
func podStopCmd(c *cli.Context) error {
+ timeout := -1
if err := checkMutuallyExclusiveFlags(c); err != nil {
return err
}
@@ -52,9 +56,12 @@ func podStopCmd(c *cli.Context) error {
ctx := getContext()
+ if c.IsSet("timeout") {
+ timeout = int(c.Uint("timeout"))
+ }
for _, pod := range pods {
// set cleanup to true to clean mounts and namespaces
- ctr_errs, err := pod.Stop(ctx, true)
+ ctr_errs, err := pod.StopWithTimeout(ctx, true, timeout)
if ctr_errs != nil {
for ctr, err := range ctr_errs {
if lastError != nil {
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index 83274c9a8..7a4a80769 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -184,7 +184,7 @@ var (
Usage: "Display the extended information",
},
cli.BoolFlag{
- Name: "pod",
+ Name: "pod, p",
Usage: "Print the ID and name of the pod the containers are associated with",
},
cli.BoolFlag{
@@ -200,6 +200,10 @@ var (
Usage: "Sort output by command, created, id, image, names, runningfor, size, or status",
Value: "created",
},
+ cli.BoolFlag{
+ Name: "sync",
+ Usage: "Sync container state with OCI runtime",
+ },
}
psDescription = "Prints out information about the containers"
psCommand = cli.Command{
@@ -260,6 +264,7 @@ func psCmd(c *cli.Context) error {
Size: c.Bool("size"),
Namespace: c.Bool("namespace"),
Sort: c.String("sort"),
+ Sync: c.Bool("sync"),
}
filters := c.StringSlice("filter")
diff --git a/cmd/podman/pull.go b/cmd/podman/pull.go
index 8fb3971bd..47130805e 100644
--- a/cmd/podman/pull.go
+++ b/cmd/podman/pull.go
@@ -64,7 +64,6 @@ specified, the image with the 'latest' tag (if it exists) is pulled
// pullCmd gets the data from the command line and calls pullImage
// to copy an image from a registry to a local machine
func pullCmd(c *cli.Context) error {
- forceSecure := false
runtime, err := libpodruntime.GetRuntime(c)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
@@ -104,12 +103,11 @@ func pullCmd(c *cli.Context) error {
}
dockerRegistryOptions := image2.DockerRegistryOptions{
- DockerRegistryCreds: registryCreds,
- DockerCertPath: c.String("cert-dir"),
- DockerInsecureSkipTLSVerify: !c.BoolT("tls-verify"),
+ DockerRegistryCreds: registryCreds,
+ DockerCertPath: c.String("cert-dir"),
}
if c.IsSet("tls-verify") {
- forceSecure = c.Bool("tls-verify")
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
}
// Possible for docker-archive to have multiple tags, so use LoadFromArchiveReference instead
@@ -125,7 +123,7 @@ func pullCmd(c *cli.Context) error {
imgID = newImage[0].ID()
} else {
authfile := getAuthFile(c.String("authfile"))
- newImage, err := runtime.ImageRuntime().New(getContext(), image, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image2.SigningOptions{}, true, forceSecure)
+ newImage, err := runtime.ImageRuntime().New(getContext(), image, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image2.SigningOptions{}, true)
if err != nil {
return errors.Wrapf(err, "error pulling image %q", image)
}
diff --git a/cmd/podman/push.go b/cmd/podman/push.go
index 331f92cd2..82589f3f1 100644
--- a/cmd/podman/push.go
+++ b/cmd/podman/push.go
@@ -81,7 +81,6 @@ func pushCmd(c *cli.Context) error {
var (
registryCreds *types.DockerAuthConfig
destName string
- forceSecure bool
)
args := c.Args()
@@ -108,7 +107,6 @@ func pushCmd(c *cli.Context) error {
}
certPath := c.String("cert-dir")
- skipVerify := !c.BoolT("tls-verify")
removeSignatures := c.Bool("remove-signatures")
signBy := c.String("sign-by")
@@ -145,14 +143,12 @@ func pushCmd(c *cli.Context) error {
}
}
- if c.IsSet("tls-verify") {
- forceSecure = c.Bool("tls-verify")
- }
-
dockerRegistryOptions := image.DockerRegistryOptions{
- DockerRegistryCreds: registryCreds,
- DockerCertPath: certPath,
- DockerInsecureSkipTLSVerify: skipVerify,
+ DockerRegistryCreds: registryCreds,
+ DockerCertPath: certPath,
+ }
+ if c.IsSet("tls-verify") {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
}
so := image.SigningOptions{
@@ -167,5 +163,5 @@ func pushCmd(c *cli.Context) error {
authfile := getAuthFile(c.String("authfile"))
- return newImage.PushImageToHeuristicDestination(getContext(), destName, manifestType, authfile, c.String("signature-policy"), writer, c.Bool("compress"), so, &dockerRegistryOptions, forceSecure, nil)
+ return newImage.PushImageToHeuristicDestination(getContext(), destName, manifestType, authfile, c.String("signature-policy"), writer, c.Bool("compress"), so, &dockerRegistryOptions, nil)
}
diff --git a/cmd/podman/restart.go b/cmd/podman/restart.go
index 2e264db79..c6fe1025a 100644
--- a/cmd/podman/restart.go
+++ b/cmd/podman/restart.go
@@ -2,10 +2,12 @@ package main
import (
"fmt"
+ "os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -46,10 +48,13 @@ func restartCmd(c *cli.Context) error {
var (
restartFuncs []shared.ParallelWorkerInput
containers []*libpod.Container
- lastError error
restartContainers []*libpod.Container
)
+ if os.Geteuid() != 0 {
+ rootless.SetSkipStorageSetup(true)
+ }
+
args := c.Args()
runOnly := c.Bool("running")
all := c.Bool("all")
@@ -98,6 +103,29 @@ func restartCmd(c *cli.Context) error {
}
}
+ maxWorkers := shared.Parallelize("restart")
+ if c.GlobalIsSet("max-workers") {
+ maxWorkers = c.GlobalInt("max-workers")
+ }
+
+ logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+ if rootless.IsRootless() {
+ // With rootless containers we cannot really restart an existing container
+ // as we would need to join the mount namespace as well to be able to reuse
+ // the storage.
+ if err := stopRootlessContainers(restartContainers, timeout, useTimeout, maxWorkers); err != nil {
+ return err
+ }
+ became, ret, err := rootless.BecomeRootInUserNS()
+ if err != nil {
+ return err
+ }
+ if became {
+ os.Exit(ret)
+ }
+ }
+
// We now have a slice of all the containers to be restarted. Iterate them to
// create restart Funcs with a timeout as needed
for _, ctr := range restartContainers {
@@ -117,22 +145,49 @@ func restartCmd(c *cli.Context) error {
})
}
- maxWorkers := shared.Parallelize("restart")
- if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
- }
+ restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, restartFuncs)
+ return printParallelOutput(restartErrors, errCount)
+}
- logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+func stopRootlessContainers(stopContainers []*libpod.Container, timeout uint, useTimeout bool, maxWorkers int) error {
+ var stopFuncs []shared.ParallelWorkerInput
+ for _, ctr := range stopContainers {
+ state, err := ctr.State()
+ if err != nil {
+ return err
+ }
+ if state != libpod.ContainerStateRunning {
+ continue
+ }
+
+ ctrTimeout := ctr.StopTimeout()
+ if useTimeout {
+ ctrTimeout = timeout
+ }
- restartErrors := shared.ParallelExecuteWorkerPool(maxWorkers, restartFuncs)
+ c := ctr
+ f := func() error {
+ return c.StopWithTimeout(ctrTimeout)
+ }
- for cid, result := range restartErrors {
- if result != nil {
- fmt.Println(result.Error())
- lastError = result
- continue
+ stopFuncs = append(stopFuncs, shared.ParallelWorkerInput{
+ ContainerID: c.ID(),
+ ParallelFunc: f,
+ })
+
+ restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, stopFuncs)
+ var lastError error
+ for _, result := range restartErrors {
+ if result != nil {
+ if errCount > 1 {
+ fmt.Println(result.Error())
+ }
+ lastError = result
+ }
+ }
+ if lastError != nil {
+ return lastError
}
- fmt.Println(cid)
}
- return lastError
+ return nil
}
diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go
index 067a2b5d4..bc2a71ba0 100644
--- a/cmd/podman/restore.go
+++ b/cmd/podman/restore.go
@@ -27,6 +27,10 @@ var (
// dedicated state for container which are checkpointed.
// TODO: add ContainerStateCheckpointed
cli.BoolFlag{
+ Name: "tcp-established",
+ Usage: "checkpoint a container with established TCP connections",
+ },
+ cli.BoolFlag{
Name: "all, a",
Usage: "restore all checkpointed containers",
},
@@ -53,16 +57,19 @@ func restoreCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- keep := c.Bool("keep")
+ options := libpod.ContainerCheckpointOptions{
+ Keep: c.Bool("keep"),
+ TCPEstablished: c.Bool("tcp-established"),
+ }
if err := checkAllAndLatest(c); err != nil {
return err
}
- containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "checkpointed")
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateExited, "checkpointed")
for _, ctr := range containers {
- if err = ctr.Restore(context.TODO(), keep); err != nil {
+ if err = ctr.Restore(context.TODO(), options); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index 0fb5345ee..7c0569b78 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
- "github.com/containers/libpod/libpod"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -46,9 +45,7 @@ Running containers will not be removed without the -f option.
// saveCmd saves the image to either docker-archive or oci
func rmCmd(c *cli.Context) error {
var (
- delContainers []*libpod.Container
- lastError error
- deleteFuncs []shared.ParallelWorkerInput
+ deleteFuncs []shared.ParallelWorkerInput
)
ctx := getContext()
@@ -65,7 +62,13 @@ func rmCmd(c *cli.Context) error {
return err
}
- delContainers, lastError = getAllOrLatestContainers(c, runtime, -1, "all")
+ delContainers, err := getAllOrLatestContainers(c, runtime, -1, "all")
+ if err != nil {
+ if len(delContainers) == 0 {
+ return err
+ }
+ fmt.Println(err.Error())
+ }
for _, container := range delContainers {
con := container
@@ -84,14 +87,7 @@ func rmCmd(c *cli.Context) error {
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
- deleteErrors := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
- for cid, result := range deleteErrors {
- if result != nil {
- fmt.Println(result.Error())
- lastError = result
- continue
- }
- fmt.Println(cid)
- }
- return lastError
+ // Run the parallel funcs
+ deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
+ return printParallelOutput(deleteErrors, errCount)
}
diff --git a/cmd/podman/rmi.go b/cmd/podman/rmi.go
index c0a0d69df..910c7ba35 100644
--- a/cmd/podman/rmi.go
+++ b/cmd/podman/rmi.go
@@ -46,6 +46,13 @@ var (
)
func rmiCmd(c *cli.Context) error {
+ var (
+ lastError error
+ deleted bool
+ deleteErr error
+ msg string
+ )
+
ctx := getContext()
if err := validateFlags(c, rmiFlags); err != nil {
return err
@@ -66,20 +73,18 @@ func rmiCmd(c *cli.Context) error {
}
images := args[:]
- var lastError error
- var deleted bool
removeImage := func(img *image.Image) {
deleted = true
- msg, err := runtime.RemoveImage(ctx, img, c.Bool("force"))
- if err != nil {
- if errors.Cause(err) == storage.ErrImageUsedByContainer {
+ msg, deleteErr = runtime.RemoveImage(ctx, img, c.Bool("force"))
+ if deleteErr != nil {
+ if errors.Cause(deleteErr) == storage.ErrImageUsedByContainer {
fmt.Printf("A container associated with containers/storage, i.e. via Buildah, CRI-O, etc., may be associated with this image: %-12.12s\n", img.ID())
}
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
- lastError = err
+ lastError = deleteErr
} else {
fmt.Println(msg)
}
@@ -91,8 +96,31 @@ func rmiCmd(c *cli.Context) error {
if err != nil {
return errors.Wrapf(err, "unable to query local images")
}
- for _, i := range imagesToDelete {
- removeImage(i)
+ lastNumberofImages := 0
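+ // Delete images in passes, skipping parents until their children are gone; error out if a pass makes no progress.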
+ for len(imagesToDelete) > 0 {
+ if lastNumberofImages == len(imagesToDelete) {
+ return errors.New("unable to delete all images; re-run the rmi command again.")
+ }
+ for _, i := range imagesToDelete {
+ isParent, err := i.IsParent()
+ if err != nil {
+ return err
+ }
+ if isParent {
+ continue
+ }
+ removeImage(i)
+ }
+ lastNumberofImages = len(imagesToDelete)
+ imagesToDelete, err = runtime.ImageRuntime().GetImages()
+ if err != nil {
+ return err
+ }
+ // If no images are left to delete or there is just one image left and it cannot be deleted,
+ // let's break out and display the error
+ if len(imagesToDelete) == 0 || (lastNumberofImages == 1 && lastError != nil) {
+ break
+ }
}
} else {
// Create image.image objects for deletion from user input.
diff --git a/cmd/podman/run.go b/cmd/podman/run.go
index af6ced45d..20cb85347 100644
--- a/cmd/podman/run.go
+++ b/cmd/podman/run.go
@@ -44,7 +44,7 @@ func runCmd(c *cli.Context) error {
rootless.SetSkipStorageSetup(true)
}
- runtime, err := libpodruntime.GetContainerRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(c)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
@@ -116,6 +116,11 @@ func runCmd(c *cli.Context) error {
if strings.Index(err.Error(), "permission denied") > -1 {
exitCode = 126
}
+ if c.IsSet("rm") {
+ if deleteError := runtime.RemoveContainer(ctx, ctr, true); deleteError != nil {
+ logrus.Errorf("unable to remove container %s after failing to start and attach to it", ctr.ID())
+ }
+ }
return err
}
diff --git a/cmd/podman/runlabel.go b/cmd/podman/runlabel.go
index e1dee1fb2..48a296260 100644
--- a/cmd/podman/runlabel.go
+++ b/cmd/podman/runlabel.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/image"
- "github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -94,7 +93,7 @@ func runlabelCmd(c *cli.Context) error {
imageName string
stdErr, stdOut io.Writer
stdIn io.Reader
- newImage *image.Image
+ extraArgs []string
)
// Evil images could trick into recursively executing the runlabel
@@ -124,6 +123,9 @@ func runlabelCmd(c *cli.Context) error {
return errors.Errorf("the display and quiet flags cannot be used together.")
}
+ if len(args) > 2 {
+ extraArgs = args[2:]
+ }
pull := c.Bool("pull")
label := args[0]
@@ -151,75 +153,26 @@ func runlabelCmd(c *cli.Context) error {
stdIn = nil
}
- if pull {
- var registryCreds *types.DockerAuthConfig
- if c.IsSet("creds") {
- creds, err := util.ParseRegistryCreds(c.String("creds"))
- if err != nil {
- return err
- }
- registryCreds = creds
- }
- dockerRegistryOptions := image.DockerRegistryOptions{
- DockerRegistryCreds: registryCreds,
- DockerCertPath: c.String("cert-dir"),
- DockerInsecureSkipTLSVerify: !c.BoolT("tls-verify"),
- }
- authfile := getAuthFile(c.String("authfile"))
-
- newImage, err = runtime.ImageRuntime().New(ctx, runlabelImage, c.String("signature-policy"), authfile, stdOut, &dockerRegistryOptions, image.SigningOptions{}, false, false)
- } else {
- newImage, err = runtime.ImageRuntime().NewFromLocal(runlabelImage)
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerCertPath: c.String("cert-dir"),
}
- if err != nil {
- return errors.Wrapf(err, "unable to find image")
+ if c.IsSet("tls-verify") {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
}
- if len(newImage.Names()) < 1 {
- imageName = newImage.ID()
- } else {
- imageName = newImage.Names()[0]
- }
-
- runLabel, err := newImage.GetLabel(ctx, label)
+ authfile := getAuthFile(c.String("authfile"))
+ runLabel, imageName, err := shared.GetRunlabel(label, runlabelImage, ctx, runtime, pull, c.String("creds"), dockerRegistryOptions, authfile, c.String("signature-policy"), stdOut)
if err != nil {
return err
}
-
- // If no label to execute, we return
if runLabel == "" {
return nil
}
- // The user provided extra arguments that need to be tacked onto the label's command
- if len(args) > 2 {
- runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(args[2:], " "))
- }
-
- cmd, err := shared.GenerateCommand(runLabel, imageName, c.String("name"))
+ cmd, env, err := shared.GenerateRunlabelCommand(runLabel, imageName, c.String("name"), opts, extraArgs)
if err != nil {
- return errors.Wrapf(err, "unable to generate command")
- }
- env := shared.GenerateRunEnvironment(c.String("name"), imageName, opts)
- env = append(env, "PODMAN_RUNLABEL_NESTED=1")
-
- envmap := envSliceToMap(env)
-
- envmapper := func(k string) string {
- switch k {
- case "OPT1":
- return envmap["OPT1"]
- case "OPT2":
- return envmap["OPT2"]
- case "OPT3":
- return envmap["OPT3"]
- }
- return ""
+ return err
}
-
- newS := os.Expand(strings.Join(cmd, " "), envmapper)
- cmd = strings.Split(newS, " ")
-
if !c.Bool("quiet") {
fmt.Printf("Command: %s\n", strings.Join(cmd, " "))
if c.Bool("display") {
@@ -228,12 +181,3 @@ func runlabelCmd(c *cli.Context) error {
}
return utils.ExecCmdWithStdStreams(stdIn, stdOut, stdErr, env, cmd[0], cmd[1:]...)
}
-
-func envSliceToMap(env []string) map[string]string {
- m := make(map[string]string)
- for _, i := range env {
- split := strings.Split(i, "=")
- m[split[0]] = strings.Join(split[1:], " ")
- }
- return m
-}
diff --git a/cmd/podman/save.go b/cmd/podman/save.go
index 7edc42e0d..139f3918a 100644
--- a/cmd/podman/save.go
+++ b/cmd/podman/save.go
@@ -146,7 +146,7 @@ func saveCmd(c *cli.Context) error {
return err
}
}
- if err := newImage.PushImageToReference(getContext(), destRef, manifestType, "", "", writer, c.Bool("compress"), libpodImage.SigningOptions{}, &libpodImage.DockerRegistryOptions{}, false, additionaltags); err != nil {
+ if err := newImage.PushImageToReference(getContext(), destRef, manifestType, "", "", writer, c.Bool("compress"), libpodImage.SigningOptions{}, &libpodImage.DockerRegistryOptions{}, additionaltags); err != nil {
if err2 := os.Remove(output); err2 != nil {
logrus.Errorf("error deleting %q: %v", output, err)
}
diff --git a/cmd/podman/search.go b/cmd/podman/search.go
index fa11dad32..442ebb57f 100644
--- a/cmd/podman/search.go
+++ b/cmd/podman/search.go
@@ -7,6 +7,7 @@ import (
"strings"
"github.com/containers/image/docker"
+ "github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/libpod/common"
sysreg "github.com/containers/libpod/pkg/registries"
@@ -72,11 +73,12 @@ type searchParams struct {
}
type searchOpts struct {
- filter []string
- limit int
- noTrunc bool
- format string
- authfile string
+ filter []string
+ limit int
+ noTrunc bool
+ format string
+ authfile string
+ insecureSkipTLSVerify types.OptionalBool
}
type searchFilterParams struct {
@@ -116,7 +118,10 @@ func searchCmd(c *cli.Context) error {
filter: c.StringSlice("filter"),
authfile: getAuthFile(c.String("authfile")),
}
- regAndSkipTLS, err := getRegistriesAndSkipTLS(c, registry)
+ if c.IsSet("tls-verify") {
+ opts.insecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ }
+ registries, err := getRegistries(registry)
if err != nil {
return err
}
@@ -126,7 +131,7 @@ func searchCmd(c *cli.Context) error {
return err
}
- return generateSearchOutput(term, regAndSkipTLS, opts, *filter)
+ return generateSearchOutput(term, registries, opts, *filter)
}
func genSearchFormat(format string) string {
@@ -157,16 +162,8 @@ func (s *searchParams) headerMap() map[string]string {
return values
}
-// A function for finding which registries can skip TLS
-func getRegistriesAndSkipTLS(c *cli.Context, registry string) (map[string]bool, error) {
- // Variables for setting up Registry and TLSVerify
- tlsVerify := c.BoolT("tls-verify")
- forceSecure := false
-
- if c.IsSet("tls-verify") {
- forceSecure = c.BoolT("tls-verify")
- }
-
+// getRegistries returns the list of registries to search, depending on an optional registry specification
+func getRegistries(registry string) ([]string, error) {
var registries []string
if registry != "" {
registries = append(registries, registry)
@@ -177,35 +174,10 @@ func getRegistriesAndSkipTLS(c *cli.Context, registry string) (map[string]bool,
return nil, errors.Wrapf(err, "error getting registries to search")
}
}
- regAndSkipTLS := make(map[string]bool)
- // If tls-verify is set to false, allow insecure always.
- if !tlsVerify {
- for _, reg := range registries {
- regAndSkipTLS[reg] = true
- }
- } else {
- // initially set all registries to verify with TLS
- for _, reg := range registries {
- regAndSkipTLS[reg] = false
- }
- // if the user didn't allow nor disallow insecure registries, check to see if the registry is insecure
- if !forceSecure {
- insecureRegistries, err := sysreg.GetInsecureRegistries()
- if err != nil {
- return nil, errors.Wrapf(err, "error getting insecure registries to search")
- }
- for _, reg := range insecureRegistries {
- // if there are any insecure registries in registries, allow for HTTP
- if _, ok := regAndSkipTLS[reg]; ok {
- regAndSkipTLS[reg] = true
- }
- }
- }
- }
- return regAndSkipTLS, nil
+ return registries, nil
}
-func getSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts, filter searchFilterParams) ([]searchParams, error) {
+func getSearchOutput(term string, registries []string, opts searchOpts, filter searchFilterParams) ([]searchParams, error) {
// Max number of queries by default is 25
limit := maxQueries
if opts.limit != 0 {
@@ -213,10 +185,10 @@ func getSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts
}
sc := common.GetSystemContext("", opts.authfile, false)
+ sc.DockerInsecureSkipTLSVerify = opts.insecureSkipTLSVerify
+ sc.SystemRegistriesConfPath = sysreg.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
var paramsArr []searchParams
- for reg, skipTLS := range regAndSkipTLS {
- // set the SkipTLSVerify bool depending on the registry being searched through
- sc.DockerInsecureSkipTLSVerify = skipTLS
+ for _, reg := range registries {
results, err := docker.SearchRegistry(context.TODO(), sc, reg, term, limit)
if err != nil {
logrus.Errorf("error searching registry %q: %v", reg, err)
@@ -276,8 +248,8 @@ func getSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts
return paramsArr, nil
}
-func generateSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts, filter searchFilterParams) error {
- searchOutput, err := getSearchOutput(term, regAndSkipTLS, opts, filter)
+func generateSearchOutput(term string, registries []string, opts searchOpts, filter searchFilterParams) error {
+ searchOutput, err := getSearchOutput(term, registries, opts, filter)
if err != nil {
return err
}
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index 4404268d4..6236d19b4 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -1,10 +1,10 @@
package shared
import (
+ "context"
"encoding/json"
"fmt"
- "github.com/cri-o/ocicni/pkg/ocicni"
- "github.com/docker/go-units"
+ "io"
"os"
"path/filepath"
"regexp"
@@ -13,9 +13,14 @@ import (
"sync"
"time"
+ "github.com/containers/image/types"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/inspect"
cc "github.com/containers/libpod/pkg/spec"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/docker/go-units"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -40,6 +45,7 @@ type PsOptions struct {
Sort string
Label string
Namespace bool
+ Sync bool
}
// BatchContainerStruct is the return object from BatchContainer and contains
@@ -121,6 +127,12 @@ func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput
pso PsContainerOutput
)
batchErr := ctr.Batch(func(c *libpod.Container) error {
+ if opts.Sync {
+ if err := c.Sync(); err != nil {
+ return err
+ }
+ }
+
conState, err = c.State()
if err != nil {
return errors.Wrapf(err, "unable to obtain container state")
@@ -589,3 +601,79 @@ func portsToString(ports []ocicni.PortMapping) string {
}
return strings.Join(portDisplay, ", ")
}
+
+// GetRunlabel is a helper function for runlabel; it gets the image if needed and begins the
+// construction of the runlabel output and environment variables
+func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtime *libpod.Runtime, pull bool, inputCreds string, dockerRegistryOptions image.DockerRegistryOptions, authfile string, signaturePolicyPath string, output io.Writer) (string, string, error) {
+ var (
+ newImage *image.Image
+ err error
+ imageName string
+ )
+ if pull {
+ var registryCreds *types.DockerAuthConfig
+ if inputCreds != "" {
+ creds, err := util.ParseRegistryCreds(inputCreds)
+ if err != nil {
+ return "", "", err
+ }
+ registryCreds = creds
+ }
+ dockerRegistryOptions.DockerRegistryCreds = registryCreds
+ newImage, err = runtime.ImageRuntime().New(ctx, runlabelImage, signaturePolicyPath, authfile, output, &dockerRegistryOptions, image.SigningOptions{}, false)
+ } else {
+ newImage, err = runtime.ImageRuntime().NewFromLocal(runlabelImage)
+ }
+ if err != nil {
+ return "", "", errors.Wrapf(err, "unable to find image")
+ }
+
+ if len(newImage.Names()) < 1 {
+ imageName = newImage.ID()
+ } else {
+ imageName = newImage.Names()[0]
+ }
+
+ runLabel, err := newImage.GetLabel(ctx, label)
+ return runLabel, imageName, err
+}
+
+// GenerateRunlabelCommand generates the command that will eventually be executed by podman
+func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string) ([]string, []string, error) {
+ // The user provided extra arguments that need to be tacked onto the label's command
+ if len(extraArgs) > 0 {
+ runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(extraArgs, " "))
+ }
+ cmd, err := GenerateCommand(runLabel, imageName, name)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "unable to generate command")
+ }
+ env := GenerateRunEnvironment(name, imageName, opts)
+ env = append(env, "PODMAN_RUNLABEL_NESTED=1")
+
+ envmap := envSliceToMap(env)
+
+ envmapper := func(k string) string {
+ switch k {
+ case "OPT1":
+ return envmap["OPT1"]
+ case "OPT2":
+ return envmap["OPT2"]
+ case "OPT3":
+ return envmap["OPT3"]
+ }
+ return ""
+ }
+ newS := os.Expand(strings.Join(cmd, " "), envmapper)
+ cmd = strings.Split(newS, " ")
+ return cmd, env, nil
+}
+
+func envSliceToMap(env []string) map[string]string {
+ m := make(map[string]string)
+ for _, i := range env {
+ split := strings.Split(i, "=")
+ m[split[0]] = strings.Join(split[1:], " ")
+ }
+ return m
+}
diff --git a/cmd/podman/shared/funcs.go b/cmd/podman/shared/funcs.go
index a92e0d547..8770b8ec0 100644
--- a/cmd/podman/shared/funcs.go
+++ b/cmd/podman/shared/funcs.go
@@ -5,13 +5,28 @@ import (
"os"
"path/filepath"
"strings"
+
+ "github.com/google/shlex"
)
func substituteCommand(cmd string) (string, error) {
+ var (
+ newCommand string
+ )
+
+ // Replace cmd with "/proc/self/exe" if "podman" or "docker" is being
+ // used. If "/usr/bin/docker" is provided, we also sub in podman.
+ // Otherwise, leave the command unchanged.
+ if cmd == "podman" || filepath.Base(cmd) == "docker" {
+ newCommand = "/proc/self/exe"
+ } else {
+ newCommand = cmd
+ }
+
// If cmd is an absolute or relative path, check if the file exists.
// Throw an error if it doesn't exist.
- if strings.Contains(cmd, "/") || strings.HasPrefix(cmd, ".") {
- res, err := filepath.Abs(cmd)
+ if strings.Contains(newCommand, "/") || strings.HasPrefix(newCommand, ".") {
+ res, err := filepath.Abs(newCommand)
if err != nil {
return "", err
}
@@ -22,16 +37,7 @@ func substituteCommand(cmd string) (string, error) {
}
}
- // Replace cmd with "/proc/self/exe" if "podman" or "docker" is being
- // used. Otherwise, leave the command unchanged.
- switch cmd {
- case "podman":
- fallthrough
- case "docker":
- return "/proc/self/exe", nil
- default:
- return cmd, nil
- }
+ return newCommand, nil
}
// GenerateCommand takes a label (string) and converts it to an executable command
@@ -42,7 +48,11 @@ func GenerateCommand(command, imageName, name string) ([]string, error) {
if name == "" {
name = imageName
}
- cmd := strings.Split(command, " ")
+
+ cmd, err := shlex.Split(command)
+ if err != nil {
+ return nil, err
+ }
prog, err := substituteCommand(cmd[0])
if err != nil {
diff --git a/cmd/podman/shared/funcs_test.go b/cmd/podman/shared/funcs_test.go
index 596df84e8..7506b9d9c 100644
--- a/cmd/podman/shared/funcs_test.go
+++ b/cmd/podman/shared/funcs_test.go
@@ -18,10 +18,11 @@ var (
)
func TestGenerateCommand(t *testing.T) {
- inputCommand := "docker run -it --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE echo install"
- correctCommand := "/proc/self/exe run -it --name bar -e NAME=bar -e IMAGE=foo foo echo install"
+ inputCommand := "docker run -it --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE echo \"hello world\""
+ correctCommand := "/proc/self/exe run -it --name bar -e NAME=bar -e IMAGE=foo foo echo hello world"
newCommand, err := GenerateCommand(inputCommand, "foo", "bar")
assert.Nil(t, err)
+ assert.Equal(t, "hello world", newCommand[11])
assert.Equal(t, correctCommand, strings.Join(newCommand, " "))
}
@@ -108,8 +109,8 @@ func TestGenerateCommandNoSetName(t *testing.T) {
}
func TestGenerateCommandNoName(t *testing.T) {
- inputCommand := "docker run -it -e IMAGE=IMAGE IMAGE echo install"
- correctCommand := "/proc/self/exe run -it -e IMAGE=foo foo echo install"
+ inputCommand := "docker run -it -e IMAGE=IMAGE IMAGE echo install"
+ correctCommand := "/proc/self/exe run -it -e IMAGE=foo foo echo install"
newCommand, err := GenerateCommand(inputCommand, "foo", "")
assert.Nil(t, err)
assert.Equal(t, correctCommand, strings.Join(newCommand, " "))
diff --git a/cmd/podman/shared/parallel.go b/cmd/podman/shared/parallel.go
index 633781a45..e6ce50f95 100644
--- a/cmd/podman/shared/parallel.go
+++ b/cmd/podman/shared/parallel.go
@@ -30,9 +30,10 @@ func ParallelWorker(wg *sync.WaitGroup, jobs <-chan ParallelWorkerInput, results
// ParallelExecuteWorkerPool takes container jobs and performs them in parallel. The worker
// int determines how many workers/threads should be premade.
-func ParallelExecuteWorkerPool(workers int, functions []ParallelWorkerInput) map[string]error {
+func ParallelExecuteWorkerPool(workers int, functions []ParallelWorkerInput) (map[string]error, int) {
var (
- wg sync.WaitGroup
+ wg sync.WaitGroup
+ errorCount int
)
resultChan := make(chan containerError, len(functions))
@@ -62,9 +63,12 @@ func ParallelExecuteWorkerPool(workers int, functions []ParallelWorkerInput) map
close(resultChan)
for ctrError := range resultChan {
results[ctrError.ContainerID] = ctrError.Err
+ if ctrError.Err != nil {
+ errorCount += 1
+ }
}
- return results
+ return results, errorCount
}
// Parallelize provides the maximum number of parallel workers (int) as calculated by a basic
diff --git a/cmd/podman/shared/pod.go b/cmd/podman/shared/pod.go
index 4e8e58c4d..30dd14845 100644
--- a/cmd/podman/shared/pod.go
+++ b/cmd/podman/shared/pod.go
@@ -1,7 +1,11 @@
package shared
import (
+ "strconv"
+
"github.com/containers/libpod/libpod"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/docker/go-connections/nat"
"github.com/pkg/errors"
)
@@ -95,3 +99,36 @@ func GetNamespaceOptions(ns []string) ([]libpod.PodCreateOption, error) {
}
return options, nil
}
+
+// CreatePortBindings converts port mappings and exposed ports into a format CNI understands
+func CreatePortBindings(ports []string) ([]ocicni.PortMapping, error) {
+ var portBindings []ocicni.PortMapping
+ // The conversion from []string to natBindings is temporary while mheon reworks the port
+ // deduplication code. Eventually that step will not be required.
+ _, natBindings, err := nat.ParsePortSpecs(ports)
+ if err != nil {
+ return nil, err
+ }
+ for containerPb, hostPb := range natBindings {
+ var pm ocicni.PortMapping
+ pm.ContainerPort = int32(containerPb.Int())
+ for _, i := range hostPb {
+ var hostPort int
+ var err error
+ pm.HostIP = i.HostIP
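+ // An empty host port means "publish on the same port number as the container".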
+ if i.HostPort == "" {
+ hostPort = containerPb.Int()
+ } else {
+ hostPort, err = strconv.Atoi(i.HostPort)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to convert host port to integer")
+ }
+ }
+
+ pm.HostPort = int32(hostPort)
+ pm.Protocol = containerPb.Proto()
+ portBindings = append(portBindings, pm)
+ }
+ }
+ return portBindings, nil
+}
diff --git a/cmd/podman/shared/prune.go b/cmd/podman/shared/prune.go
new file mode 100644
index 000000000..90cfe4475
--- /dev/null
+++ b/cmd/podman/shared/prune.go
@@ -0,0 +1,24 @@
+package shared
+
+import (
+ "fmt"
+ "github.com/pkg/errors"
+
+ "github.com/containers/libpod/libpod/image"
+)
+
+// Prune removes all unnamed and unused images from the local store
+func Prune(ir *image.Runtime) error {
+ pruneImages, err := ir.GetPruneImages()
+ if err != nil {
+ return err
+ }
+
+ for _, i := range pruneImages {
+ if err := i.Remove(true); err != nil {
+ return errors.Wrapf(err, "failed to remove %s", i.ID())
+ }
+ fmt.Println(i.ID())
+ }
+ return nil
+}
diff --git a/cmd/podman/start.go b/cmd/podman/start.go
index 8cf85405e..8bb386c68 100644
--- a/cmd/podman/start.go
+++ b/cmd/podman/start.go
@@ -1,11 +1,13 @@
package main
import (
+ "encoding/json"
"fmt"
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
+ cc "github.com/containers/libpod/pkg/spec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -132,6 +134,18 @@ func startCmd(c *cli.Context) error {
}
// Handle non-attach start
if err := ctr.Start(ctx); err != nil {
+ var createArtifact cc.CreateConfig
+ artifact, artifactErr := ctr.GetArtifact("create-config")
+ if artifactErr == nil {
+ if jsonErr := json.Unmarshal(artifact, &createArtifact); jsonErr != nil {
+ logrus.Errorf("unable to detect if container %s should be deleted", ctr.ID())
+ }
+ if createArtifact.Rm {
+ if rmErr := runtime.RemoveContainer(ctx, ctr, true); rmErr != nil {
+ logrus.Errorf("unable to remove container %s after it failed to start", ctr.ID())
+ }
+ }
+ }
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
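The change above consults the stored "create-config" artifact to decide whether a --rm container should be cleaned up after a failed start. A minimal sketch of that check with a local stand-in struct (the real CreateConfig in pkg/spec has many more fields, and its JSON tags may differ):
~~~
package main

import (
	"encoding/json"
	"fmt"
)

// createConfig is a tiny stand-in for the field of interest in the stored
// "create-config" artifact.
type createConfig struct {
	Rm bool `json:"rm"`
}

// shouldAutoRemove reports whether a container whose start failed should be
// removed, based on the raw artifact bytes.
func shouldAutoRemove(artifact []byte) (bool, error) {
	var cfg createConfig
	if err := json.Unmarshal(artifact, &cfg); err != nil {
		return false, err
	}
	return cfg.Rm, nil
}

func main() {
	rm, err := shouldAutoRemove([]byte(`{"rm": true}`))
	fmt.Println(rm, err) // true <nil>
}
~~~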
diff --git a/cmd/podman/stop.go b/cmd/podman/stop.go
index cb36fd5cd..ade51705e 100644
--- a/cmd/podman/stop.go
+++ b/cmd/podman/stop.go
@@ -2,6 +2,7 @@ package main
import (
"fmt"
+
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
@@ -59,7 +60,13 @@ func stopCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+ containers, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+ if err != nil {
+ if len(containers) == 0 {
+ return err
+ }
+ fmt.Println(err.Error())
+ }
var stopFuncs []shared.ParallelWorkerInput
for _, ctr := range containers {
@@ -71,7 +78,11 @@ func stopCmd(c *cli.Context) error {
stopTimeout = ctr.StopTimeout()
}
f := func() error {
- return con.StopWithTimeout(stopTimeout)
+ if err := con.StopWithTimeout(stopTimeout); err != nil && errors.Cause(err) != libpod.ErrCtrStopped {
+ return err
+ }
+ return nil
+
}
stopFuncs = append(stopFuncs, shared.ParallelWorkerInput{
ContainerID: con.ID(),
@@ -85,17 +96,6 @@ func stopCmd(c *cli.Context) error {
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
- stopErrors := shared.ParallelExecuteWorkerPool(maxWorkers, stopFuncs)
-
- for cid, result := range stopErrors {
- if result != nil && result != libpod.ErrCtrStopped {
- if len(stopErrors) > 1 {
- fmt.Println(result.Error())
- }
- lastError = result
- continue
- }
- fmt.Println(cid)
- }
- return lastError
+ stopErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, stopFuncs)
+ return printParallelOutput(stopErrors, errCount)
}
diff --git a/cmd/podman/trust.go b/cmd/podman/trust.go
new file mode 100644
index 000000000..7c404cd3f
--- /dev/null
+++ b/cmd/podman/trust.go
@@ -0,0 +1,293 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "sort"
+
+ "github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/formats"
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/trust"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/urfave/cli"
+)
+
+var (
+ setTrustFlags = []cli.Flag{
+ cli.StringFlag{
+ Name: "type, t",
+ Usage: "Trust type, accept values: signedBy(default), accept, reject.",
+ Value: "signedBy",
+ },
+ cli.StringSliceFlag{
+ Name: "pubkeysfile, f",
+ Usage: `Path of installed public key(s) to trust for TARGET.
+ Absolute path to keys is added to policy.json. May be
+ used multiple times to define multiple public keys.
+ File(s) must exist before using this command.`,
+ },
+ cli.StringFlag{
+ Name: "policypath",
+ Hidden: true,
+ },
+ }
+ showTrustFlags = []cli.Flag{
+ cli.BoolFlag{
+ Name: "raw",
+ Usage: "Output raw policy file",
+ },
+ cli.BoolFlag{
+ Name: "json, j",
+ Usage: "Output as json",
+ },
+ cli.StringFlag{
+ Name: "policypath",
+ Hidden: true,
+ },
+ cli.StringFlag{
+ Name: "registrypath",
+ Hidden: true,
+ },
+ }
+
+ setTrustDescription = "Set default trust policy or add a new trust policy for a registry"
+ setTrustCommand = cli.Command{
+ Name: "set",
+ Usage: "Set default trust policy or a new trust policy for a registry",
+ Description: setTrustDescription,
+ Flags: sortFlags(setTrustFlags),
+ ArgsUsage: "default | REGISTRY[/REPOSITORY]",
+ Action: setTrustCmd,
+ OnUsageError: usageErrorHandler,
+ }
+
+ showTrustDescription = "Display trust policy for the system"
+ showTrustCommand = cli.Command{
+ Name: "show",
+ Usage: "Display trust policy for the system",
+ Description: showTrustDescription,
+ Flags: sortFlags(showTrustFlags),
+ Action: showTrustCmd,
+ ArgsUsage: "",
+ UseShortOptionHandling: true,
+ OnUsageError: usageErrorHandler,
+ }
+
+ trustSubCommands = []cli.Command{
+ setTrustCommand,
+ showTrustCommand,
+ }
+
+ trustDescription = fmt.Sprintf(`Manages the trust policy of the host system. (%s)
+ Trust policy describes a registry scope that must be signed by public keys.`, getDefaultPolicyPath())
+ trustCommand = cli.Command{
+ Name: "trust",
+ Usage: "Manage container image trust policy",
+ Description: trustDescription,
+ ArgsUsage: "{set,show} ...",
+ Subcommands: trustSubCommands,
+ OnUsageError: usageErrorHandler,
+ }
+)
+
+func showTrustCmd(c *cli.Context) error {
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not create runtime")
+ }
+
+ var (
+ policyPath string
+ systemRegistriesDirPath string
+ )
+ if c.IsSet("policypath") {
+ policyPath = c.String("policypath")
+ } else {
+ policyPath = trust.DefaultPolicyPath(runtime.SystemContext())
+ }
+ policyContent, err := ioutil.ReadFile(policyPath)
+ if err != nil {
+ return errors.Wrapf(err, "unable to read %s", policyPath)
+ }
+ if c.IsSet("registrypath") {
+ systemRegistriesDirPath = c.String("registrypath")
+ } else {
+ systemRegistriesDirPath = trust.RegistriesDirPath(runtime.SystemContext())
+ }
+
+ if c.Bool("raw") {
+ _, err := os.Stdout.Write(policyContent)
+ if err != nil {
+ return errors.Wrap(err, "could not read trust policies")
+ }
+ return nil
+ }
+
+ var policyContentStruct trust.PolicyContent
+ if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil {
+ return errors.Errorf("could not read trust policies")
+ }
+ policyJSON, err := trust.GetPolicyJSON(policyContentStruct, systemRegistriesDirPath)
+ if err != nil {
+ return errors.Wrapf(err, "error reading registry config file")
+ }
+ if c.Bool("json") {
+ var outjson interface{}
+ outjson = policyJSON
+ out := formats.JSONStruct{Output: outjson}
+ return formats.Writer(out).Out()
+ }
+
+ sortedRepos := sortPolicyJSONKey(policyJSON)
+ type policydefault struct {
+ Repo string
+ Trusttype string
+ GPGid string
+ Sigstore string
+ }
+ var policyoutput []policydefault
+ for _, repo := range sortedRepos {
+ repoval := policyJSON[repo]
+ var defaultstruct policydefault
+ defaultstruct.Repo = repo
+ if repoval["type"] != nil {
+ defaultstruct.Trusttype = trustTypeDescription(repoval["type"].(string))
+ }
+ if repoval["keys"] != nil && len(repoval["keys"].([]string)) > 0 {
+ defaultstruct.GPGid = trust.GetGPGId(repoval["keys"].([]string))
+ }
+ if repoval["sigstore"] != nil {
+ defaultstruct.Sigstore = repoval["sigstore"].(string)
+ }
+ policyoutput = append(policyoutput, defaultstruct)
+ }
+ var output []interface{}
+ for _, ele := range policyoutput {
+ output = append(output, interface{}(ele))
+ }
+ out := formats.StdoutTemplateArray{Output: output, Template: "{{.Repo}}\t{{.Trusttype}}\t{{.GPGid}}\t{{.Sigstore}}"}
+ return formats.Writer(out).Out()
+}
+
+func setTrustCmd(c *cli.Context) error {
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not create runtime")
+ }
+
+ args := c.Args()
+ if len(args) != 1 {
+ return errors.Errorf("default or a registry name must be specified")
+ }
+ valid, err := image.IsValidImageURI(args[0])
+ if err != nil || !valid {
+ return errors.Wrapf(err, "invalid image uri %s", args[0])
+ }
+
+ trusttype := c.String("type")
+ if !isValidTrustType(trusttype) {
+ return errors.Errorf("invalid choice: %s (choose from 'accept', 'reject', 'signedBy')", trusttype)
+ }
+ if trusttype == "accept" {
+ trusttype = "insecureAcceptAnything"
+ }
+
+ pubkeysfile := c.StringSlice("pubkeysfile")
+ if len(pubkeysfile) == 0 && trusttype == "signedBy" {
+ return errors.Errorf("At least one public key must be defined for type 'signedBy'")
+ }
+
+ var policyPath string
+ if c.IsSet("policypath") {
+ policyPath = c.String("policypath")
+ } else {
+ policyPath = trust.DefaultPolicyPath(runtime.SystemContext())
+ }
+ var policyContentStruct trust.PolicyContent
+ _, err = os.Stat(policyPath)
+ if !os.IsNotExist(err) {
+ policyContent, err := ioutil.ReadFile(policyPath)
+ if err != nil {
+ return errors.Wrapf(err, "unable to read %s", policyPath)
+ }
+ if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil {
+ return errors.Errorf("could not read trust policies")
+ }
+ }
+ var newReposContent []trust.RepoContent
+ if len(pubkeysfile) != 0 {
+ for _, filepath := range pubkeysfile {
+ newReposContent = append(newReposContent, trust.RepoContent{Type: trusttype, KeyType: "GPGKeys", KeyPath: filepath})
+ }
+ } else {
+ newReposContent = append(newReposContent, trust.RepoContent{Type: trusttype})
+ }
+ if args[0] == "default" {
+ policyContentStruct.Default = newReposContent
+ } else {
+ exists := false
+ for transport, transportval := range policyContentStruct.Transports {
+ _, exists = transportval[args[0]]
+ if exists {
+ policyContentStruct.Transports[transport][args[0]] = newReposContent
+ break
+ }
+ }
+ if !exists {
+ if policyContentStruct.Transports == nil {
+ policyContentStruct.Transports = make(map[string]trust.RepoMap)
+ }
+ if policyContentStruct.Transports["docker"] == nil {
+ policyContentStruct.Transports["docker"] = make(map[string][]trust.RepoContent)
+ }
+ policyContentStruct.Transports["docker"][args[0]] = append(policyContentStruct.Transports["docker"][args[0]], newReposContent...)
+ }
+ }
+
+ data, err := json.MarshalIndent(policyContentStruct, "", " ")
+ if err != nil {
+ return errors.Wrapf(err, "error setting trust policy")
+ }
+ err = ioutil.WriteFile(policyPath, data, 0644)
+ if err != nil {
+ return errors.Wrapf(err, "error setting trust policy")
+ }
+ return nil
+}
+
+var typeDescription = map[string]string{"insecureAcceptAnything": "accept", "signedBy": "signed", "reject": "reject"}
+
+func trustTypeDescription(trustType string) string {
+ trustDescription, exist := typeDescription[trustType]
+ if !exist {
+ logrus.Warnf("invalid trust type %s", trustType)
+ }
+ return trustDescription
+}
+
+func sortPolicyJSONKey(m map[string]map[string]interface{}) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func isValidTrustType(t string) bool {
+ if t == "accept" || t == "insecureAcceptAnything" || t == "reject" || t == "signedBy" {
+ return true
+ }
+ return false
+}
+
+func getDefaultPolicyPath() string {
+ return trust.DefaultPolicyPath(&types.SystemContext{})
+}
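podman trust set ultimately rewrites containers-policy.json. A standalone sketch of the document shape it produces, using local struct definitions that follow the containers-policy.json(5) format rather than the trust package's Go types; the registry name and key path below are made-up examples:
~~~
package main

import (
	"encoding/json"
	"fmt"
)

// repoContent mirrors one requirement entry in containers-policy.json(5).
type repoContent struct {
	Type    string `json:"type"`
	KeyType string `json:"keyType,omitempty"`
	KeyPath string `json:"keyPath,omitempty"`
}

type policy struct {
	Default    []repoContent                       `json:"default"`
	Transports map[string]map[string][]repoContent `json:"transports,omitempty"`
}

func main() {
	p := policy{
		Default: []repoContent{{Type: "insecureAcceptAnything"}},
		Transports: map[string]map[string][]repoContent{
			"docker": {
				// roughly what: podman trust set -f /etc/pki/key.gpg registry.example.com
				// would add for the docker transport
				"registry.example.com": {{Type: "signedBy", KeyType: "GPGKeys", KeyPath: "/etc/pki/key.gpg"}},
			},
		},
	}
	out, _ := json.MarshalIndent(p, "", "    ")
	fmt.Println(string(out))
}
~~~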
diff --git a/cmd/podman/unpause.go b/cmd/podman/unpause.go
index 648fc9d3d..d77e056f8 100644
--- a/cmd/podman/unpause.go
+++ b/cmd/podman/unpause.go
@@ -1,7 +1,6 @@
package main
import (
- "fmt"
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
@@ -37,7 +36,6 @@ var (
func unpauseCmd(c *cli.Context) error {
var (
- lastError error
unpauseContainers []*libpod.Container
unpauseFuncs []shared.ParallelWorkerInput
)
@@ -90,18 +88,6 @@ func unpauseCmd(c *cli.Context) error {
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
- unpauseErrors := shared.ParallelExecuteWorkerPool(maxWorkers, unpauseFuncs)
-
- for cid, result := range unpauseErrors {
- if result != nil && result != libpod.ErrCtrStopped {
- if len(unpauseErrors) > 1 {
- fmt.Println(result.Error())
- }
- lastError = result
- continue
- }
- fmt.Println(cid)
- }
-
- return lastError
+ unpauseErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, unpauseFuncs)
+ return printParallelOutput(unpauseErrors, errCount)
}
diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go
index afeccb668..a59535b43 100644
--- a/cmd/podman/utils.go
+++ b/cmd/podman/utils.go
@@ -3,6 +3,9 @@ package main
import (
"context"
"fmt"
+ "os"
+ gosignal "os/signal"
+
"github.com/containers/libpod/libpod"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/term"
@@ -11,8 +14,6 @@ import (
"github.com/urfave/cli"
"golang.org/x/crypto/ssh/terminal"
"k8s.io/client-go/tools/remotecommand"
- "os"
- gosignal "os/signal"
)
type RawTtyFormatter struct {
@@ -207,3 +208,49 @@ func getPodsFromContext(c *cli.Context, r *libpod.Runtime) ([]*libpod.Pod, error
}
return pods, lastError
}
+
+func getVolumesFromContext(c *cli.Context, r *libpod.Runtime) ([]*libpod.Volume, error) {
+ args := c.Args()
+ var (
+ vols []*libpod.Volume
+ lastError error
+ err error
+ )
+
+ if c.Bool("all") {
+ vols, err = r.Volumes()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to get all volumes")
+ }
+ }
+
+ for _, i := range args {
+ vol, err := r.GetVolume(i)
+ if err != nil {
+ if lastError != nil {
+ logrus.Errorf("%q", lastError)
+ }
+ lastError = errors.Wrapf(err, "unable to find volume %s", i)
+ continue
+ }
+ vols = append(vols, vol)
+ }
+ return vols, lastError
+}
+
+// printParallelOutput takes the map of parallel worker results, prints them
+// to stdout, and returns the last error encountered, if any
+func printParallelOutput(m map[string]error, errCount int) error {
+ var lastError error
+ for cid, result := range m {
+ if result != nil {
+ if errCount > 1 {
+ fmt.Println(result.Error())
+ }
+ lastError = result
+ continue
+ }
+ fmt.Println(cid)
+ }
+ return lastError
+}
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index 4a4a1854c..376bbc950 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -343,15 +343,17 @@ type ListPodContainerInfo (
)
# PodCreate is an input structure for creating pods.
-# It emulates options to podman pod create, however
-# changing pause image name and pause container
-# is not currently supported
+# It emulates options to podman pod create. The infraCommand and
+# infraImage options are not currently supported.
type PodCreate (
name: string,
cgroupParent: string,
labels: [string]string,
share: []string,
- infra: bool
+ infra: bool,
+ infraCommand: string,
+ infraImage: string,
+ publish: []string
)
# ListPodData is the returned struct for an individual pod
@@ -371,6 +373,22 @@ type PodContainerErrorData (
reason: string
)
+# Runlabel describes the required input for container runlabel
+type Runlabel(
+ image: string,
+ authfile: string,
+ certDir: string,
+ creds: string,
+ display: bool,
+ name: string,
+ pull: bool,
+ signaturePolicyPath: string,
+ tlsVerify: bool,
+ label: string,
+ extraArgs: []string,
+ opts: [string]string
+)
+
# Ping provides a response for developers to ensure their varlink setup is working.
# #### Example
# ~~~
@@ -449,6 +467,13 @@ method ListContainerChanges(name: string) -> (container: ContainerChanges)
# path representing the target tarfile. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)
# error will be returned.
# The return value is the written tarfile.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ExportContainer '{"name": "flamboyant_payne", "path": "/tmp/payne.tar" }'
+# {
+# "tarfile": "/tmp/payne.tar"
+# }
+# ~~~
method ExportContainer(name: string, path: string) -> (tarfile: string)
# GetContainerStats takes the name or ID of a container and returns a single ContainerStats structure which
@@ -565,6 +590,18 @@ method RemoveContainer(name: string, force: bool) -> (container: string)
# DeleteStoppedContainers will delete all containers that are not running. It will return a list the deleted
# container IDs. See also [RemoveContainer](RemoveContainer).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteStoppedContainers
+# {
+# "containers": [
+# "451410b931d00def8aa9b4f8084e4d4a39e5e04ea61f358cf53a5cf95afcdcee",
+# "8b60f754a3e01389494a9581ade97d35c2765b6e2f19acd2d3040c82a32d1bc0",
+# "cf2e99d4d3cad6073df199ed32bbe64b124f3e1aba6d78821aa8460e70d30084",
+# "db901a329587312366e5ecff583d08f0875b4b79294322df67d90fc6eed08fc1"
+# ]
+# }
+# ~~~
method DeleteStoppedContainers() -> (containers: []string)
# ListImages returns an array of ImageInList structures which provide basic information about
@@ -594,9 +631,10 @@ method InspectImage(name: string) -> (image: string)
method HistoryImage(name: string) -> (history: []ImageHistory)
# PushImage takes three input arguments: the name or ID of an image, the fully-qualified destination name of the image,
-# and a boolean as to whether tls-verify should be used. It will return an [ImageNotFound](#ImageNotFound) error if
+# and a boolean as to whether tls-verify should be used (with false disabling TLS, not affecting the default behavior).
+# It will return an [ImageNotFound](#ImageNotFound) error if
# the image cannot be found in local storage; otherwise the ID of the image will be returned on success.
-method PushImage(name: string, tag: string, tlsverify: bool) -> (image: string)
+method PushImage(name: string, tag: string, tlsverify: bool, signaturePolicy: string, creds: string, certDir: string, compress: bool, format: string, removeSignatures: bool, signBy: string) -> (image: string)
# TagImage takes the name or ID of an image in local storage as well as the desired tag name. If the image cannot
# be found, an [ImageNotFound](#ImageNotFound) error will be returned; otherwise, the ID of the image is returned on success.
@@ -621,6 +659,18 @@ method SearchImage(name: string, limit: int) -> (images: []ImageSearch)
# DeleteUnusedImages deletes any images not associated with a container. The IDs of the deleted images are returned
# in a string array.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteUnusedImages
+# {
+# "images": [
+# "166ea6588079559c724c15223f52927f514f73dd5c5cf2ae2d143e3b2e6e9b52",
+# "da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
+# "3ef70f7291f47dfe2b82931a993e16f5a44a0e7a68034c3e0e086d77f5829adc",
+# "59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690"
+# ]
+# }
+# ~~~
method DeleteUnusedImages() -> (images: []string)
# Commit, creates an image from an existing container. It requires the name or
@@ -652,7 +702,7 @@ method ExportImage(name: string, destination: string, compress: bool, tags: []st
# "id": "426866d6fa419873f97e5cbd320eeb22778244c1dfffa01c944db3114f55772e"
# }
# ~~~
-method PullImage(name: string) -> (id: string)
+method PullImage(name: string, certDir: string, creds: string, signaturePolicy: string, tlsVerify: bool) -> (id: string)
# CreatePod creates a new empty pod. It uses a [PodCreate](#PodCreate) type for input.
# On success, the ID of the newly created pod will be returned.
@@ -672,11 +722,80 @@ method CreatePod(create: PodCreate) -> (pod: string)
# ListPods returns a list of pods in no particular order. They are
# returned as an array of ListPodData structs. See also [GetPod](#GetPod).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ListPods
+# {
+# "pods": [
+# {
+# "cgroup": "machine.slice",
+# "containersinfo": [
+# {
+# "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45",
+# "name": "1840835294cf-infra",
+# "status": "running"
+# },
+# {
+# "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6",
+# "name": "upbeat_murdock",
+# "status": "running"
+# }
+# ],
+# "createdat": "2018-12-07 13:10:15.014139258 -0600 CST",
+# "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f",
+# "name": "foobar",
+# "numberofcontainers": "2",
+# "status": "Running"
+# },
+# {
+# "cgroup": "machine.slice",
+# "containersinfo": [
+# {
+# "id": "1ca4b7bbba14a75ba00072d4b705c77f3df87db0109afaa44d50cb37c04a477e",
+# "name": "784306f655c6-infra",
+# "status": "running"
+# }
+# ],
+# "createdat": "2018-12-07 13:09:57.105112457 -0600 CST",
+# "id": "784306f655c6200aea321dd430ba685e9b2cc1f7d7528a72f3ff74ffb29485a2",
+# "name": "nostalgic_pike",
+# "numberofcontainers": "1",
+# "status": "Running"
+# }
+# ]
+# }
+# ~~~
method ListPods() -> (pods: []ListPodData)
# GetPod takes a name or ID of a pod and returns single [ListPodData](#ListPodData)
# structure. A [PodNotFound](#PodNotFound) error will be returned if the pod cannot be found.
# See also [ListPods](ListPods).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.GetPod '{"name": "foobar"}'
+# {
+# "pod": {
+# "cgroup": "machine.slice",
+# "containersinfo": [
+# {
+# "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45",
+# "name": "1840835294cf-infra",
+# "status": "running"
+# },
+# {
+# "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6",
+# "name": "upbeat_murdock",
+# "status": "running"
+# }
+# ],
+# "createdat": "2018-12-07 13:10:15.014139258 -0600 CST",
+# "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f",
+# "name": "foobar",
+# "numberofcontainers": "2",
+# "status": "Running"
+# }
+# }
+# ~~~
method GetPod(name: string) -> (pod: ListPodData)
# InspectPod takes the name or ID of a pod and returns a string representation of data associated with the
@@ -698,7 +817,7 @@ method InspectPod(name: string) -> (pod: string)
# ~~~
method StartPod(name: string) -> (pod: string)
-# StopPod stops containers in a pod. It takes the name or ID of a pod.
+# StopPod stops containers in a pod. It takes the name or ID of a pod and a timeout.
# If the pod cannot be found, a [PodNotFound](#PodNotFound) error will be returned instead.
# Containers in a pod are stopped independently. If there is an error stopping one container, the ID of those containers
# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
@@ -711,7 +830,7 @@ method StartPod(name: string) -> (pod: string)
# "pod": "135d71b9495f7c3967f536edad57750bfdb569336cd107d8aabab45565ffcfb6"
# }
# ~~~
-method StopPod(name: string) -> (pod: string)
+method StopPod(name: string, timeout: int) -> (pod: string)
# RestartPod will restart containers in a pod given a pod name or ID. Containers in
# the pod that are running will be stopped, then all stopped containers will be run.
@@ -734,6 +853,13 @@ method RestartPod(name: string) -> (pod: string)
# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
# If the pod was killed with no errors, the pod ID is returned.
# See also [StopPod](StopPod).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.KillPod '{"name": "foobar", "signal": 15}'
+# {
+# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+# }
+# ~~~
method KillPod(name: string, signal: int) -> (pod: string)
# PausePod takes the name or ID of a pod and pauses the running containers associated with it. If the pod cannot be found,
@@ -742,6 +868,13 @@ method KillPod(name: string, signal: int) -> (pod: string)
# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
# If the pod was paused with no errors, the pod ID is returned.
# See also [UnpausePod](#UnpausePod).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.PausePod '{"name": "foobar"}'
+# {
+# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+# }
+# ~~~
method PausePod(name: string) -> (pod: string)
# UnpausePod takes the name or ID of a pod and unpauses the paused containers associated with it. If the pod cannot be
@@ -750,6 +883,13 @@ method PausePod(name: string) -> (pod: string)
# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
# If the pod was unpaused with no errors, the pod ID is returned.
# See also [PausePod](#PausePod).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.UnpausePod '{"name": "foobar"}'
+# {
+# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+# }
+# ~~~
method UnpausePod(name: string) -> (pod: string)
# RemovePod takes the name or ID of a pod as well a boolean representing whether a running
@@ -804,6 +944,78 @@ method TopPod() -> (notimplemented: NotImplemented)
# ~~~
method GetPodStats(name: string) -> (pod: string, containers: []ContainerStats)
+# ImageExists takes a full or partial image ID or name and returns an int as to whether
+# the image exists in local storage. An int result of 0 means the image does exist in
+# local storage; whereas 1 indicates the image does not exist in local storage.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ImageExists '{"name": "imageddoesntexist"}'
+# {
+# "exists": 1
+# }
+# ~~~
+method ImageExists(name: string) -> (exists: int)
+
+# ContainerExists takes a full or partial container ID or name and returns an int as to
+# whether the container exists in local storage. A result of 0 means the container does
+# exist; whereas a result of 1 means it could not be found.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ContainerExists '{"name": "flamboyant_payne"}'{
+# "exists": 0
+# }
+# ~~~
+method ContainerExists(name: string) -> (exists: int)
+
+# ContainerCheckpoint performs a checkpoint on a container by its name or full/partial container
+# ID. On successful checkpoint, the id of the checkpointed container is returned.
+method ContainerCheckpoint(name: string, keep: bool, leaveRunning: bool, tcpEstablished: bool) -> (id: string)
+
+# ContainerRestore restores a container that has been checkpointed. The container to be restored can
+# be identified by its name or full/partial container ID. A successful restore will result in the return
+# of the container's ID.
+method ContainerRestore(name: string, keep: bool, tcpEstablished: bool) -> (id: string)
+
+# ContainerRunlabel executes a command as described by a given container image label.
+method ContainerRunlabel(runlabel: Runlabel) -> ()
+
+# ListContainerMounts gathers all the mounted container mount points and returns them as an array
+# of strings
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ListContainerMounts
+# {
+# "mounts": [
+# "/var/lib/containers/storage/overlay/b215fb622c65ba3b06c6d2341be80b76a9de7ae415ce419e65228873d4f0dcc8/merged",
+# "/var/lib/containers/storage/overlay/5eaf806073f79c0ed9a695180ad598e34f963f7407da1d2ccf3560bdab49b26f/merged",
+# "/var/lib/containers/storage/overlay/1ecb6b1dbb251737c7a24a31869096839c3719d8b250bf075f75172ddcc701e1/merged",
+# "/var/lib/containers/storage/overlay/7137b28a3c422165fe920cba851f2f8da271c6b5908672c451ebda03ad3919e2/merged"
+# ]
+# }
+# ~~~
+method ListContainerMounts() -> (mounts: []string)
+
+# MountContainer mounts a container by name or full/partial ID. Upon a successful mount, the destination
+# mount is returned as a string.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.MountContainer '{"name": "jolly_shannon"}'{
+# "path": "/var/lib/containers/storage/overlay/419eeb04e783ea159149ced67d9fcfc15211084d65e894792a96bedfae0470ca/merged"
+# }
+# ~~~
+method MountContainer(name: string) -> (path: string)
+
+# UnmountContainer unmounts a container by its name or full/partial container ID.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.UnmountContainer '{"name": "jolly_shannon", "force": false}'
+# {}
+# ~~~
+method UnmountContainer(name: string, force: bool) -> ()
+
+# This function is not implemented yet.
+method ListContainerPorts(name: string) -> (notimplemented: NotImplemented)
+
# ImageNotFound means the image could not be found by the provided name or ID in local storage.
error ImageNotFound (name: string)
diff --git a/cmd/podman/version.go b/cmd/podman/version.go
index d80f24a14..d81deb696 100644
--- a/cmd/podman/version.go
+++ b/cmd/podman/version.go
@@ -4,6 +4,7 @@ import (
"fmt"
"time"
+ "github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/libpod"
"github.com/pkg/errors"
"github.com/urfave/cli"
@@ -15,6 +16,19 @@ func versionCmd(c *cli.Context) error {
if err != nil {
errors.Wrapf(err, "unable to determine version")
}
+
+ versionOutputFormat := c.String("format")
+ if versionOutputFormat != "" {
+ var out formats.Writer
+ switch versionOutputFormat {
+ case formats.JSONString:
+ out = formats.JSONStruct{Output: output}
+ default:
+ out = formats.StdoutTemplate{Output: output, Template: versionOutputFormat}
+ }
+ formats.Writer(out).Out()
+ return nil
+ }
fmt.Println("Version: ", output.Version)
fmt.Println("Go Version: ", output.GoVersion)
if output.GitCommit != "" {
@@ -30,8 +44,17 @@ func versionCmd(c *cli.Context) error {
}
// Cli command to print out the full version of podman
-var versionCommand = cli.Command{
- Name: "version",
- Usage: "Display the PODMAN Version Information",
- Action: versionCmd,
-}
+var (
+ versionCommand = cli.Command{
+ Name: "version",
+ Usage: "Display the Podman Version Information",
+ Action: versionCmd,
+ Flags: versionFlags,
+ }
+ versionFlags = []cli.Flag{
+ cli.StringFlag{
+ Name: "format",
+ Usage: "Change the output format to JSON or a Go template",
+ },
+ }
+)
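The new --format flag selects either the JSON writer or a Go-template writer from the formats package. A rough standalone illustration of the template path, with a local stand-in for the version struct and example values:
~~~
package main

import (
	"os"
	"text/template"
)

// versionInfo is a local stand-in for the fields podman version reports.
type versionInfo struct {
	Version   string
	GoVersion string
	GitCommit string
}

func main() {
	v := versionInfo{Version: "0.12.1", GoVersion: "go1.11", GitCommit: "deadbeef"}
	// Equivalent in spirit to: podman version --format '{{.Version}} ({{.GitCommit}})'
	tmpl := template.Must(template.New("version").Parse("{{.Version}} ({{.GitCommit}})\n"))
	if err := tmpl.Execute(os.Stdout, v); err != nil {
		panic(err)
	}
}
~~~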
diff --git a/cmd/podman/volume.go b/cmd/podman/volume.go
new file mode 100644
index 000000000..913592e74
--- /dev/null
+++ b/cmd/podman/volume.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+ "github.com/urfave/cli"
+)
+
+var (
+ volumeDescription = `Manage volumes.
+
+Volumes are created in and can be shared between containers.`
+
+ volumeSubCommands = []cli.Command{
+ volumeCreateCommand,
+ volumeLsCommand,
+ volumeRmCommand,
+ volumeInspectCommand,
+ volumePruneCommand,
+ }
+ volumeCommand = cli.Command{
+ Name: "volume",
+ Usage: "Manage volumes",
+ Description: volumeDescription,
+ UseShortOptionHandling: true,
+ Subcommands: volumeSubCommands,
+ }
+)
diff --git a/cmd/podman/volume_create.go b/cmd/podman/volume_create.go
new file mode 100644
index 000000000..0b5f8d1e3
--- /dev/null
+++ b/cmd/podman/volume_create.go
@@ -0,0 +1,97 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/pkg/errors"
+ "github.com/urfave/cli"
+)
+
+var volumeCreateDescription = `
+podman volume create
+
+Creates a new volume. If using the default driver, "local", the volume will
+be created in podman's default volume directory.
+
+var volumeCreateFlags = []cli.Flag{
+ cli.StringFlag{
+ Name: "driver",
+ Usage: "Specify volume driver name (default local)",
+ },
+ cli.StringSliceFlag{
+ Name: "label, l",
+ Usage: "Set metadata for a volume (default [])",
+ },
+ cli.StringSliceFlag{
+ Name: "opt, o",
+ Usage: "Set driver specific options (default [])",
+ },
+}
+
+var volumeCreateCommand = cli.Command{
+ Name: "create",
+ Usage: "Create a new volume",
+ Description: volumeCreateDescription,
+ Flags: volumeCreateFlags,
+ Action: volumeCreateCmd,
+ SkipArgReorder: true,
+ ArgsUsage: "[VOLUME-NAME]",
+ UseShortOptionHandling: true,
+}
+
+func volumeCreateCmd(c *cli.Context) error {
+ var (
+ options []libpod.VolumeCreateOption
+ err error
+ volName string
+ )
+
+ if err = validateFlags(c, volumeCreateFlags); err != nil {
+ return err
+ }
+
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "error creating libpod runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ if len(c.Args()) > 1 {
+ return errors.Errorf("too many arguments, create takes at most 1 argument")
+ }
+
+ if len(c.Args()) > 0 {
+ volName = c.Args()[0]
+ options = append(options, libpod.WithVolumeName(volName))
+ }
+
+ if c.IsSet("driver") {
+ options = append(options, libpod.WithVolumeDriver(c.String("driver")))
+ }
+
+ labels, err := getAllLabels([]string{}, c.StringSlice("label"))
+ if err != nil {
+ return errors.Wrapf(err, "unable to process labels")
+ }
+ if len(labels) != 0 {
+ options = append(options, libpod.WithVolumeLabels(labels))
+ }
+
+ opts, err := getAllLabels([]string{}, c.StringSlice("opt"))
+ if err != nil {
+ return errors.Wrapf(err, "unable to process options")
+ }
+ if len(opts) != 0 {
+ options = append(options, libpod.WithVolumeOptions(opts))
+ }
+
+ vol, err := runtime.NewVolume(getContext(), options...)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s\n", vol.Name())
+
+ return nil
+}
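volume create folds repeated --label and --opt flags into maps via getAllLabels, which is not shown in this diff. A rough local approximation of the key=value parsing it relies on (behavior approximated, not podman's exact helper):
~~~
package main

import (
	"fmt"
	"strings"
)

// parseKeyValues turns ["k=v", "flag"] style arguments into a map,
// treating a bare key as having an empty value.
func parseKeyValues(args []string) map[string]string {
	out := make(map[string]string, len(args))
	for _, a := range args {
		kv := strings.SplitN(a, "=", 2)
		if len(kv) == 2 {
			out[kv[0]] = kv[1]
		} else {
			out[kv[0]] = ""
		}
	}
	return out
}

func main() {
	labels := parseKeyValues([]string{"app=db", "tier=backend"})
	opts := parseKeyValues([]string{"type=tmpfs", "o=size=100m"})
	fmt.Println(labels, opts)
}
~~~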
diff --git a/cmd/podman/volume_inspect.go b/cmd/podman/volume_inspect.go
new file mode 100644
index 000000000..152f1d098
--- /dev/null
+++ b/cmd/podman/volume_inspect.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/urfave/cli"
+)
+
+var volumeInspectDescription = `
+podman volume inspect
+
+Display detailed information on one or more volumes. Can change the format
+from JSON to a Go template.
+`
+
+var volumeInspectFlags = []cli.Flag{
+ cli.BoolFlag{
+ Name: "all, a",
+ Usage: "Inspect all volumes",
+ },
+ cli.StringFlag{
+ Name: "format, f",
+ Usage: "Format volume output using Go template",
+ Value: "json",
+ },
+}
+
+var volumeInspectCommand = cli.Command{
+ Name: "inspect",
+ Usage: "Display detailed information on one or more volumes",
+ Description: volumeInspectDescription,
+ Flags: volumeInspectFlags,
+ Action: volumeInspectCmd,
+ SkipArgReorder: true,
+ ArgsUsage: "[VOLUME-NAME ...]",
+ UseShortOptionHandling: true,
+}
+
+func volumeInspectCmd(c *cli.Context) error {
+ var err error
+
+ if err = validateFlags(c, volumeInspectFlags); err != nil {
+ return err
+ }
+
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "error creating libpod runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ opts := volumeLsOptions{
+ Format: c.String("format"),
+ }
+
+ vols, lastError := getVolumesFromContext(c, runtime)
+ if lastError != nil {
+ logrus.Errorf("%q", lastError)
+ }
+
+ return generateVolLsOutput(vols, opts, runtime)
+}
diff --git a/cmd/podman/volume_ls.go b/cmd/podman/volume_ls.go
new file mode 100644
index 000000000..0f94549ee
--- /dev/null
+++ b/cmd/podman/volume_ls.go
@@ -0,0 +1,308 @@
+package main
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/containers/libpod/cmd/podman/formats"
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/pkg/errors"
+ "github.com/urfave/cli"
+)
+
+// volumeOptions is the "ls" command options
+type volumeLsOptions struct {
+ Format string
+ Quiet bool
+}
+
+// volumeLsTemplateParams is the template parameters to list the volumes
+type volumeLsTemplateParams struct {
+ Name string
+ Labels string
+ MountPoint string
+ Driver string
+ Options string
+ Scope string
+}
+
+// volumeLsJSONParams is the JSON parameters to list the volumes
+type volumeLsJSONParams struct {
+ Name string `json:"name"`
+ Labels map[string]string `json:"labels"`
+ MountPoint string `json:"mountPoint"`
+ Driver string `json:"driver"`
+ Options map[string]string `json:"options"`
+ Scope string `json:"scope"`
+}
+
+var volumeLsDescription = `
+podman volume ls
+
+List all available volumes. The output of the volumes can be filtered
+and the output format can be changed to JSON or a user specified Go template.
+`
+
+var volumeLsFlags = []cli.Flag{
+ cli.StringFlag{
+ Name: "filter, f",
+ Usage: "Filter volume output",
+ },
+ cli.StringFlag{
+ Name: "format",
+ Usage: "Format volume output using Go template",
+ Value: "table {{.Driver}}\t{{.Name}}",
+ },
+ cli.BoolFlag{
+ Name: "quiet, q",
+ Usage: "Print volume output in quiet mode",
+ },
+}
+
+var volumeLsCommand = cli.Command{
+ Name: "ls",
+ Aliases: []string{"list"},
+ Usage: "List volumes",
+ Description: volumeLsDescription,
+ Flags: volumeLsFlags,
+ Action: volumeLsCmd,
+ SkipArgReorder: true,
+ UseShortOptionHandling: true,
+}
+
+func volumeLsCmd(c *cli.Context) error {
+ if err := validateFlags(c, volumeLsFlags); err != nil {
+ return err
+ }
+
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "error creating libpod runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ if len(c.Args()) > 0 {
+ return errors.Errorf("too many arguments, ls takes no arguments")
+ }
+
+ opts := volumeLsOptions{
+ Quiet: c.Bool("quiet"),
+ }
+ opts.Format = genVolLsFormat(c)
+
+ // Get the filter functions based on any filters set
+ var filterFuncs []libpod.VolumeFilter
+ if c.String("filter") != "" {
+ filters := strings.Split(c.String("filter"), ",")
+ for _, f := range filters {
+ filterSplit := strings.Split(f, "=")
+ if len(filterSplit) < 2 {
+ return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
+ }
+ generatedFunc, err := generateVolumeFilterFuncs(filterSplit[0], filterSplit[1], runtime)
+ if err != nil {
+ return errors.Wrapf(err, "invalid filter")
+ }
+ filterFuncs = append(filterFuncs, generatedFunc)
+ }
+ }
+
+ volumes, err := runtime.GetAllVolumes()
+ if err != nil {
+ return err
+ }
+
+ // Get the volumes that match the filter
+ volsFiltered := make([]*libpod.Volume, 0, len(volumes))
+ for _, vol := range volumes {
+ include := true
+ for _, filter := range filterFuncs {
+ include = include && filter(vol)
+ }
+
+ if include {
+ volsFiltered = append(volsFiltered, vol)
+ }
+ }
+ return generateVolLsOutput(volsFiltered, opts, runtime)
+}
+
+// generate the template based on conditions given
+func genVolLsFormat(c *cli.Context) string {
+ var format string
+ if c.String("format") != "" {
+ // "\t" from the command line is not being recognized as a tab
+ // replacing the string "\t" to a tab character if the user passes in "\t"
+ format = strings.Replace(c.String("format"), `\t`, "\t", -1)
+ }
+ if c.Bool("quiet") {
+ format = "{{.Name}}"
+ }
+ return format
+}
+
+// Convert output to genericParams for printing
+func volLsToGeneric(templParams []volumeLsTemplateParams, JSONParams []volumeLsJSONParams) (genericParams []interface{}) {
+ if len(templParams) > 0 {
+ for _, v := range templParams {
+ genericParams = append(genericParams, interface{}(v))
+ }
+ return
+ }
+ for _, v := range JSONParams {
+ genericParams = append(genericParams, interface{}(v))
+ }
+ return
+}
+
+// generate the accurate header based on template given
+func (vol *volumeLsTemplateParams) volHeaderMap() map[string]string {
+ v := reflect.Indirect(reflect.ValueOf(vol))
+ values := make(map[string]string)
+
+ for i := 0; i < v.NumField(); i++ {
+ key := v.Type().Field(i).Name
+ value := key
+ if value == "Name" {
+ value = "Volume" + value
+ }
+ values[key] = strings.ToUpper(splitCamelCase(value))
+ }
+ return values
+}
+
+// getVolTemplateOutput returns all the volumes in the volumeLsTemplateParams format
+func getVolTemplateOutput(lsParams []volumeLsJSONParams, opts volumeLsOptions) ([]volumeLsTemplateParams, error) {
+ var lsOutput []volumeLsTemplateParams
+
+ for _, lsParam := range lsParams {
+ var (
+ labels string
+ options string
+ )
+
+ for k, v := range lsParam.Labels {
+ label := k
+ if v != "" {
+ label += "=" + v
+ }
+ labels += label
+ }
+ for k, v := range lsParam.Options {
+ option := k
+ if v != "" {
+ option += "=" + v
+ }
+ options += option
+ }
+ params := volumeLsTemplateParams{
+ Name: lsParam.Name,
+ Driver: lsParam.Driver,
+ MountPoint: lsParam.MountPoint,
+ Scope: lsParam.Scope,
+ Labels: labels,
+ Options: options,
+ }
+
+ lsOutput = append(lsOutput, params)
+ }
+ return lsOutput, nil
+}
+
+// getVolJSONParams returns the volumes in JSON format
+func getVolJSONParams(volumes []*libpod.Volume, opts volumeLsOptions, runtime *libpod.Runtime) ([]volumeLsJSONParams, error) {
+ var lsOutput []volumeLsJSONParams
+
+ for _, volume := range volumes {
+ params := volumeLsJSONParams{
+ Name: volume.Name(),
+ Labels: volume.Labels(),
+ MountPoint: volume.MountPoint(),
+ Driver: volume.Driver(),
+ Options: volume.Options(),
+ Scope: volume.Scope(),
+ }
+
+ lsOutput = append(lsOutput, params)
+ }
+ return lsOutput, nil
+}
+
+// generateVolLsOutput generates the output based on the format, JSON or Go Template, and prints it out
+func generateVolLsOutput(volumes []*libpod.Volume, opts volumeLsOptions, runtime *libpod.Runtime) error {
+ if len(volumes) == 0 && opts.Format != formats.JSONString {
+ return nil
+ }
+ lsOutput, err := getVolJSONParams(volumes, opts, runtime)
+ if err != nil {
+ return err
+ }
+ var out formats.Writer
+
+ switch opts.Format {
+ case formats.JSONString:
+ if err != nil {
+ return errors.Wrapf(err, "unable to create JSON for volume output")
+ }
+ out = formats.JSONStructArray{Output: volLsToGeneric([]volumeLsTemplateParams{}, lsOutput)}
+ default:
+ lsOutput, err := getVolTemplateOutput(lsOutput, opts)
+ if err != nil {
+ return errors.Wrapf(err, "unable to create volume output")
+ }
+ out = formats.StdoutTemplateArray{Output: volLsToGeneric(lsOutput, []volumeLsJSONParams{}), Template: opts.Format, Fields: lsOutput[0].volHeaderMap()}
+ }
+ return formats.Writer(out).Out()
+}
+
+// generateVolumeFilterFuncs returns a filter function that returns true if the volume matches the given filter and value, and false otherwise.
+func generateVolumeFilterFuncs(filter, filterValue string, runtime *libpod.Runtime) (func(volume *libpod.Volume) bool, error) {
+ switch filter {
+ case "name":
+ return func(v *libpod.Volume) bool {
+ return strings.Contains(v.Name(), filterValue)
+ }, nil
+ case "driver":
+ return func(v *libpod.Volume) bool {
+ return v.Driver() == filterValue
+ }, nil
+ case "scope":
+ return func(v *libpod.Volume) bool {
+ return v.Scope() == filterValue
+ }, nil
+ case "label":
+ filterArray := strings.SplitN(filterValue, "=", 2)
+ filterKey := filterArray[0]
+ if len(filterArray) > 1 {
+ filterValue = filterArray[1]
+ } else {
+ filterValue = ""
+ }
+ return func(v *libpod.Volume) bool {
+ for labelKey, labelValue := range v.Labels() {
+ if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ return true
+ }
+ }
+ return false
+ }, nil
+ case "opt":
+ filterArray := strings.SplitN(filterValue, "=", 2)
+ filterKey := filterArray[0]
+ if len(filterArray) > 1 {
+ filterValue = filterArray[1]
+ } else {
+ filterValue = ""
+ }
+ return func(v *libpod.Volume) bool {
+ for labelKey, labelValue := range v.Options() {
+ if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ return true
+ }
+ }
+ return false
+ }, nil
+ }
+ return nil, errors.Errorf("%s is an invalid filter", filter)
+}
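volume ls builds one predicate per --filter key and keeps a volume only if every predicate matches. A compact standalone sketch of that composition over a local volume type (not libpod.Volume):
~~~
package main

import (
	"fmt"
	"strings"
)

// vol is a local stand-in with just the fields the filters need.
type vol struct {
	Name   string
	Driver string
	Labels map[string]string
}

type filterFunc func(vol) bool

func makeFilter(key, value string) (filterFunc, error) {
	switch key {
	case "name":
		return func(v vol) bool { return strings.Contains(v.Name, value) }, nil
	case "driver":
		return func(v vol) bool { return v.Driver == value }, nil
	case "label":
		kv := strings.SplitN(value, "=", 2)
		return func(v vol) bool {
			lv, ok := v.Labels[kv[0]]
			if !ok {
				return false
			}
			return len(kv) == 1 || lv == kv[1]
		}, nil
	}
	return nil, fmt.Errorf("%s is an invalid filter", key)
}

func main() {
	vols := []vol{
		{Name: "data", Driver: "local", Labels: map[string]string{"app": "db"}},
		{Name: "cache", Driver: "local"},
	}
	f, _ := makeFilter("label", "app=db")
	for _, v := range vols {
		if f(v) {
			fmt.Println(v.Name) // data
		}
	}
}
~~~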
diff --git a/cmd/podman/volume_prune.go b/cmd/podman/volume_prune.go
new file mode 100644
index 000000000..652c50f42
--- /dev/null
+++ b/cmd/podman/volume_prune.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/urfave/cli"
+)
+
+var volumePruneDescription = `
+podman volume prune
+
+Remove all unused volumes. Will prompt for confirmation if not
+using force.
+`
+
+var volumePruneFlags = []cli.Flag{
+ cli.BoolFlag{
+ Name: "force, f",
+ Usage: "Do not prompt for confirmation",
+ },
+}
+
+var volumePruneCommand = cli.Command{
+ Name: "prune",
+ Usage: "Remove all unused volumes",
+ Description: volumePruneDescription,
+ Flags: volumePruneFlags,
+ Action: volumePruneCmd,
+ SkipArgReorder: true,
+ UseShortOptionHandling: true,
+}
+
+func volumePruneCmd(c *cli.Context) error {
+ var lastError error
+
+ if err := validateFlags(c, volumePruneFlags); err != nil {
+ return err
+ }
+
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "error creating libpod runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ ctx := getContext()
+
+ // Prompt for confirmation if --force is not set
+ if !c.Bool("force") {
+ reader := bufio.NewReader(os.Stdin)
+ fmt.Println("WARNING! This will remove all volumes not used by at least one container.")
+ fmt.Print("Are you sure you want to continue? [y/N] ")
+ ans, err := reader.ReadString('\n')
+ if err != nil {
+ return errors.Wrapf(err, "error reading input")
+ }
+ if strings.ToLower(ans)[0] != 'y' {
+ return nil
+ }
+ }
+
+ volumes, err := runtime.GetAllVolumes()
+ if err != nil {
+ return err
+ }
+
+ for _, vol := range volumes {
+ err = runtime.RemoveVolume(ctx, vol, false, true)
+ if err == nil {
+ fmt.Println(vol.Name())
+ } else if err != libpod.ErrVolumeBeingUsed {
+ if lastError != nil {
+ logrus.Errorf("%q", lastError)
+ }
+ lastError = errors.Wrapf(err, "failed to remove volume %q", vol.Name())
+ }
+ }
+ return lastError
+}
diff --git a/cmd/podman/volume_rm.go b/cmd/podman/volume_rm.go
new file mode 100644
index 000000000..3fb623624
--- /dev/null
+++ b/cmd/podman/volume_rm.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/urfave/cli"
+)
+
+var volumeRmDescription = `
+podman volume rm
+
+Remove one or more existing volumes. Will only remove volumes that are
+not being used by any containers. To remove the volumes anyway, use the
+--force flag.
+`
+
+var volumeRmFlags = []cli.Flag{
+ cli.BoolFlag{
+ Name: "all, a",
+ Usage: "Remove all volumes",
+ },
+ cli.BoolFlag{
+ Name: "force, f",
+ Usage: "Remove a volume by force, even if it is being used by a container",
+ },
+}
+
+var volumeRmCommand = cli.Command{
+ Name: "rm",
+ Aliases: []string{"remove"},
+ Usage: "Remove one or more volumes",
+ Description: volumeRmDescription,
+ Flags: volumeRmFlags,
+ Action: volumeRmCmd,
+ ArgsUsage: "[VOLUME-NAME ...]",
+ SkipArgReorder: true,
+ UseShortOptionHandling: true,
+}
+
+func volumeRmCmd(c *cli.Context) error {
+ var err error
+
+ if err = validateFlags(c, volumeRmFlags); err != nil {
+ return err
+ }
+
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "error creating libpod runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ ctx := getContext()
+
+ vols, lastError := getVolumesFromContext(c, runtime)
+ for _, vol := range vols {
+ err = runtime.RemoveVolume(ctx, vol, c.Bool("force"), false)
+ if err != nil {
+ if lastError != nil {
+ logrus.Errorf("%q", lastError)
+ }
+ lastError = errors.Wrapf(err, "failed to remove volume %q", vol.Name())
+ } else {
+ fmt.Println(vol.Name())
+ }
+ }
+ return lastError
+}
diff --git a/commands.md b/commands.md
index c84938e64..43796722f 100644
--- a/commands.md
+++ b/commands.md
@@ -16,6 +16,7 @@
| [podman-diff(1)](/docs/podman-diff.1.md) | Inspect changes on a container or image's filesystem |[![...](/docs/play.png)](https://asciinema.org/a/FXfWB9CKYFwYM4EfqW3NSZy1G)|
| [podman-exec(1)](/docs/podman-exec.1.md) | Execute a command in a running container
| [podman-export(1)](/docs/podman-export.1.md) | Export container's filesystem contents as a tar archive |[![...](/docs/play.png)](https://asciinema.org/a/913lBIRAg5hK8asyIhhkQVLtV)|
+| [podman-generate(1)](/docs/podman-generate.1.md) | Generate structured output based on Podman containers and pods | |
| [podman-history(1)](/docs/podman-history.1.md) | Shows the history of an image |[![...](/docs/play.png)](https://asciinema.org/a/bCvUQJ6DkxInMELZdc5DinNSx)|
| [podman-image(1)](/docs/podman-image.1.md) | Manage Images||
| [podman-images(1)](/docs/podman-images.1.md) | List images in local storage |[![...](/docs/play.png)](https://asciinema.org/a/133649)|
@@ -62,4 +63,9 @@
| [podman-unpause(1)](/docs/podman-unpause.1.md) | Unpause one or more running containers |[![...](/docs/play.png)](https://asciinema.org/a/141292)|
| [podman-varlink(1)](/docs/podman-varlink.1.md) | Run the varlink backend ||
| [podman-version(1)](/docs/podman-version.1.md) | Display the version information |[![...](/docs/play.png)](https://asciinema.org/a/mfrn61pjZT9Fc8L4NbfdSqfgu)|
+| [podman-volume-create(1)](/docs/podman-volume-create.1.md) | Create a volume ||
+| [podman-volume-inspect(1)](/docs/podman-volume-inspect.1.md) | Get detailed information on one or more volumes ||
+| [podman-volume-ls(1)](/docs/podman-volume-ls.1.md) | List all the available volumes ||
+| [podman-volume-rm(1)](/docs/podman-volume-rm.1.md) | Remove one or more volumes ||
+| [podman-volume-prune(1)](/docs/podman-volume-prune.1.md) | Remove all unused volumes ||
| [podman-wait(1)](/docs/podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes |[![...](/docs/play.png)](https://asciinema.org/a/QNPGKdjWuPgI96GcfkycQtah0)|
diff --git a/completions/bash/podman b/completions/bash/podman
index c029f893a..4702ae0e0 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -5,7 +5,7 @@ __podman_previous_extglob_setting=$(shopt -p extglob)
shopt -s extglob
__podman_q() {
- podman ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@"
+ podman ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@"
}
# __podman_containers returns a list of containers. Additional options to
@@ -232,7 +232,7 @@ __podman_services() {
fields='$2' # names only
shift
fi
- __podman_q service ls "$@" | awk "NR>1 {print $fields}"
+ __podman_q service ls "$@" | awk "NR>1 {print $fields}"
}
# __podman_complete_services applies completion of services based on the current
@@ -689,25 +689,42 @@ __podman_images() {
__podman_q images $images_args | awk "$awk_script" | grep -v '<none>$'
}
+# __podman_complete_volumes applies completion of volumes based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+__podman_complete_volumes() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__podman_volume "$@")" -- "$current") )
+}
+
+__podman_complete_volume_names() {
+ local names=( $(__podman_q volume ls --quiet) )
+ COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") )
+}
+
+
_podman_attach() {
local options_with_args="
--detach-keys
"
local boolean_options="
- --help
- -h
- --latest
- -l
- --no-stdin
- --sig-proxy
+ --help
+ -h
+ --latest
+ -l
+ --no-stdin
+ --sig-proxy
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
@@ -716,20 +733,26 @@ _podman_container_attach() {
}
_podman_container_checkpoint() {
- local options_with_args="
- --help -h
- "
local boolean_options="
- --keep
+ -a
+ --all
+ -h
+ --help
-k
+ --keep
+ -l
+ --latest
+ -R
+ --leave-running
+ --tcp-established
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
@@ -785,6 +808,8 @@ _podman_container_refresh() {
local options_with_args="
"
local boolean_options="
+ --help
+ -h
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -794,20 +819,24 @@ _podman_container_restart() {
}
_podman_container_restore() {
- local options_with_args="
- --help -h
- "
local boolean_options="
- --keep
- -k
+ -a
+ --all
+ -h
+ --help
+ -k
+ --keep
+ -l
+ --latest
+ --tcp-established
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_created
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_created
+ ;;
esac
}
@@ -815,10 +844,6 @@ _podman_container_rm() {
_podman_rm
}
-_podman_container_run() {
- _podman_run
-}
-
_podman_container_start() {
_podman_start
}
@@ -851,6 +876,25 @@ _podman_container_wait() {
_podman_wait
}
+_podman_generate() {
+ local boolean_options="
+ --help
+ -h
+ "
+ subcommands="
+ kube
+ "
+ __podman_subcommands "$subcommands $aliases" && return
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
+ ;;
+ esac
+}
_podman_container() {
local boolean_options="
--help
@@ -863,6 +907,7 @@ _podman_container() {
create
diff
exec
+ exists
export
inspect
kill
@@ -871,6 +916,7 @@ _podman_container() {
mount
pause
port
+ prune
refresh
restart
restore
@@ -921,29 +967,29 @@ _podman_commit() {
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
_podman_build() {
local boolean_options="
- --force-rm
- --help
- -h
- --layers
- --no-cache
- --pull
- --pull-always
- --quiet
- -q
- --rm
- --squash
- --tls-verify
+ --force-rm
+ --help
+ -h
+ --layers
+ --no-cache
+ --pull
+ --pull-always
+ --quiet
+ -q
+ --rm
+ --squash
+ --tls-verify
"
local options_with_args="
@@ -996,18 +1042,18 @@ _podman_build() {
local all_options="$options_with_args $boolean_options"
case "$prev" in
- --runtime)
- COMPREPLY=($(compgen -W 'runc runv' -- "$cur"))
- ;;
- $(__podman_to_extglob "$options_with_args"))
+ --runtime)
+ COMPREPLY=($(compgen -W 'runc runv' -- "$cur"))
+ ;;
+ $(__podman_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
esac
}
@@ -1016,16 +1062,18 @@ _podman_diff() {
--format
"
local boolean_options="
- "
+ --help
+ -h
+ "
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
@@ -1037,19 +1085,21 @@ _podman_exec() {
-u
"
local boolean_options="
- --latest
- -l
- --privileged
- --tty
- -t
+ --help
+ -h
+ --latest
+ -l
+ --privileged
+ --tty
+ -t
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
@@ -1059,14 +1109,16 @@ _podman_export() {
-o
"
local boolean_options="
+ --help
+ -h
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
@@ -1075,19 +1127,21 @@ _podman_history() {
--format
"
local boolean_options="
- --human -H
- --no-trunc
- --quiet -q
+ --help
+ -h
+ --human -H
+ --no-trunc
+ --quiet -q
"
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_images --id
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_images --id
+ ;;
esac
}
@@ -1108,20 +1162,20 @@ _podman_import() {
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_list_images
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_list_images
+ ;;
esac
}
_podman_info() {
local boolean_options="
- --help
- -h
- --debug
+ --help
+ -h
+ --debug
"
local options_with_args="
--format
@@ -1130,12 +1184,12 @@ _podman_info() {
local all_options="$options_with_args $boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_list_images
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_list_images
+ ;;
esac
}
@@ -1191,14 +1245,16 @@ _podman_image() {
local boolean_options="
--help
-h
- "
+ "
subcommands="
build
+ exists
history
import
inspect
load
ls
+ prune
pull
push
rm
@@ -1222,20 +1278,20 @@ _podman_image() {
_podman_images() {
local boolean_options="
- -a
- --all
- --digests
- --digests
- -f
- --filter
- -h
- --help
- --no-trunc
- --notruncate
- -n
- --noheading
- -q
- --quiet
+ -a
+ --all
+ --digests
+ -f
+ --filter
+ -h
+ --help
+ --no-trunc
+ --notruncate
+ -n
+ --noheading
+ -q
+ --quiet
"
local options_with_args="
--format
@@ -1245,22 +1301,22 @@ _podman_images() {
local all_options="$options_with_args $boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_images --id
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_images --id
+ ;;
esac
}
_podman_inspect() {
local boolean_options="
- --help
- -h
- --latest
- -l
- "
+ --help
+ -h
+ --latest
+ -l
+ "
local options_with_args="
--format
-f
@@ -1325,20 +1381,20 @@ _podman_kill() {
--signal -s
"
local boolean_options="
- --all
- -a
- --help
- -h
- --latest
- -l
+ --all
+ -a
+ --help
+ -h
+ --latest
+ -l
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
@@ -1348,22 +1404,24 @@ _podman_logs() {
--tail
"
local boolean_options="
- --follow
- -f
- --latest
- -l
- --timestamps
- -t
+ --follow
+ -f
+ --help
+ -h
+ --latest
+ -l
+ --timestamps
+ -t
"
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_list_containers
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_list_containers
+ ;;
esac
}
@@ -1375,10 +1433,13 @@ _podman_pull() {
--signature-policy
"
local boolean_options="
- --all-tags -a
- --quiet
- -q
- --tls-verify
+ --all-tags
+ -a
+ --help
+ -h
+ --quiet
+ -q
+ --tls-verify
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -1391,7 +1452,9 @@ _podman_search() {
--limit
"
local boolean_options="
- --no-trunc
+ --help
+ -h
+ --no-trunc
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -1402,56 +1465,58 @@ _podman_unmount() {
_podman_umount() {
local boolean_options="
- --all
- -a
- --force
- -f
- --help
- -h
- "
+ --all
+ -a
+ --help
+ -h
+ --force
+ -f
+ "
local options_with_args="
- "
+ "
local all_options="$options_with_args $boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
_podman_mount() {
local boolean_options="
- --help
- -h
- --notruncate
- "
+ --help
+ -h
+ --notruncate
+ "
local options_with_args="
- --format
- "
+ --format
+ "
local all_options="$options_with_args $boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
_podman_push() {
local boolean_options="
- --compress
- --quiet
- -q
- --remove-signatures
- --tls-verify
+ --compress
+ --help
+ -h
+ --quiet
+ -q
+ --remove-signatures
+ --tls-verify
"
local options_with_args="
@@ -1466,12 +1531,12 @@ _podman_push() {
local all_options="$options_with_args $boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_images --id
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_images --id
+ ;;
esac
}
@@ -1553,16 +1618,17 @@ _podman_container_run() {
"
local boolean_options="
- --disable-content-trust=false
- --help
- --init
- --interactive -i
- --oom-kill-disable
- --privileged
- --publish-all -P
- --quiet
- --read-only
- --tty -t
+ --disable-content-trust=false
+ --help
+ -h
+ --init
+ --interactive -i
+ --oom-kill-disable
+ --privileged
+ --publish-all -P
+ --quiet
+ --read-only
+ --tty -t
"
if [ "$command" = "run" -o "$subcommand" = "run" ] ; then
@@ -1583,12 +1649,13 @@ _podman_container_run() {
fi
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_images --id
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ return
+ ;;
+ *)
+ __podman_complete_images --id
+ ;;
esac
@@ -1771,33 +1838,38 @@ _podman_restart() {
--timeout -t
"
local boolean_options="
- --all
- -a
- --latest
- -l
- --running
- --timeout
- -t"
+ --all
+ -a
+ --help
+ -h
+ --latest
+ -l
+ --running
+ --timeout
+ -t
+ "
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
_podman_rm() {
local boolean_options="
- --all
- -a
- --force
- -f
- --latest
- -l
- --volumes
- -v
+ --all
+ -a
+ --force
+ -f
+ --help
+ -h
+ --latest
+ -l
+ --volumes
+ -v
"
local options_with_args="
@@ -1806,52 +1878,53 @@ _podman_rm() {
local all_options="$options_with_args $boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
_podman_rmi() {
local boolean_options="
- --help
- -h
- --force
- -f
- -a
- --all
+ --all
+ -a
+ --force
+ -f
+ --help
+ -h
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_images --id
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_images --id
+ ;;
esac
}
_podman_stats() {
local boolean_options="
- --help
- --all
- -a
- --no-stream
- --format
- --no-reset
+ --all
+ -a
+ --help
+ -h
+ --no-stream
+ --format
+ --no-reset
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
@@ -1859,58 +1932,65 @@ _podman_tag() {
local options_with_args="
"
local boolean_options="
+ --help
+ -h
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_images
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_images
+ ;;
esac
}
__podman_top_descriptors() {
- podman top --list-descriptors
+ podman top --list-descriptors
}
__podman_complete_top_descriptors() {
- COMPREPLY=($(compgen -W "$(__podman_top_descriptors)" -- "$cur"))
+ COMPREPLY=($(compgen -W "$(__podman_top_descriptors)" -- "$cur"))
}
_podman_top() {
local options_with_args="
"
local boolean_options="
- --help
- -h
- --latest
- -l
+ --help
+ -h
+ --latest
+ -l
"
# podman-top works on only *one* container, which means that when we have
# three or more arguments, we can complete with top descriptors.
if [[ "${COMP_CWORD}" -ge 3 ]]; then
- __podman_complete_top_descriptors
- return
+ __podman_complete_top_descriptors
+ return
fi
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
_podman_version() {
- local options_with_args="
- "
- local boolean_options="
- "
- _complete_ "$options_with_args" "$boolean_options"
+ local boolean_options="
+ --help
+ -h
+ "
+ local options_with_args="
+ --format
+ "
+ local all_options="$options_with_args $boolean_options"
+
+ _complete_ "$options_with_args" "$boolean_options"
}
_podman_save() {
@@ -1919,56 +1999,61 @@ _podman_save() {
--format
"
local boolean_options="
- --compress
- -q
- --quiet
+ --compress
+ --help
+ -h
+ -q
+ --quiet
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_images --id
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_images --id
+ ;;
esac
}
_podman_pause() {
local boolean_options="
- -a
- --all
+ --all
+ -a
+ --help
+ -h
"
local options_with_args="
- --help -h
"
-    local boolean_options=""
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
_podman_port() {
local options_with_args="
- --help -h
"
local boolean_options="
- --all
- -a
- -l
- --latest"
+ --all
+ -a
+ --help
+ -h
+ -l
+ --latest
+ "
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
@@ -1984,13 +2069,15 @@ _podman_ps() {
--sort
"
local boolean_options="
- --all -a
- --latest -l
- --no-trunc
- --pod
- --quiet -q
- --size -s
- --namespace --ns
+ --all -a
+ --help -h
+ --latest -l
+ --no-trunc
+ --pod -p
+ --quiet -q
+ --size -s
+ --namespace --ns
+ --sync
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -2001,23 +2088,23 @@ _podman_start() {
"
local boolean_options="
- -h
- --help
- -a
- --attach
- -i
- --interactive
- --latest
- -l
- --sig-proxy
+ --attach
+ -a
+ -h
+ --help
+ -i
+ --interactive
+ --latest
+ -l
+ --sig-proxy
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
_podman_stop() {
@@ -2025,64 +2112,70 @@ _podman_stop() {
--timeout -t
"
local boolean_options="
- --all
- -a
- --latest
- -l"
+ --all
+ -a
+ -h
+ --help
+ --latest
+ -l
+ "
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_running
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_running
+ ;;
esac
}
_podman_unpause() {
local boolean_options="
- -a
- --all
+ --all
+ -a
+ --help
+ -h
"
local options_with_args="
- --help -h
"
- local boolean_options=""
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_containers_unpauseable
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_containers_unpauseable
+ ;;
esac
}
_podman_varlink() {
local options_with_args="
- --help -h
--timeout -t
"
- local boolean_options=""
+ local boolean_options="
+ --help
+ -h
+ "
_complete_ "$options_with_args" "$boolean_options"
}
_podman_wait() {
local options_with_args=""
local boolean_options="
- --help
- -h
- -i
+ --help
+ -h
+ -i
-l
- --interval
- --latest"
+ --interval
+ --latest
+ "
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_container_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_container_names
+ ;;
esac
}
@@ -2109,8 +2202,10 @@ _podman_load() {
--signature-policy
"
local boolean_options="
- --quiet
- -q
+ --help
+ -h
+ --quiet
+ -q
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -2122,10 +2217,11 @@ _podman_login() {
--password
-p
--authfile
+ --get-login
"
local boolean_options="
- --help
- -h
+ --help
+ -h
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -2135,14 +2231,22 @@ _podman_logout() {
--authfile
"
local boolean_options="
- --all
- -a
- --help
- -h
+ --all
+ -a
+ --help
+ -h
"
_complete_ "$options_with_args" "$boolean_options"
}
+_podman_generate_kube() {
+ local options_with_args=""
+
+ local boolean_options="
+ -s
+ --service
+ "
+
+    _complete_ "$options_with_args" "$boolean_options"
+}
+
_podman_container_runlabel() {
local options_with_args="
--authfile
@@ -2153,41 +2257,89 @@ _podman_container_runlabel() {
"
local boolean_options="
- --display
- --help
- -h
- -p
- --pull
- -q
- --quiet
- --tls-verify
+ --display
+ --help
+ -h
+ -p
+ --pull
+ -q
+ --quiet
+ --tls-verify
"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_images --id
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_images --id
+ ;;
esac
}
+_podman_images_prune() {
+ local options_with_args="
+ "
+
+ local boolean_options="
+ -h
+ --help
+ "
+}
+
+_podman_container_prune() {
+ local options_with_args="
+ "
+
+ local boolean_options="
+ -h
+ --help
+ "
+}
+
+_podman_container_exists() {
+ local options_with_args="
+ "
+
+ local boolean_options="
+ "
+}
+
+_podman_pod_exists() {
+ local options_with_args="
+ "
+
+ local boolean_options="
+ "
+}
+
+_podman_image_exists() {
+ local options_with_args="
+ "
+
+ local boolean_options="
+ "
+}
+
_podman_pod_create() {
local options_with_args="
--cgroup-parent
--infra-command
--infra-image
- --share
- --podidfile
--label-file
--label
-l
--name
+ --podidfile
+ --publish
+ -p
+ --share
"
local boolean_options="
- --infra
+ --help
+ -h
+ --infra
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -2197,21 +2349,23 @@ _podman_pod_kill() {
"
local boolean_options="
- --all
- -a
- --signal
- -s
- --latest
- -l
+ --all
+ -a
+ --help
+ -h
+ --signal
+ -s
+ --latest
+ -l
"
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_pod_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_pod_names
+ ;;
esac
}
@@ -2224,16 +2378,18 @@ __podman_pod_ps() {
"
local boolean_options="
- --cgroup
- --ctr-ids
- --ctr-names
- --ctr-status
- -q
- --quiet
- --no-trunc
- --labels
- -l
- --latest
+ --cgroup
+ --ctr-ids
+ --ctr-names
+ --ctr-status
+ --help
+ -h
+ -q
+ --quiet
+ --no-trunc
+ --labels
+ -l
+ --latest
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -2255,19 +2411,21 @@ _podman_pod_restart() {
"
local boolean_options="
- --all
- -a
- --latest
- -l
+ --all
+ -a
+ --help
+ -h
+ --latest
+ -l
"
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_pod_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_pod_names
+ ;;
esac
}
@@ -2276,21 +2434,23 @@ _podman_pod_rm() {
"
local boolean_options="
- -a
- --all
- -f
- --force
- --latest
- -l
+ -a
+ --all
+ --help
+ -h
+ -f
+ --force
+ --latest
+ -l
"
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_pod_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_pod_names
+ ;;
esac
}
@@ -2299,41 +2459,47 @@ _podman_pod_start() {
"
local boolean_options="
- --all
- -a
- --latest
- -l
+ --all
+ -a
+ --help
+ -h
+ --latest
+ -l
"
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_pod_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_pod_names
+ ;;
esac
}
_podman_pod_stop() {
local options_with_args="
+ -t
+ --timeout
"
local boolean_options="
- --all
- -a
- --cleanup
- --latest
- -l
+ --all
+ -a
+ --cleanup
+ --help
+ -h
+ --latest
+ -l
"
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
- ;;
- *)
- __podman_complete_pod_names
- ;;
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_pod_names
+ ;;
esac
}
@@ -2342,64 +2508,190 @@ _podman_pod_pause() {
"
local boolean_options="
+ --all
+ -a
+ --help
+ -h
+ --latest
+ -l
+ "
+ _complete_ "$options_with_args" "$boolean_options"
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_pod_names
+ ;;
+ esac
+}
+
+_podman_pod_unpause() {
+ local options_with_args="
+ "
+
+ local boolean_options="
+ --all
+ -a
+ --help
+ -h
+ --latest
+ -l
+ "
+ _complete_ "$options_with_args" "$boolean_options"
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __podman_complete_pod_names
+ ;;
+ esac
+}
+
+_podman_pod() {
+ local boolean_options="
+ --help
+ -h
+ "
+ subcommands="
+ create
+ kill
+ pause
+ ps
+ restart
+ rm
+ start
+ stats
+ stop
+ top
+ unpause
+ "
+ local aliases="
+ list
+ ls
+ "
+ __podman_subcommands "$subcommands $aliases" && return
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
+ ;;
+ esac
+}
+
+_podman_volume_create() {
+ local options_with_args="
+ --driver
+ --label
+ -l
+ --opt
+ -o
+ "
+
+ local boolean_options="
+ --help
+ -h
+ "
+
+ _complete_ "$options_with_args" "$boolean_options"
+}
+
+_podman_volume_ls() {
+ local options_with_args="
+ --filter
+ --format
+ -f
+ "
+
+ local boolean_options="
+ --help
+ -h
+ --quiet
+ -q
+ "
+
+ _complete_ "$options_with_args" "$boolean_options"
+}
+
+_podman_volume_inspect() {
+ local options_with_args="
+ --format
+ -f
+ "
+
+ local boolean_options="
--all
-a
- --latest
- -l
+ --help
+ -h
"
+
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
- __podman_complete_pod_names
+ __podman_complete_volume_names
;;
esac
}
-_podman_pod_unpause() {
- local options_with_args="
- "
+_podman_volume_rm() {
+ local options_with_args=""
local boolean_options="
--all
-a
- --latest
- -l
+ --force
+ -f
+ --help
+ -h
"
+
_complete_ "$options_with_args" "$boolean_options"
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
- __podman_complete_pod_names
+ __podman_complete_volume_names
;;
esac
}
-_podman_pod() {
+_podman_volume_prune() {
+ local options_with_args=""
+
+ local boolean_options="
+ --force
+ -f
+ --help
+ -h
+ "
+
+ _complete_ "$options_with_args" "$boolean_options"
+}
+
+_podman_volume() {
local boolean_options="
--help
-h
"
subcommands="
create
- kill
- pause
- ps
- restart
+ inspect
+ ls
rm
- start
- stats
- stop
- top
- unpause
+ prune
"
local aliases="
list
- ls
+ remove
"
__podman_subcommands "$subcommands $aliases" && return
@@ -2415,19 +2707,21 @@ _podman_pod() {
_podman_podman() {
local options_with_args="
- --config -c
- --cpu-profile
- --root
- --runroot
- --storage-driver
- --storage-opt
- --log-level
- --namespace
+ --config -c
+ --cpu-profile
+ --root
+ --runroot
+ --storage-driver
+ --storage-opt
+ --log-level
+ --namespace
"
local boolean_options="
- --help -h
- --version -v
- --syslog
+ --help
+ -h
+ --version
+ -v
+ --syslog
"
commands="
attach
@@ -2438,6 +2732,7 @@ _podman_podman() {
diff
exec
export
+ generate
history
images
import
@@ -2471,6 +2766,7 @@ _podman_podman() {
unpause
varlink
version
+ volume
wait
"
diff --git a/contrib/cirrus/README.md b/contrib/cirrus/README.md
index 0d315c4f5..e175479f1 100644
--- a/contrib/cirrus/README.md
+++ b/contrib/cirrus/README.md
@@ -5,6 +5,7 @@
Similar to other integrated github CI/CD services, Cirrus utilizes a simple
YAML-based configuration/description file: ``.cirrus.yml``. Ref: https://cirrus-ci.org/
+
## Workflow
All tasks execute in parallel, unless there are conditions or dependencies
@@ -12,24 +13,34 @@ which alter this behavior. Within each task, each script executes in sequence,
so long as any previous script exited successfully. The overall state of each
task (pass or fail) is set based on the exit status of the last script to execute.
-### ``full_vm_testing`` Task
-1. Unconditionally, spin up one VM per ``matrix: image_name`` item defined
- in ``.cirrus.yml``. Once accessible, ``ssh`` into each VM and run the following
- scripts.
+### ``gating`` Task
+
+***N/B: Steps below are performed by automation***
+
+1. Launch a purpose-built container in Cirrus's community cluster.
+ For container image details, please see
+ [the contributors guide](https://github.com/containers/libpod/blob/master/CONTRIBUTING.md#go-format-and-lint).
+
+3. ``validate``: Perform standard `make validate` source verification.
+   This should run for less than a minute or two.
+
+4. ``lint``: Execute regular `make lint` to check for any code cruft.
+   This should also complete within a few minutes.
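+
+As a local pre-flight check (only a sketch; the CI container may pin
+different tool versions than a developer workstation), the same targets can
+be run from the repository root before pushing:
+
+```
+$ cd $GOPATH/src/github.com/containers/libpod
+$ make validate   # same check as the 'validate' script
+$ make lint       # same check as the 'lint' script
+```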
-2. ``setup_environment.sh``: Configure root's ``.bash_profile``
- for all subsequent scripts (each run in a new shell). Any
- distribution-specific environment variables are also defined
- here. For example, setting tags/flags to use compiling.
-3. ``verify_source.sh``: Perform per-distribution source
- verification, lint-checking, etc. This acts as a minimal
- gate, blocking extended use of VMs when a PR's code or commits
- would otherwise not be accepted. Should run for less than a minute.
+### ``testing`` Task
-4. ``unit_test.sh``: Execute unit-testing, as defined by the ``Makefile``.
- This should execute within 10-minutes, but often much faster.
+***N/B: Steps below are performed by automation***
+
+1. After `gating` passes, spin up one VM per
+ `matrix: image_name` item. Once accessible, ``ssh``
+ into each VM as the `root` user.
+
+2. ``setup_environment.sh``: Configure root's `.bash_profile`
+ for all subsequent scripts (each run in a new shell). Any
+ distribution-specific environment variables are also defined
+ here. For example, setting tags/flags to use compiling.
5. ``integration_test.sh``: Execute integration-testing. This is
much more involved, and relies on access to external
@@ -37,39 +48,163 @@ task (pass or fail) is set based on the exit status of the last script to execut
Total execution time is capped at 2-hours (includes all the above)
but this script normally completes in less than an hour.
-### ``build_vm_images`` Task
-1. When a PR is merged (``$CIRRUS_BRANCH`` == ``master``), run another
- round of the ``full_vm_testing`` task (above).
+### ``optional_testing`` Task
+
+***N/B: Steps below are performed by automation***
+
+1. Optionally executes in parallel with ``testing``. Requires that,
+   **prior** to job-start, the magic string ``***CIRRUS: SYSTEM TEST***``
+   is present in the pull-request *description*. The *description* is the first
+ text-box under the main *summary* line in the github WebUI.
-2. After confirming the tests all pass post-merge, spin up a special VM
- capable of communicating with the GCE API. Once accessible, ``ssh`` into
- the special VM and run the following scripts.
+2. ``setup_environment.sh``: Same as for other tasks.
-3. ``setup_environment.sh``: Configure root's ``.bash_profile``
- for all subsequent scripts (each run in a new shell). Any
- distribution-specific environment variables are also defined
- here. For example, setting tags/flags to use compiling.
+3. ``system_test.sh``: Build both dependencies and libpod, install them,
+ then execute `make localsystem` from the repository root.
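+
+To reproduce that step by hand on an already-prepared VM, something like the
+following should suffice (only a sketch; the exact dependency-install
+commands live in ``system_test.sh`` itself):
+
+```
+$ cd $GOPATH/src/github.com/containers/libpod
+$ make                                        # build libpod
+$ sudo make install PREFIX=/usr ETCDIR=/etc   # install binaries and configs
+$ make localsystem                            # run the system tests
+```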
-4. ``build_vm_images.sh``: Examine the merged PR's description on github.
- If it contains the magic string ``***CIRRUS: REBUILD IMAGES***``, then
- continue. Otherwise display a message, take no further action, and
- exit successfully. This prevents production of new VM images unless
- they are called for, thereby saving the cost of needlessly storing them.
-5. If the magic string was found, utilize [the packer tool](http://packer.io/docs/)
+### ``cache_images`` Task
+
+Modifying the contents of cache-images is done by making changes to
+one or more of the ``./contrib/cirrus/packer/*_setup.sh`` files. Testing
+those changes currently requires adding a temporary commit to a PR that
+updates ``.cirrus.yml``:
+
+* Remove all task sections except ``cache_images_task``.
+* Remove the ``only_if`` condition and ``depends_on`` dependencies.
+
+Assuming the build is successful, the new image names will be displayed at the
+end of the output; at that point the temporary commit may be removed. Finally,
+the new names may be used as ``image_name`` values in ``.cirrus.yml``.
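+
+As a sketch of that workflow (the branch name and commit message below are
+placeholders only):
+
+```
+$ git checkout -b tweak-cache-images
+$ $EDITOR contrib/cirrus/packer/fedora_setup.sh   # the actual setup change
+$ $EDITOR .cirrus.yml                             # temporarily strip the other tasks
+$ git commit -asm 'TEMP: exercise cache_images_task only'
+$ git push                                        # open/update the PR, watch the task
+```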
+
+***N/B: Steps below are performed by automation***
+
+1. When a PR is merged (``$CIRRUS_BRANCH`` == ``master``), run another
+ round of the ``gating`` and ``testing`` tasks (above).
+
+2. Assuming tests pass, if the commit message contains the magic string
+   ``***CIRRUS: REBUILD IMAGES***``, then this task continues. Otherwise
+   it simply marks the master branch as 'passed'.
+
+3. ``setup_environment.sh``: Same as for other tasks.
+
+4. ``build_vm_images.sh``: Utilize [the packer tool](http://packer.io/docs/)
to produce new VM images. Create a new VM from each base-image, connect
- to them with ``ssh``, and perform these steps as defined by the
- ``libpod_images.json`` file.
+ to them with ``ssh``, and perform the steps as defined by the
+ ``$PACKER_BASE/libpod_images.json`` file:
- 1. Copy the current state of the repository into ``/tmp/libpod``.
+ 1. On a base-image VM, as root, copy the current state of the repository
+ into ``/tmp/libpod``.
2. Execute distribution-specific scripts to prepare the image for
- use by the ``full_vm_testing`` task (above).
+      use by the ``testing`` task (above). For example,
+ ``fedora_setup.sh``.
3. If successful, shut down each VM and create a new GCE Image
- named after the base image and the commit sha of the merge.
-
-***Note:*** The ``.cirrus.yml`` file must be manually updated with the new
-images names, then the change sent in via a secondary pull-request. This
-ensures that all the ``full_vm_testing`` tasks can pass with the new images,
-before subjecting all future PRs to them. A workflow to automate this
-process is described in comments at the end of the ``.cirrus.yml`` file.
+      named after the base image and the commit sha of the merge.
+
+### Base-images
+
+Base-images are VM disk-images specially prepared for executing as GCE VMs.
+In particular, they run startup services similar in purpose/function
+to the standard 'cloud-init' services.
+
+* The google services are required for full support of ssh-key management
+ and GCE OAuth capabilities. Google provides native images in GCE
+ with services pre-installed, for many platforms. For example,
+ RHEL, CentOS, and Ubuntu.
+
+* Google does ***not*** provide any images for Fedora or Fedora Atomic
+  Host (as of 11/2018), nor does it provide a base-image prepared to
+  run packer for creating other images in the ``cache_images`` Task
+  (above).
+
+* Base images do not need to be produced often, but doing so completely
+  manually would be time-consuming and error-prone. Therefore, a special
+  semi-automatic *Makefile* target is provided to assist with producing
+  all the base-images: ``libpod_base_images``.
+
+To produce new base-images, including an `image-builder-image` (used by
+the ``cache_images`` Task), some input parameters are required:
+
+* ``GCP_PROJECT_ID``: The complete GCP project ID string e.g. foobar-12345
+ identifying where the images will be stored.
+
+* ``GOOGLE_APPLICATION_CREDENTIALS``: A *JSON* file containing
+ credentials for a GCE service account. This can be [a service
+ account](https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually)
+ or [end-user
+ credentials](https://cloud.google.com/docs/authentication/end-user#creating_your_client_credentials)
+
+* ``RHEL_IMAGE_FILE`` and ``RHEL_CSUM_FILE``: complete paths
+  to a `rhel-server-ec2-*.raw.xz` image and its corresponding
+ checksum file. These must be supplied manually because
+ they're not available directly via URL like other images.
+
+* ``RHSM_COMMAND`` contains the complete string needed to register
+ the VM for installing package dependencies. The VM will be de-registered
+ upon completion.
+
+* Optionally, a CSV list may be specified via ``PACKER_BUILDS``
+  to limit the base-images produced. For example,
+ ``PACKER_BUILDS=fedora,image-builder-image``.
+
+If there is an existing 'image-builder-image' within GCE, it may be utilized
+to produce base-images (in addition to cache-images). However, it must have been
+created with support for nested-virtualization, and with elevated cloud
+privileges (to access GCE from within the GCE VM). For example:
+
+```
+$ alias pgcloud='sudo podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER -v $HOME:$HOME:z quay.io/cevich/gcloud_centos:latest'
+
+$ URL=https://www.googleapis.com/auth
+$ SCOPES=$URL/userinfo.email,$URL/compute,$URL/devstorage.full_control
+
+# The --min-cpu-platform is critical for nested-virt.
+$ pgcloud compute instances create $USER-making-images \
+ --image-family image-builder-image \
+ --boot-disk-size "200GB" \
+ --min-cpu-platform "Intel Haswell" \
+ --machine-type n1-standard-2 \
+ --scopes $SCOPES
+```
+
+Alternatively, if there is no image-builder-image available yet, a bare-metal
+CentOS 7 machine with network access to GCE is required. Software dependencies
+can be obtained from the ``packer/image-builder-image_base-setup.sh`` script.
+
+In both cases, the following can be used to set up and build base-images:
+
+```
+$ IP_ADDRESS=1.2.3.4 # EXTERNAL_IP from command output above
+$ rsync -av $PWD centos@$IP_ADDRESS:.
+$ scp $GOOGLE_APPLICATION_CREDENTIALS centos@$IP_ADDRESS:.
+$ ssh centos@$IP_ADDRESS
+...
+```
+
+When ready, change to the ``packer`` sub-directory, and build the images:
+
+```
+$ cd libpod/contrib/cirrus/packer
+$ make libpod_base_images GCP_PROJECT_ID=<VALUE> \
+ GOOGLE_APPLICATION_CREDENTIALS=<VALUE> \
+ RHEL_IMAGE_FILE=<VALUE> \
+ RHEL_CSUM_FILE=<VALUE> \
+ RHSM_COMMAND=<VALUE> \
+ PACKER_BUILDS=<OPTIONAL>
+```
+
+Assuming this is successful (hence the semi-automatic part), packer will
+produce a ``packer-manifest.json`` output file. This contains the base-image
+names suitable for use as the ``*_BASE_IMAGE`` `env` keys in ``.cirrus.yml``.
+
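+For example, the produced names can be pulled out of the manifest with a quick
+query (assuming the standard packer manifest layout, where each build records
+its image name in ``artifact_id``):
+
+```
+$ python3 -m json.tool packer-manifest.json | grep artifact_id   # assumed key name
+```
+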
+On failure, it should be possible to determine the problem from the packer
+output. Sometimes that means setting `PACKER_LOG=1` and troubleshooting
+the nested virt calls. It's also possible to observe the (nested) qemu-kvm
+console output. Simply set the ``TTYDEV`` parameter, for example:
+
+```
+$ make libpod_base_images ... TTYDEV=$(tty)
+ ...
+```
diff --git a/contrib/cirrus/build_vm_images.sh b/contrib/cirrus/build_vm_images.sh
index ffbb2d5d5..ecdf1d877 100755
--- a/contrib/cirrus/build_vm_images.sh
+++ b/contrib/cirrus/build_vm_images.sh
@@ -8,12 +8,13 @@ CNI_COMMIT $CNI_COMMIT
CRIO_COMMIT $CRIO_COMMIT
RUNC_COMMIT $RUNC_COMMIT
PACKER_BUILDS $PACKER_BUILDS
+BUILT_IMAGE_SUFFIX $BUILT_IMAGE_SUFFIX
CENTOS_BASE_IMAGE $CENTOS_BASE_IMAGE
UBUNTU_BASE_IMAGE $UBUNTU_BASE_IMAGE
FEDORA_BASE_IMAGE $FEDORA_BASE_IMAGE
+FAH_BASE_IMAGE $FAH_BASE_IMAGE
RHEL_BASE_IMAGE $RHEL_BASE_IMAGE
RHSM_COMMAND $RHSM_COMMAND
-BUILT_IMAGE_SUFFIX $BUILT_IMAGE_SUFFIX
SERVICE_ACCOUNT $SERVICE_ACCOUNT
GCE_SSH_USERNAME $GCE_SSH_USERNAME
GCP_PROJECT_ID $GCP_PROJECT_ID
@@ -22,36 +23,30 @@ SCRIPT_BASE $SCRIPT_BASE
PACKER_BASE $PACKER_BASE
"
-require_regex '\*\*\*\s*CIRRUS:\s*REBUILD\s*IMAGES\s*\*\*\*' 'Not re-building VM images'
-
show_env_vars
# Everything here is running on the 'image-builder-image' GCE image
# Assume basic dependencies are all met, but there could be a newer version
# of the packer binary
PACKER_FILENAME="packer_${PACKER_VER}_linux_amd64.zip"
-mkdir -p "$HOME/packer"
-cd "$HOME/packer"
-# image_builder_image has packer pre-installed, check if same version requested
-if ! [[ -r "$PACKER_FILENAME" ]]
+if [[ -d "$HOME/packer" ]]
then
- curl -L -O https://releases.hashicorp.com/packer/$PACKER_VER/$PACKER_FILENAME
- curl -L https://releases.hashicorp.com/packer/${PACKER_VER}/packer_${PACKER_VER}_SHA256SUMS | \
- grep 'linux_amd64' > ./sha256sums
- sha256sum --check ./sha256sums
- unzip -o $PACKER_FILENAME
- ./packer --help &> /dev/null # verify exit(0)
+ cd "$HOME/packer"
+ # image_builder_image has packer pre-installed, check if same version requested
+ if [[ -r "$PACKER_FILENAME" ]]
+ then
+ cp $PACKER_FILENAME "$GOSRC/$PACKER_BASE/"
+ cp packer "$GOSRC/$PACKER_BASE/"
+ fi
fi
set -x
-cd "$GOSRC"
-# N/B: /usr/sbin/packer is a DIFFERENT tool, and will exit 0 given the args below :(
-TEMPLATE="./$PACKER_BASE/libpod_images.json"
-
-$HOME/packer/packer inspect "$TEMPLATE"
-
-#$HOME/packer/packer build -machine-readable "-only=$PACKER_BUILDS" "$TEMPLATE" | tee /tmp/packer_log.csv
-$HOME/packer/packer build "-only=$PACKER_BUILDS" "$TEMPLATE"
-
-# TODO: Report back to PR names of built images
+cd "$GOSRC/$PACKER_BASE"
+make libpod_images \
+ PACKER_BUILDS=$PACKER_BUILDS \
+ PACKER_VER=$PACKER_VER \
+ GOSRC=$GOSRC \
+ SCRIPT_BASE=$SCRIPT_BASE \
+ PACKER_BASE=$PACKER_BASE \
+ BUILT_IMAGE_SUFFIX=$BUILT_IMAGE_SUFFIX
diff --git a/contrib/cirrus/integration_test.sh b/contrib/cirrus/integration_test.sh
index 226053724..a50bd448f 100755
--- a/contrib/cirrus/integration_test.sh
+++ b/contrib/cirrus/integration_test.sh
@@ -9,7 +9,7 @@ OS_RELEASE_ID $OS_RELEASE_ID
OS_RELEASE_VER $OS_RELEASE_VER
"
-show_env_vars
+clean_env
set -x
cd "$GOSRC"
@@ -19,10 +19,13 @@ case "${OS_RELEASE_ID}-${OS_RELEASE_VER}" in
make test-binaries "BUILDTAGS=$BUILDTAGS"
SKIP_USERNS=1 make localintegration "BUILDTAGS=$BUILDTAGS"
;;
- fedora-28) ;& # Continue to the next item
+ fedora-29) ;& # Continue to the next item
+ fedora-28) ;&
centos-7) ;&
rhel-7)
- stub 'integration testing not working on $OS_RELEASE_ID'
+ make install PREFIX=/usr ETCDIR=/etc
+ make test-binaries
+ make localintegration
;;
*) bad_os_id_ver ;;
esac
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 2fa91258b..3b567b7a7 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -4,8 +4,21 @@
# to be sourced by other scripts, not called directly.
# Under some contexts these values are not set, make sure they are.
-USER="$(whoami)"
-HOME="$(getent passwd $USER | cut -d : -f 6)"
+export USER="$(whoami)"
+export HOME="$(getent passwd $USER | cut -d : -f 6)"
+
+# These are normally set by cirrus, if not use some reasonable defaults
+ENVLIB=${ENVLIB:-.bash_profile}
+CIRRUS_WORKING_DIR=${CIRRUS_WORKING_DIR:-/var/tmp/go/src/github.com/containers/libpod}
+SCRIPT_BASE=${SCRIPT_BASE:-./contrib/cirrus}
+PACKER_BASE=${PACKER_BASE:-./contrib/cirrus/packer}
+CIRRUS_BUILD_ID=${CIRRUS_BUILD_ID:-DEADBEEF} # default when run by a human
+cd "$CIRRUS_WORKING_DIR"
+CIRRUS_BASE_SHA=${CIRRUS_BASE_SHA:-$(git rev-parse upstream/master || git rev-parse origin/master)}
+CIRRUS_CHANGE_IN_REPO=${CIRRUS_CHANGE_IN_REPO:-$(git rev-parse HEAD)}
+CIRRUS_REPO_NAME=${CIRRUS_REPO_NAME:-libpod}
+cd -
+
if ! [[ "$PATH" =~ "/usr/local/bin" ]]
then
export PATH="$PATH:/usr/local/bin"
@@ -73,6 +86,18 @@ PACKER_BUILDS $PACKER_BUILDS
do
[[ -z "$NAME" ]] || echo "export $NAME=\"$VALUE\""
done
+ echo ""
+ echo "##### $(go version) #####"
+ echo ""
+}
+
+# Unset environment variables not needed for testing purposes
+clean_env() {
+ req_env_var "
+ UNSET_ENV_VARS $UNSET_ENV_VARS
+ "
+ echo "Unsetting $(echo $UNSET_ENV_VARS | wc -w) environment variables"
+ unset -v UNSET_ENV_VARS $UNSET_ENV_VARS || true # don't fail on read-only
}
# Return a GCE image-name compatible string representation of distribution name
@@ -98,15 +123,17 @@ stub() {
ircmsg() {
req_env_var "
- SCRIPT_BASE $SCRIPT_BASE
- GOSRC $GOSRC
CIRRUS_TASK_ID $CIRRUS_TASK_ID
1 $1
"
- SCRIPT="$GOSRC/$SCRIPT_BASE/podbot.py"
+ # Sometimes setup_environment.sh didn't run
+ SCRIPT="$(dirname $0)/podbot.py"
NICK="podbot_$CIRRUS_TASK_ID"
NICK="${NICK:0:15}" # Any longer will break things
+ set +e
$SCRIPT $NICK $1
+ echo "Ignoring exit($?)"
+ set -e
}
# Run sudo in directory with GOPATH set
@@ -117,23 +144,6 @@ cdsudo() {
sudo --preserve-env=GOPATH --non-interactive bash -c "$CMD"
}
-# Skip a build if $1 does not match in the PR Title/Description with message $2
-require_regex() {
- req_env_var "
- CIRRUS_CHANGE_MESSAGE $CIRRUS_CHANGE_MESSAGE
- 1 $1
- 2 $2
- "
- regex="$1"
- msg="$2"
- if ! echo "$CIRRUS_CHANGE_MESSAGE" | egrep -q "$regex"
- then
- echo "***** The PR Title/Description did not match the regular expression: $MAGIC_RE"
- echo "***** $msg"
- exit 0
- fi
-}
-
# Helper/wrapper script to only show stderr/stdout on non-zero exit
install_ooe() {
req_env_var "SCRIPT_BASE $SCRIPT_BASE"
@@ -171,6 +181,19 @@ install_cni_plugins() {
sudo cp bin/* /usr/libexec/cni
}
+install_runc_from_git(){
+ wd=$(pwd)
+ DEST="$GOPATH/src/github.com/opencontainers/runc"
+ rm -rf "$DEST"
+ ooe.sh git clone https://github.com/opencontainers/runc.git "$DEST"
+ cd "$DEST"
+ ooe.sh git fetch origin --tags
+ ooe.sh git checkout -q "$RUNC_COMMIT"
+ ooe.sh make static BUILDTAGS="seccomp selinux"
+ sudo install -m 755 runc /usr/bin/runc
+ cd $wd
+}
+
install_runc(){
OS_RELEASE_ID=$(os_release_id)
echo "Installing RunC from commit $RUNC_COMMIT"
@@ -193,14 +216,7 @@ install_runc(){
cd "$GOPATH/src/github.com/containers/libpod"
ooe.sh sudo make install.libseccomp.sudo
fi
- DEST="$GOPATH/src/github.com/opencontainers/runc"
- rm -rf "$DEST"
- ooe.sh git clone https://github.com/opencontainers/runc.git "$DEST"
- cd "$DEST"
- ooe.sh git fetch origin --tags
- ooe.sh git checkout -q "$RUNC_COMMIT"
- ooe.sh make static BUILDTAGS="seccomp selinux"
- sudo install -m 755 runc /usr/bin/runc
+ install_runc_from_git
}
install_buildah() {
@@ -277,21 +293,29 @@ install_varlink(){
}
_finalize(){
+    set +e  # Don't fail at the very end; make errors non-fatal
echo "Removing leftover giblets from cloud-init"
cd /
sudo rm -rf /var/lib/cloud/instance?
sudo rm -rf /root/.ssh/*
sudo rm -rf /home/*
+ sudo rm -rf /tmp/*
+ sudo rm -rf /tmp/.??*
+ sync
+ sudo fstrim -av
}
rh_finalize(){
+ set +e # Don't fail at the very end
# Allow root ssh-logins
if [[ -r /etc/cloud/cloud.cfg ]]
then
sudo sed -re 's/^disable_root:.*/disable_root: 0/g' -i /etc/cloud/cloud.cfg
fi
echo "Resetting to fresh-state for usage as cloud-image."
- sudo $(type -P dnf || type -P yum) clean all
+ PKG=$(type -P dnf || type -P yum || echo "")
+ [[ -z "$PKG" ]] || sudo $PKG clean all # not on atomic
sudo rm -rf /var/cache/{yum,dnf}
sudo rm -f /etc/udev/rules.d/*-persistent-*.rules
sudo touch /.unconfigured # force firstboot to run
@@ -299,7 +323,35 @@ rh_finalize(){
}
ubuntu_finalize(){
+ set +e # Don't fail at the very end
echo "Resetting to fresh-state for usage as cloud-image."
sudo rm -rf /var/cache/apt
_finalize
}
+
+rhel_exit_handler() {
+ set +ex
+ req_env_var "
+ GOPATH $GOPATH
+ RHSMCMD $RHSMCMD
+ "
+ cd /
+ sudo rm -rf "$RHSMCMD"
+ sudo rm -rf "$GOPATH"
+ sudo subscription-manager remove --all
+ sudo subscription-manager unregister
+ sudo subscription-manager clean
+}
+
+rhsm_enable() {
+ req_env_var "
+ RHSM_COMMAND $RHSM_COMMAND
+ "
+ export GOPATH="$(mktemp -d)"
+ export RHSMCMD="$(mktemp)"
+ trap "rhel_exit_handler" EXIT
+ # Avoid logging sensitive details
+ echo "$RHSM_COMMAND" > "$RHSMCMD"
+ ooe.sh sudo bash "$RHSMCMD"
+ sudo rm -rf "$RHSMCMD"
+}
diff --git a/contrib/cirrus/optional_system_test.sh b/contrib/cirrus/optional_system_test.sh
deleted file mode 100755
index 705dda5ad..000000000
--- a/contrib/cirrus/optional_system_test.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -e
-source $(dirname $0)/lib.sh
-
-MAGIC_RE='\*\*\*\s*CIRRUS:\s*SYSTEM\s*TEST\s*\*\*\*'
-if ! echo "$CIRRUS_CHANGE_MESSAGE" | egrep -q "$MAGIC_RE"
-then
- echo "Skipping system-testing because PR title or description"
- echo "does not match regular expression: $MAGIC_RE"
- exit 0
-fi
-
-req_env_var "
-GOSRC $GOSRC
-OS_RELEASE_ID $OS_RELEASE_ID
-OS_RELEASE_VER $OS_RELEASE_VER
-"
-
-show_env_vars
-
-set -x
-cd "$GOSRC"
-make localsystem
diff --git a/contrib/cirrus/packer/.gitignore b/contrib/cirrus/packer/.gitignore
new file mode 100644
index 000000000..8f7bdeaf7
--- /dev/null
+++ b/contrib/cirrus/packer/.gitignore
@@ -0,0 +1,7 @@
+*json
+packer
+packer*zip
+packer_cache
+cidata*
+meta-data
+user-data
diff --git a/contrib/cirrus/packer/Makefile b/contrib/cirrus/packer/Makefile
new file mode 100644
index 000000000..9bf27373e
--- /dev/null
+++ b/contrib/cirrus/packer/Makefile
@@ -0,0 +1,108 @@
+
+# N/B: PACKER_BUILDS variable is required. Should contain CSV of
+# builder name(s) from applicable YAML file,
+# e.g for names see libpod_images.yml
+
+PACKER_VER ?= 1.3.1
+PACKER_DIST_FILENAME := packer_${PACKER_VER}_linux_amd64.zip
+
+# Only needed for libpod_base_images target
+TIMESTAMP := $(shell date +%s)
+GOSRC ?= $(shell realpath "./../../../")
+PACKER_BASE ?= contrib/cirrus/packer
+SCRIPT_BASE ?= contrib/cirrus
+
+# For debugging nested-virt, use
+#TTYDEV := $(shell tty)
+TTYDEV := /dev/null
+
+.PHONY: all
+all: libpod_images
+
+%.json: %.yml
+ @python3 -c 'import json,yaml; json.dump( yaml.load(open("$<").read()), open("$@","w"), indent=2);'
+
+${PACKER_DIST_FILENAME}:
+ @curl -L --silent --show-error \
+ -O https://releases.hashicorp.com/packer/${PACKER_VER}/${PACKER_DIST_FILENAME}
+
+packer: ${PACKER_DIST_FILENAME}
+ @curl -L --silent --show-error \
+ https://releases.hashicorp.com/packer/${PACKER_VER}/packer_${PACKER_VER}_SHA256SUMS \
+ | grep 'linux_amd64' > /tmp/packer_sha256sums
+ @sha256sum --check /tmp/packer_sha256sums
+ @unzip -o ${PACKER_DIST_FILENAME}
+ @touch --reference=Makefile ${PACKER_DIST_FILENAME}
+
+.PHONY: test
+test: libpod_base_images.json libpod_images.json packer
+ ./packer inspect libpod_base_images.json > /dev/null
+ ./packer inspect libpod_images.json > /dev/null
+ @echo "All good"
+
+.PHONY: libpod_images
+libpod_images: libpod_images.json packer
+ifndef PACKER_BUILDS
+ $(error PACKER_BUILDS is undefined, expected builder-names CSV)
+endif
+ ./packer build -only=${PACKER_BUILDS} \
+ -var GOSRC=$(GOSRC) \
+ -var PACKER_BASE=$(PACKER_BASE) \
+ -var SCRIPT_BASE=$(SCRIPT_BASE) \
+ libpod_images.json
+ @echo ""
+ @echo "Finished. The images mentioned above, and in packer-manifest.json"
+ @echo "can be used in .cirrus.yml as values for the 'image_name' keys"
+ @echo ""
+
+cidata.ssh:
+ ssh-keygen -f $@ -P "" -q
+
+cidata.ssh.pub: cidata.ssh
+ touch $@
+
+meta-data:
+ echo "local-hostname: localhost.localdomain" > $@
+
+user-data: cidata.ssh.pub
+ bash make-user-data.sh
+
+cidata.iso: user-data meta-data
+ genisoimage -output cidata.iso -volid cidata -input-charset utf-8 -joliet -rock user-data meta-data
+
+# This is intended to be run by a human, with admin access to the libpod GCE project.
+.PHONY: libpod_base_images
+libpod_base_images: libpod_base_images.json cidata.iso cidata.ssh packer
+ifndef GCP_PROJECT_ID
+ $(error GCP_PROJECT_ID is undefined, expected complete GCP project ID string e.g. foobar-12345)
+endif
+ifndef GOOGLE_APPLICATION_CREDENTIALS
+ $(error GOOGLE_APPLICATION_CREDENTIALS is undefined, expected absolute path to JSON file, like $HOME/.config/gcloud/legacy_credentials/*/adc.json)
+endif
+ifndef RHEL_IMAGE_FILE
+ $(error RHEL_IMAGE_FILE is undefined, expected full path to a rhel-server-ec2-*.raw.xz file)
+endif
+ifndef RHEL_CSUM_FILE
+ $(error RHEL_CSUM_FILE is undefined, expected full path to a rhel-server-ec2-*.raw.xz.SHA256SUM file)
+endif
+ifndef RHSM_COMMAND
+ $(error RHSM_COMMAND is undefined, expected string required for temporarily registering VM)
+endif
+ PACKER_CACHE_DIR=/tmp ./packer build \
+ -var TIMESTAMP=$(TIMESTAMP) \
+ -var TTYDEV=$(TTYDEV) \
+ -var GCP_PROJECT_ID=$(GCP_PROJECT_ID) \
+ -var GOOGLE_APPLICATION_CREDENTIALS=$(GOOGLE_APPLICATION_CREDENTIALS) \
+ -var GOSRC=$(GOSRC) \
+ -var PACKER_BASE=$(PACKER_BASE) \
+ -var SCRIPT_BASE=$(SCRIPT_BASE) \
+ -var RHEL_BASE_IMAGE_NAME=$(shell basename $(RHEL_IMAGE_FILE) | tr -d '[[:space:]]' | sed -r -e 's/\.x86_64\.raw\.xz//' | tr '[[:upper:]]' '[[:lower:]]' | tr '[[:punct:]]' '-') \
+ -var RHEL_IMAGE_FILE=$(RHEL_IMAGE_FILE) \
+ -var RHEL_CSUM_FILE=$(RHEL_CSUM_FILE) \
+ -var 'RHSM_COMMAND=$(RHSM_COMMAND)' \
+ -only $(PACKER_BUILDS) \
+ libpod_base_images.json
+ @echo ""
+ @echo "Finished. The images mentioned above, and in packer-manifest.json"
+ @echo "can be used in .cirrus.yml as values for the *_BASE_IMAGE keys."
+ @echo ""
diff --git a/contrib/cirrus/packer/README.md b/contrib/cirrus/packer/README.md
index 8ff6947e9..9a07ed960 100644
--- a/contrib/cirrus/packer/README.md
+++ b/contrib/cirrus/packer/README.md
@@ -1,2 +1,3 @@
These are definitions and scripts consumed by packer to produce the
-various distribution images used for CI testing.
+various distribution images used for CI testing. For more details
+see the [Cirrus CI documentation](../README.md)
diff --git a/contrib/cirrus/packer/centos_setup.sh b/contrib/cirrus/packer/centos_setup.sh
index 7b2308739..a13050569 100644
--- a/contrib/cirrus/packer/centos_setup.sh
+++ b/contrib/cirrus/packer/centos_setup.sh
@@ -29,6 +29,7 @@ ooe.sh sudo yum -y install \
btrfs-progs-devel \
bzip2 \
device-mapper-devel \
+ emacs-nox \
findutils \
glib2-devel \
glibc-static \
@@ -63,6 +64,7 @@ ooe.sh sudo yum -y install \
runc \
skopeo-containers \
unzip \
+ vim \
which \
xz
diff --git a/contrib/cirrus/packer/fah_base-setup.sh b/contrib/cirrus/packer/fah_base-setup.sh
new file mode 100644
index 000000000..606c4f336
--- /dev/null
+++ b/contrib/cirrus/packer/fah_base-setup.sh
@@ -0,0 +1,45 @@
+
+# N/B: This script is not intended to be run by humans. It is used to configure the
+# FAH base image for importing, so that it will boot in GCE.
+
+set -e
+
+# Load in library (copied by packer, before this script was run)
+source $GOSRC/$SCRIPT_BASE/lib.sh
+
+install_ooe
+
+if [[ "$1" == "pre" ]]
+then
+ echo "Upgrading Atomic Host"
+ setenforce 0
+ ooe.sh atomic host upgrade
+
+ echo "Configuring Repositories"
+ ooe.sh sudo tee /etc/yum.repos.d/ngompa-gce-oslogin.repo <<EOF
+[ngompa-gce-oslogin]
+name=Copr repo for gce-oslogin owned by ngompa
+baseurl=https://copr-be.cloud.fedoraproject.org/results/ngompa/gce-oslogin/fedora-\$releasever-\$basearch/
+type=rpm-md
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/ngompa/gce-oslogin/pubkey.gpg
+repo_gpgcheck=0
+enabled=1
+enabled_metadata=1
+EOF
+ echo "Installing necessary packages and google services"
+ # Google services are enabled by default, upon install.
+ ooe.sh rpm-ostree install rng-tools google-compute-engine google-compute-engine-oslogin
+ echo "Rebooting..."
+ systemctl reboot # Required for upgrade + package installs to be active
+elif [[ "$1" == "post" ]]
+then
+ echo "Enabling necessary services"
+ systemctl enable rngd # Must reboot before enabling
+ rh_finalize
+ echo "SUCCESS!"
+else
+ echo "Expected to be called with 'pre' or 'post'"
+ exit 6
+fi
diff --git a/contrib/cirrus/packer/fah_setup.sh b/contrib/cirrus/packer/fah_setup.sh
new file mode 100644
index 000000000..2e053b396
--- /dev/null
+++ b/contrib/cirrus/packer/fah_setup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# This script is called by packer on the subject fah VM, to setup the podman
+# build/test environment. It's not intended to be used outside of this context.
+
+set -e
+
+# Load in library (copied by packer, before this script was run)
+source /tmp/libpod/$SCRIPT_BASE/lib.sh
+
+req_env_var "
+SCRIPT_BASE $SCRIPT_BASE
+"
+
+install_ooe
+
+ooe.sh sudo atomic host upgrade
+
+ooe.sh sudo rpm-ostree uninstall cloud-init
+
+rh_finalize
+
+echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/fedora_base-setup.sh b/contrib/cirrus/packer/fedora_base-setup.sh
new file mode 100644
index 000000000..c0a1e422c
--- /dev/null
+++ b/contrib/cirrus/packer/fedora_base-setup.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# N/B: This script is not intended to be run by humans. It is used to configure the
+# fedora base image for importing, so that it will boot in GCE
+
+set -e
+
+# Load in library (copied by packer, before this script was run)
+source $GOSRC/$SCRIPT_BASE/lib.sh
+
+[[ "$1" == "post" ]] || exit 0 # nothing to do
+
+install_ooe
+
+echo "Updating packages"
+ooe.sh dnf -y update
+
+echo "Installing necessary packages and google services"
+ooe.sh dnf -y copr enable ngompa/gce-oslogin
+ooe.sh dnf -y install rng-tools google-compute-engine google-compute-engine-oslogin
+
+echo "Enabling services"
+ooe.sh systemctl enable rngd
+
+rh_finalize
+
+echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/fedora_setup.sh b/contrib/cirrus/packer/fedora_setup.sh
index f9fea04a7..01c468901 100644
--- a/contrib/cirrus/packer/fedora_setup.sh
+++ b/contrib/cirrus/packer/fedora_setup.sh
@@ -10,6 +10,7 @@ source /tmp/libpod/$SCRIPT_BASE/lib.sh
req_env_var "
SCRIPT_BASE $SCRIPT_BASE
+FEDORA_CNI_COMMIT $FEDORA_CNI_COMMIT
CNI_COMMIT $CNI_COMMIT
CRIO_COMMIT $CRIO_COMMIT
CRIU_COMMIT $CRIU_COMMIT
@@ -27,8 +28,8 @@ ooe.sh sudo dnf install -y \
atomic-registries \
btrfs-progs-devel \
bzip2 \
- conmon \
device-mapper-devel \
+ emacs-nox \
findutils \
git \
glib2-devel \
@@ -65,11 +66,14 @@ ooe.sh sudo dnf install -y \
runc \
skopeo-containers \
slirp4netns \
+ unzip \
+ vim \
which \
xz
install_varlink
+CNI_COMMIT=$FEDORA_CNI_COMMIT
install_cni_plugins
install_buildah
diff --git a/contrib/cirrus/packer/image-builder-image_base-setup.sh b/contrib/cirrus/packer/image-builder-image_base-setup.sh
new file mode 100644
index 000000000..b8e2824a7
--- /dev/null
+++ b/contrib/cirrus/packer/image-builder-image_base-setup.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+# This script is called by packer on a vanilla CentOS VM, to setup the image
+# used for building images FROM base images. It's not intended to be used
+# outside of this context.
+
+set -e
+
+[[ "$1" == "post" ]] || exit 0 # pre stage not needed
+
+# Load in library (copied by packer, before this script was run)
+source $GOSRC/$SCRIPT_BASE/lib.sh
+
+req_env_var "
+ TIMESTAMP $TIMESTAMP
+ GOSRC $GOSRC
+ SCRIPT_BASE $SCRIPT_BASE
+ PACKER_BASE $PACKER_BASE
+"
+
+install_ooe
+
+echo "Updating packages"
+ooe.sh sudo yum -y update
+
+echo "Configuring repositories"
+ooe.sh sudo yum -y install centos-release-scl epel-release
+
+echo "Installing packages"
+ooe.sh sudo yum -y install \
+ genisoimage \
+ golang \
+ google-cloud-sdk \
+ libvirt \
+ libvirt-admin \
+ libvirt-client \
+ libvirt-daemon \
+ make \
+    python34 \
+    python34-PyYAML \
+ qemu-img \
+ qemu-kvm \
+ qemu-kvm-tools \
+ qemu-user \
+ rsync \
+ unzip \
+ util-linux \
+ vim
+
+sudo ln -s /usr/libexec/qemu-kvm /usr/bin/
+
+sudo tee /etc/modprobe.d/kvm-nested.conf <<EOF
+options kvm-intel nested=1
+options kvm-intel enable_shadow_vmcs=1
+options kvm-intel enable_apicv=1
+options kvm-intel ept=1
+EOF
+
+echo "Installing packer"
+sudo mkdir -p /root/$(basename $PACKER_BASE)
+sudo cp $GOSRC/$PACKER_BASE/*packer* /root/$(basename $PACKER_BASE)
+sudo mkdir -p /root/$(basename $SCRIPT_BASE)
+sudo cp $GOSRC/$SCRIPT_BASE/*.sh /root/$(basename $SCRIPT_BASE)
+
+install_scl_git
+
+echo "Cleaning up"
+cd /
+rm -rf $GOSRC
+
+rh_finalize
+
+echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/libpod_base_images.yml b/contrib/cirrus/packer/libpod_base_images.yml
new file mode 100644
index 000000000..109b9b8d5
--- /dev/null
+++ b/contrib/cirrus/packer/libpod_base_images.yml
@@ -0,0 +1,193 @@
+---
+
+variables:
+ # Complete local path to this repository (Required)
+ GOSRC:
+ # Relative path to this (packer) subdirectory (Required)
+ PACKER_BASE:
+ # Relative path to cirrus scripts subdirectory (Required)
+ SCRIPT_BASE:
+ # Unique ID for naming new base-images (required)
+ TIMESTAMP:
+ # Required for output from qemu builders
+ TTYDEV:
+ # RHEL images require click-through agreements to obtain (required)
+ RHEL_BASE_IMAGE_NAME:
+ RHEL_IMAGE_FILE:
+ RHEL_CSUM_FILE:
+ # RHEL requires a subscription to install/update packages
+ RHSM_COMMAND:
+
+ # Latest Fedora release
+ FEDORA_IMAGE_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/29/Cloud/x86_64/images/Fedora-Cloud-Base-29-1.2.x86_64.qcow2"
+ FEDORA_CSUM_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/29/Cloud/x86_64/images/Fedora-Cloud-29-1.2-x86_64-CHECKSUM"
+ FEDORA_BASE_IMAGE_NAME: 'fedora-cloud-base-29-1-2' # Name to use in GCE
+ # Prior Fedora release
+ PRIOR_FEDORA_IMAGE_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/28/Cloud/x86_64/images/Fedora-Cloud-Base-28-1.1.x86_64.qcow2"
+ PRIOR_FEDORA_CSUM_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/28/Cloud/x86_64/images/Fedora-Cloud-28-1.1-x86_64-CHECKSUM"
+ PRIOR_FEDORA_BASE_IMAGE_NAME: 'fedora-cloud-base-28-1-1' # Name to use in GCE
+ FAH_IMAGE_URL: "https://dl.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-29-20181025.1/AtomicHost/x86_64/images/Fedora-AtomicHost-29-20181025.1.x86_64.qcow2"
+ FAH_CSUM_URL: "https://dl.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-29-20181025.1/AtomicHost/x86_64/images/Fedora-AtomicHost-29-20181025.1-x86_64-CHECKSUM"
+ FAH_BASE_IMAGE_NAME: 'fedora-atomichost-29-20181025-1' # Name to use in GCE
+
+ # The name of the image in GCE used for packer build libpod_images.yml
+ IBI_BASE_NAME: 'image-builder-image'
+ CIDATA_ISO: 'cidata.iso' # produced by Makefile
+
+ # Path to json file (required, likely ~/.config/gcloud/legacy_credentials/*/adc.json)
+ GOOGLE_APPLICATION_CREDENTIALS:
+ # The complete project ID (required, not the short name)
+ GCP_PROJECT_ID:
+ # Pre-existing storage bucket w/ lifecycle-enabled
+ XFERBUCKET: "packer-import" # pre-created, globally unique, lifecycle-enabled
+
+# Don't leak sensitive values in error messages / output
+sensitive-variables:
+ - 'GOOGLE_APPLICATION_CREDENTIALS'
+ - 'GCP_PROJECT_ID'
+ - 'RHSM_COMMAND'
+
+# What images to produce in which cloud
+builders:
+ - name: '{{user `IBI_BASE_NAME`}}'
+ type: 'googlecompute'
+ image_name: '{{user `IBI_BASE_NAME`}}-{{user `TIMESTAMP`}}'
+ image_family: '{{user `IBI_BASE_NAME`}}'
+ source_image_project_id: 'centos-cloud'
+ source_image_family: 'centos-7'
+ project_id: '{{user `GCP_PROJECT_ID`}}'
+ account_file: '{{user `GOOGLE_APPLICATION_CREDENTIALS`}}'
+ communicator: 'ssh'
+ ssh_username: 'centos'
+ ssh_pty: 'true'
+ # The only supported zone in Cirrus-CI, as of addition of this comment
+ zone: 'us-central1-a'
+ # Enable nested virtualization in case it's ever needed
+ image_licenses:
+ - 'https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx'
+ min_cpu_platform: "Intel Broadwell" # nested-virt requirement
+
+ - &nested_virt
+ name: 'fedora'
+ type: 'qemu'
+ accelerator: "kvm"
+ iso_url: '{{user `FEDORA_IMAGE_URL`}}'
+ disk_image: true
+ format: "raw"
+ disk_size: 5120
+ iso_checksum_url: '{{user `FEDORA_CSUM_URL`}}'
+ iso_checksum_type: "sha256"
+ output_directory: '/tmp/{{build_name}}'
+ vm_name: "disk.raw" # actually qcow2, name required for post-processing
+ boot_wait: '5s'
+ shutdown_command: 'shutdown -h now'
+ headless: true
+ qemu_binary: "/usr/libexec/qemu-kvm"
+ qemuargs: # List-of-list format required to override packer-generated args
+ - - "-m"
+ - "1024"
+ - - "-cpu"
+ - "host"
+ - - "-device"
+ - "virtio-rng-pci"
+ - - "-chardev"
+ - "tty,id=pts,path={{user `TTYDEV`}}"
+ - - "-device"
+ - "isa-serial,chardev=pts"
+ - - "-cdrom"
+ - "{{user `CIDATA_ISO`}}"
+ - - "-netdev"
+ - "user,id=net0,hostfwd=tcp::{{ .SSHHostPort }}-:22"
+ - - "-device"
+ - "virtio-net,netdev=net0"
+ communicator: 'ssh'
+ ssh_private_key_file: 'cidata.ssh'
+ ssh_username: 'root'
+
+ - <<: *nested_virt
+ name: 'prior_fedora'
+ iso_url: '{{user `PRIOR_FEDORA_IMAGE_URL`}}'
+ iso_checksum_url: '{{user `PRIOR_FEDORA_CSUM_URL`}}'
+
+ - <<: *nested_virt
+ name: 'fah'
+ iso_url: '{{user `FAH_IMAGE_URL`}}'
+ iso_checksum_url: '{{user `FAH_CSUM_URL`}}'
+ disk_size: 10240
+
+ - <<: *nested_virt
+ name: 'rhel'
+ iso_url: 'file://{{user `RHEL_IMAGE_FILE`}}'
+ iso_checksum_url: 'file://{{user `RHEL_CSUM_FILE`}}'
+ disk_size: 10240
+
+provisioners:
+ - type: 'shell'
+ inline:
+ - 'mkdir -p /tmp/libpod/{{user `SCRIPT_BASE`}}'
+ - 'mkdir -p /tmp/libpod/{{user `PACKER_BASE`}}'
+
+ - type: 'file'
+ source: '{{user `GOSRC`}}/.cirrus.yml'
+ destination: '/tmp/libpod/.cirrus.yml'
+
+ - type: 'file'
+ source: '{{user `GOSRC`}}/{{user `SCRIPT_BASE`}}/'
+ destination: '/tmp/libpod/{{user `SCRIPT_BASE`}}/'
+
+ - type: 'file'
+ source: '{{user `GOSRC`}}/{{user `PACKER_BASE`}}/'
+ destination: '/tmp/libpod/{{user `PACKER_BASE`}}/'
+
+ - &shell_script
+ type: 'shell'
+ inline:
+ - 'chmod +x /tmp/libpod/{{user `PACKER_BASE`}}/{{build_name}}_base-setup.sh'
+ - '/tmp/libpod/{{user `PACKER_BASE`}}/{{build_name}}_base-setup.sh pre'
+ expect_disconnect: true # Allow this to reboot the VM
+ environment_vars:
+ - 'TIMESTAMP={{user `TIMESTAMP`}}'
+ - 'GOSRC=/tmp/libpod'
+ - 'SCRIPT_BASE={{user `SCRIPT_BASE`}}'
+ - 'PACKER_BASE={{user `PACKER_BASE`}}'
+ - 'RHSM_COMMAND={{user `RHSM_COMMAND`}}'
+
+ - <<: *shell_script
+ expect_disconnect: false
+ pause_before: '10s'
+ inline:
+ - '/tmp/libpod/{{user `PACKER_BASE`}}/{{build_name}}_base-setup.sh post'
+
+post-processors:
+ - - type: "compress"
+ only: ['fedora', 'prior_fedora', 'fah', 'rhel']
+ output: '/tmp/{{build_name}}/disk.raw.tar.gz'
+ format: '.tar.gz'
+ compression_level: 9
+ - &gcp_import
+ only: ['fedora']
+ type: "googlecompute-import"
+ project_id: '{{user `GCP_PROJECT_ID`}}'
+ account_file: '{{user `GOOGLE_APPLICATION_CREDENTIALS`}}'
+ bucket: '{{user `XFERBUCKET`}}'
+ gcs_object_name: '{{build_name}}-{{user `TIMESTAMP`}}-{{uuid}}.tar.gz'
+ image_name: "{{user `FEDORA_BASE_IMAGE_NAME`}}-{{user `TIMESTAMP`}}"
+ image_description: 'Based on {{user `FEDORA_IMAGE_URL`}}'
+ image_family: '{{user `FEDORA_BASE_IMAGE_NAME`}}'
+ - <<: *gcp_import
+ only: ['prior_fedora']
+ image_name: "{{user `PRIOR_FEDORA_BASE_IMAGE_NAME`}}-{{user `TIMESTAMP`}}"
+ image_description: 'Based on {{user `PRIOR_FEDORA_IMAGE_URL`}}'
+ image_family: '{{user `PRIOR_FEDORA_BASE_IMAGE_NAME`}}'
+ - <<: *gcp_import
+ only: ['fah']
+ image_name: "{{user `FAH_BASE_IMAGE_NAME`}}-{{user `TIMESTAMP`}}"
+ image_description: 'Based on {{user `FAH_IMAGE_URL`}}'
+ image_family: '{{user `FAH_BASE_IMAGE_NAME`}}'
+ - <<: *gcp_import
+ only: ['rhel']
+ image_name: "{{user `RHEL_BASE_IMAGE_NAME`}}-{{user `TIMESTAMP`}}"
+ image_description: 'Based on {{user `RHEL_IMAGE_FILE`}}'
+ image_family: '{{user `RHEL_BASE_IMAGE_NAME`}}'
+ - type: 'manifest'
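
For orientation, the compress + googlecompute-import chain above tars the qemu-built disk and registers it as a new GCE base image. A minimal bash sketch of roughly the same flow done by hand follows; the bucket, project, and image names are illustrative placeholders, not values taken from this repository.

    # Rough manual equivalent of the compress + googlecompute-import post-processors.
    TS=$(date +%s)
    tar -czf disk.raw.tar.gz disk.raw
    gsutil cp disk.raw.tar.gz "gs://packer-import/fedora-${TS}.tar.gz"
    gcloud compute images create "fedora-base-${TS}" \
        --project "my-gcp-project" \
        --family "fedora-base" \
        --source-uri "gs://packer-import/fedora-${TS}.tar.gz"
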
diff --git a/contrib/cirrus/packer/libpod_images.json b/contrib/cirrus/packer/libpod_images.json
deleted file mode 100644
index 9dac3e8ea..000000000
--- a/contrib/cirrus/packer/libpod_images.json
+++ /dev/null
@@ -1,130 +0,0 @@
-{
- "variables": {
- "FEDORA_CNI_COMMIT": "{{env `FEDORA_CNI_COMMIT`}}",
- "CNI_COMMIT": "{{env `CNI_COMMIT`}}",
- "CRIO_COMMIT": "{{env `CRIO_COMMIT`}}",
- "CRIU_COMMIT": "{{env `CRIU_COMMIT`}}",
- "RUNC_COMMIT": "{{env `RUNC_COMMIT`}}",
-
- "CENTOS_BASE_IMAGE": "{{env `CENTOS_BASE_IMAGE`}}" ,
- "UBUNTU_BASE_IMAGE": "{{env `UBUNTU_BASE_IMAGE`}}",
- "FEDORA_BASE_IMAGE": "{{env `FEDORA_BASE_IMAGE`}}",
- "RHEL_BASE_IMAGE": "{{env `RHEL_BASE_IMAGE`}}",
-
- "GOSRC": "{{env `GOSRC`}}",
- "PACKER_BASE": "{{env `PACKER_BASE`}}",
- "SCRIPT_BASE": "{{env `SCRIPT_BASE`}}",
-
- "SERVICE_ACCOUNT": "{{env `SERVICE_ACCOUNT`}}",
- "GCP_PROJECT_ID": "{{env `GCP_PROJECT_ID`}}",
- "BUILT_IMAGE_SUFFIX": "{{env `BUILT_IMAGE_SUFFIX`}}",
- "GCE_SSH_USERNAME": "{{env `GCE_SSH_USERNAME`}}",
- "RHSM_COMMAND": "{{env `RHSM_COMMAND`}}"
- },
- "sensitive-variables": [
- "GCP_PROJECT_ID", "SERVICE_ACCOUNT", "GCE_SSH_USERNAME", "RHSM_COMMAND"
- ],
- "builders": [
- {
- "name": "rhel-7",
- "type": "googlecompute",
- "project_id": "{{user `GCP_PROJECT_ID`}}",
- "zone": "us-central1-a",
- "source_image": "{{user `RHEL_BASE_IMAGE`}}",
- "image_name": "{{user `RHEL_BASE_IMAGE`}}{{user `BUILT_IMAGE_SUFFIX`}}",
- "image_family": "{{user `RHEL_BASE_IMAGE`}}-libpod",
- "service_account_email": "{{user `SERVICE_ACCOUNT`}}",
- "communicator": "ssh",
- "ssh_username": "ec2-user",
- "ssh_pty": "true"
- },{
- "name": "centos-7",
- "type": "googlecompute",
- "project_id": "{{user `GCP_PROJECT_ID`}}",
- "zone": "us-central1-a",
- "source_image": "{{user `CENTOS_BASE_IMAGE`}}",
- "image_name": "{{user `CENTOS_BASE_IMAGE`}}{{user `BUILT_IMAGE_SUFFIX`}}",
- "image_family": "{{user `CENTOS_BASE_IMAGE`}}-libpod",
- "service_account_email": "{{user `SERVICE_ACCOUNT`}}",
- "communicator": "ssh",
- "ssh_username": "{{user `GCE_SSH_USERNAME`}}",
- "ssh_pty": "true"
- },{
- "name": "fedora-28",
- "type": "googlecompute",
- "project_id": "{{user `GCP_PROJECT_ID`}}",
- "zone": "us-central1-a",
- "source_image": "{{user `FEDORA_BASE_IMAGE`}}",
- "image_name": "{{user `FEDORA_BASE_IMAGE`}}{{user `BUILT_IMAGE_SUFFIX`}}",
- "image_family": "{{user `FEDORA_BASE_IMAGE`}}-libpod",
- "service_account_email": "{{user `SERVICE_ACCOUNT`}}",
- "communicator": "ssh",
- "ssh_username": "fedora",
- "ssh_pty": "true"
- },{
- "name": "ubuntu-18",
- "type": "googlecompute",
- "project_id": "{{user `GCP_PROJECT_ID`}}",
- "zone": "us-central1-a",
- "source_image": "{{user `UBUNTU_BASE_IMAGE`}}",
- "image_name": "{{user `UBUNTU_BASE_IMAGE`}}{{user `BUILT_IMAGE_SUFFIX`}}",
- "image_family": "{{user `UBUNTU_BASE_IMAGE`}}-libpod",
- "service_account_email": "{{user `SERVICE_ACCOUNT`}}",
- "communicator": "ssh",
- "ssh_username": "{{user `GCE_SSH_USERNAME`}}",
- "ssh_pty": "true"
- }
- ],
- "provisioners": [
- {
- "type": "file",
- "source": "{{user `GOSRC`}}",
- "destination": "/tmp/libpod"
- },{
- "type": "shell",
- "only": ["rhel-7"],
- "script": "{{user `GOSRC`}}/{{user `PACKER_BASE`}}/rhel_setup.sh",
- "environment_vars": [
- "SCRIPT_BASE={{user `SCRIPT_BASE`}}",
- "CNI_COMMIT={{user `CNI_COMMIT`}}",
- "CRIO_COMMIT={{user `CRIO_COMMIT`}}",
- "CRIU_COMMIT={{user `CRIU_COMMIT`}}",
- "RUNC_COMMIT={{user `RUNC_COMMIT`}}",
- "RHSM_COMMAND={{user `RHSM_COMMAND`}}"
- ]
- },{
- "type": "shell",
- "only": ["centos-7"],
- "script": "{{user `GOSRC`}}/{{user `PACKER_BASE`}}/centos_setup.sh",
- "environment_vars": [
- "SCRIPT_BASE={{user `SCRIPT_BASE`}}",
- "CNI_COMMIT={{user `CNI_COMMIT`}}",
- "CRIO_COMMIT={{user `CRIO_COMMIT`}}",
- "CRIU_COMMIT={{user `CRIU_COMMIT`}}",
- "RUNC_COMMIT={{user `RUNC_COMMIT`}}"
- ]
- },{
- "type": "shell",
- "only": ["fedora-28"],
- "script": "{{user `GOSRC`}}/{{user `PACKER_BASE`}}/fedora_setup.sh",
- "environment_vars": [
- "SCRIPT_BASE={{user `SCRIPT_BASE`}}",
- "CNI_COMMIT={{user `FEDORA_CNI_COMMIT`}}",
- "CRIO_COMMIT={{user `CRIO_COMMIT`}}",
- "CRIU_COMMIT={{user `CRIU_COMMIT`}}",
- "RUNC_COMMIT={{user `RUNC_COMMIT`}}"
- ]
- },{
- "type": "shell",
- "only": ["ubuntu-18"],
- "script": "{{user `GOSRC`}}/{{user `PACKER_BASE`}}/ubuntu_setup.sh",
- "environment_vars": [
- "SCRIPT_BASE={{user `SCRIPT_BASE`}}",
- "CNI_COMMIT={{user `CNI_COMMIT`}}",
- "CRIO_COMMIT={{user `CRIO_COMMIT`}}",
- "CRIU_COMMIT={{user `CRIU_COMMIT`}}",
- "RUNC_COMMIT={{user `RUNC_COMMIT`}}"
- ]
- }
- ]
-}
diff --git a/contrib/cirrus/packer/libpod_images.yml b/contrib/cirrus/packer/libpod_images.yml
new file mode 100644
index 000000000..d31c11a8d
--- /dev/null
+++ b/contrib/cirrus/packer/libpod_images.yml
@@ -0,0 +1,96 @@
+---
+
+# All of these are required
+variables:
+ # Names of the GCE base images to start from, as set in .cirrus.yml
+ RHEL_BASE_IMAGE: '{{env `RHEL_BASE_IMAGE`}}'
+ CENTOS_BASE_IMAGE: '{{env `CENTOS_BASE_IMAGE`}}'
+ UBUNTU_BASE_IMAGE: '{{env `UBUNTU_BASE_IMAGE`}}'
+ FEDORA_BASE_IMAGE: '{{env `FEDORA_BASE_IMAGE`}}'
+ PRIOR_FEDORA_BASE_IMAGE: '{{env `PRIOR_FEDORA_BASE_IMAGE`}}'
+ FAH_BASE_IMAGE: '{{env `FAH_BASE_IMAGE`}}'
+
+ # libpod dependencies to build and install into images
+ FEDORA_CNI_COMMIT: "{{env `FEDORA_CNI_COMMIT`}}"
+ CNI_COMMIT: "{{env `CNI_COMMIT`}}"
+ CRIO_COMMIT: "{{env `CRIO_COMMIT`}}"
+ CRIU_COMMIT: "{{env `CRIU_COMMIT`}}"
+ RUNC_COMMIT: "{{env `RUNC_COMMIT`}}"
+
+ BUILT_IMAGE_SUFFIX: '{{env `BUILT_IMAGE_SUFFIX`}}'
+ GOSRC: '{{env `GOSRC`}}'
+ PACKER_BASE: '{{env `PACKER_BASE`}}'
+ SCRIPT_BASE: '{{env `SCRIPT_BASE`}}'
+
+ # Protected credentials, decrypted by Cirrus at runtime
+ GCE_SSH_USERNAME: '{{env `GCE_SSH_USERNAME`}}'
+ GCP_PROJECT_ID: '{{env `GCP_PROJECT_ID`}}'
+ RHSM_COMMAND: '{{env `RHSM_COMMAND`}}'
+ SERVICE_ACCOUNT: '{{env `SERVICE_ACCOUNT`}}'
+ GOOGLE_APPLICATION_CREDENTIALS: '{{env `GOOGLE_APPLICATION_CREDENTIALS`}}'
+
+# Don't leak sensitive values in error messages / output
+sensitive-variables:
+ - 'GCE_SSH_USERNAME'
+ - 'GCP_PROJECT_ID'
+ - 'RHSM_COMMAND'
+ - 'SERVICE_ACCOUNT'
+
+# What images to produce in which cloud
+builders:
+ # v----- is a YAML anchor, allowing this object to be referenced by name (below)
+ - &gce_hosted_image
+ name: 'ubuntu-18'
+ type: 'googlecompute'
+ image_name: '{{build_name}}{{user `BUILT_IMAGE_SUFFIX`}}'
+ image_family: '{{build_name}}-libpod'
+ source_image: '{{user `UBUNTU_BASE_IMAGE`}}'
+ disk_size: 20
+ project_id: '{{user `GCP_PROJECT_ID`}}'
+ service_account_email: '{{user `SERVICE_ACCOUNT`}}'
+ communicator: 'ssh'
+ ssh_username: '{{user `GCE_SSH_USERNAME`}}'
+ ssh_pty: 'true'
+ # The only zone supported by Cirrus-CI as of this writing
+ zone: 'us-central1-a'
+
+ # v----- is a YAML alias, allowing partial re-use of the anchored object
+ - <<: *gce_hosted_image
+ name: 'rhel-7'
+ source_image: '{{user `RHEL_BASE_IMAGE`}}'
+
+ - <<: *gce_hosted_image
+ name: 'centos-7'
+ source_image: '{{user `CENTOS_BASE_IMAGE`}}'
+
+ - <<: *gce_hosted_image
+ name: 'fedora-29'
+ source_image: '{{user `FEDORA_BASE_IMAGE`}}'
+
+ - <<: *gce_hosted_image
+ name: 'fedora-28'
+ source_image: '{{user `PRIOR_FEDORA_BASE_IMAGE`}}'
+
+ - <<: *gce_hosted_image
+ name: 'fah-29'
+ source_image: '{{user `FAH_BASE_IMAGE`}}'
+
+# The brains of the operation, making actual modifications to the base-image.
+provisioners:
+ - type: 'file'
+ source: '{{user `GOSRC`}}'
+ destination: '/tmp/libpod'
+
+ - type: 'shell'
+ script: '{{user `GOSRC`}}/{{user `PACKER_BASE`}}/{{split build_name "-" 0}}_setup.sh'
+ environment_vars:
+ - 'SCRIPT_BASE={{user `SCRIPT_BASE`}}'
+ - 'CNI_COMMIT={{user `CNI_COMMIT`}}'
+ - 'FEDORA_CNI_COMMIT={{user `FEDORA_CNI_COMMIT`}}'
+ - 'CRIO_COMMIT={{user `CRIO_COMMIT`}}'
+ - 'CRIU_COMMIT={{user `CRIU_COMMIT`}}'
+ - 'RUNC_COMMIT={{user `RUNC_COMMIT`}}'
+ - 'RHSM_COMMAND={{user `RHSM_COMMAND`}}'
+
+post-processors:
+ - - type: 'manifest'
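
A note on the shell provisioner above: '{{split build_name "-" 0}}' takes the distribution prefix of each builder name to select its setup script. A minimal bash sketch of the same mapping, using the builder names defined above:

    # ubuntu-18 -> ubuntu_setup.sh, fedora-29 -> fedora_setup.sh, and so on.
    for build_name in ubuntu-18 rhel-7 centos-7 fedora-29 fedora-28 fah-29; do
        echo "${build_name} -> ${build_name%%-*}_setup.sh"
    done
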
diff --git a/contrib/cirrus/packer/make-user-data.sh b/contrib/cirrus/packer/make-user-data.sh
new file mode 100644
index 000000000..7f7fa1c1a
--- /dev/null
+++ b/contrib/cirrus/packer/make-user-data.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# This script is used by the Makefile; it's not intended to be run by humans
+
+cat <<EOF > user-data
+#cloud-config
+timezone: US/Eastern
+growpart:
+ mode: auto
+disable_root: false
+ssh_pwauth: True
+ssh_import_id: [root]
+ssh_authorized_keys:
+ - $(cat cidata.ssh.pub)
+users:
+ - name: root
+ primary-group: root
+ homedir: /root
+ system: true
+EOF
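
This script only writes the cloud-init user-data file; per the CIDATA_ISO comment in libpod_base_images.yml above, the Makefile is expected to bundle it into cidata.iso. The exact Makefile rule is not part of this hunk, but a NoCloud seed ISO is conventionally produced along these lines (the genisoimage invocation is an assumption, not copied from the Makefile):

    # Hypothetical packaging step: bundle user-data (plus an empty meta-data)
    # into the seed ISO the qemu builder attaches via -cdrom.
    touch meta-data
    genisoimage -output cidata.iso -volid cidata -joliet -rock user-data meta-data
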
diff --git a/contrib/cirrus/packer/prior_fedora_base-setup.sh b/contrib/cirrus/packer/prior_fedora_base-setup.sh
new file mode 120000
index 000000000..998a5d9fd
--- /dev/null
+++ b/contrib/cirrus/packer/prior_fedora_base-setup.sh
@@ -0,0 +1 @@
+fedora_base-setup.sh \ No newline at end of file
diff --git a/contrib/cirrus/packer/rhel_base-setup.sh b/contrib/cirrus/packer/rhel_base-setup.sh
new file mode 100644
index 000000000..8b2073d4f
--- /dev/null
+++ b/contrib/cirrus/packer/rhel_base-setup.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# N/B: This script is not intended to be run by humans. It configures the
+# RHEL base image for import so that it will boot in GCE.
+
+set -e
+
+[[ "$1" == "post" ]] || exit 0 # pre stage is not needed
+
+# Load in library (copied by packer, before this script was run)
+source $GOSRC/$SCRIPT_BASE/lib.sh
+
+req_env_var "
+ RHSM_COMMAND $RHSM_COMMAND
+"
+
+install_ooe
+
+echo "Setting up repos"
+# Frequently needed
+ooe.sh sudo yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
+# Required for google to manage ssh keys
+ooe.sh sudo tee /etc/yum.repos.d/google-cloud-sdk.repo << EOM
+[google-cloud-compute]
+name=google-cloud-compute
+baseurl=https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+EOM
+
+rhsm_enable
+
+echo "Installing/removing packages"
+ooe.sh sudo yum -y install google-compute-engine google-compute-engine-oslogin
+ooe.sh sudo yum -y erase "cloud-init" "rh-amazon-rhui-client*" || true
+ooe.sh sudo systemctl enable \
+ google-accounts-daemon \
+ google-clock-skew-daemon \
+ google-instance-setup \
+ google-network-daemon \
+ google-shutdown-scripts \
+ google-startup-scripts
+
+rhel_exit_handler # release subscription!
+
+rh_finalize
+
+echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/rhel_setup.sh b/contrib/cirrus/packer/rhel_setup.sh
index d296713fc..99376fd65 100644
--- a/contrib/cirrus/packer/rhel_setup.sh
+++ b/contrib/cirrus/packer/rhel_setup.sh
@@ -18,24 +18,7 @@ RHSM_COMMAND $RHSM_COMMAND
install_ooe
-export GOPATH="$(mktemp -d)"
-export RHSMCMD="$(mktemp)"
-
-exit_handler() {
- set +ex
- cd /
- sudo rm -rf "$RHSMCMD"
- sudo rm -rf "$GOPATH"
- sudo subscription-manager remove --all
- sudo subscription-manager unregister
- sudo subscription-manager clean
-}
-trap "exit_handler" EXIT
-
-# Avoid logging sensitive details
-echo "$RHSM_COMMAND" > "$RHSMCMD"
-ooe.sh sudo bash "$RHSMCMD"
-sudo rm -rf "$RHSMCMD"
+rhsm_enable
ooe.sh sudo yum -y erase "rh-amazon-rhui-client*"
ooe.sh sudo subscription-manager repos "--disable=*"
@@ -47,26 +30,12 @@ ooe.sh sudo subscription-manager repos \
ooe.sh sudo yum -y update
-# Frequently needed
-ooe.sh sudo yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-
-# Required for google to manage ssh keys
-sudo tee -a /etc/yum.repos.d/google-cloud-sdk.repo << EOM
-[google-cloud-compute]
-name=google-cloud-compute
-baseurl=https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
- https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-EOM
-
ooe.sh sudo yum -y install \
atomic-registries \
btrfs-progs-devel \
bzip2 \
device-mapper-devel \
+ emacs-nox \
findutils \
glib2-devel \
glibc-static \
@@ -74,8 +43,6 @@ ooe.sh sudo yum -y install \
golang \
golang-github-cpuguy83-go-md2man \
golang-github-cpuguy83-go-md2man \
- google-compute-engine \
- google-compute-engine-oslogin \
gpgme-devel \
iptables \
libassuan-devel \
@@ -103,6 +70,7 @@ ooe.sh sudo yum -y install \
runc \
skopeo-containers \
unzip \
+ vim \
which \
xz
@@ -118,7 +86,7 @@ install_criu
install_packer_copied_files
-exit_handler # release subscription!
+rhel_exit_handler # release subscription!
rh_finalize
diff --git a/contrib/cirrus/packer/ubuntu_setup.sh b/contrib/cirrus/packer/ubuntu_setup.sh
index 4cf1f335b..af5671c90 100644
--- a/contrib/cirrus/packer/ubuntu_setup.sh
+++ b/contrib/cirrus/packer/ubuntu_setup.sh
@@ -21,11 +21,14 @@ install_ooe
export GOPATH="$(mktemp -d)"
trap "sudo rm -rf $GOPATH" EXIT
+# Avoid getting stuck waiting for user input
+export DEBIAN_FRONTEND=noninteractive
+
# Try twice as workaround for minor networking problems
echo "Updating system and installing package dependencies"
-ooe.sh sudo apt-get -qq update || sudo apt-get -qq update
-ooe.sh sudo apt-get -qq upgrade || sudo apt-get -qq upgrade
-ooe.sh sudo apt-get -qq install --no-install-recommends \
+ooe.sh sudo -E apt-get -qq update || sudo -E apt-get -qq update
+ooe.sh sudo -E apt-get -qq upgrade || sudo -E apt-get -qq upgrade
+ooe.sh sudo -E apt-get -qq install --no-install-recommends \
apparmor \
autoconf \
automake \
@@ -34,6 +37,7 @@ ooe.sh sudo apt-get -qq install --no-install-recommends \
build-essential \
curl \
e2fslibs-dev \
+ emacs-nox \
gawk \
gettext \
go-md2man \
@@ -54,6 +58,8 @@ ooe.sh sudo apt-get -qq install --no-install-recommends \
libostree-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
+ libseccomp-dev \
+ libseccomp2 \
libtool \
libudev-dev \
lsof \
@@ -71,6 +77,7 @@ ooe.sh sudo apt-get -qq install --no-install-recommends \
python3-setuptools \
socat \
unzip \
+ vim \
xz-utils
echo "Fixing Ubuntu kernel not enabling swap accounting by default"
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 167db127f..174bd3daf 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -4,7 +4,6 @@ set -e
source $(dirname $0)/lib.sh
req_env_var "
-CI $CI
USER $USER
HOME $HOME
ENVLIB $ENVLIB
@@ -16,12 +15,11 @@ CIRRUS_BUILD_ID $CIRRUS_BUILD_ID"
cd "$CIRRUS_WORKING_DIR" # for clarity of initial conditions
# Verify basic dependencies
-for depbin in go rsync unzip sha256sum curl make
+for depbin in go rsync unzip sha256sum curl make python3 git
do
if ! type -P "$depbin" &> /dev/null
then
- echo "ERROR: $depbin binary not found in $PATH"
- exit 2
+ echo "***** WARNING: $depbin binary not found in $PATH *****"
fi
done
@@ -35,14 +33,15 @@ then
# N/B: Single-quote items evaluated every time, double-quotes only once (right now).
for envstr in \
"$MARK" \
+ "export EPOCH_TEST_COMMIT=\"$CIRRUS_BASE_SHA\"" \
"export HEAD=\"$CIRRUS_CHANGE_IN_REPO\"" \
"export TRAVIS=\"1\"" \
"export GOSRC=\"$CIRRUS_WORKING_DIR\"" \
"export OS_RELEASE_ID=\"$(os_release_id)\"" \
"export OS_RELEASE_VER=\"$(os_release_ver)\"" \
- "export OS_REL_VER=\"${OS_RELEASE_ID}-${OS_RELEASE_VER}\"" \
+ "export OS_REL_VER=\"$(os_release_id)-$(os_release_ver)\"" \
"export BUILT_IMAGE_SUFFIX=\"-$CIRRUS_REPO_NAME-${CIRRUS_CHANGE_IN_REPO:0:8}\"" \
- "export GOPATH=\"/go\"" \
+ "export GOPATH=\"/var/tmp/go\"" \
'export PATH="$HOME/bin:$GOPATH/bin:/usr/local/bin:$PATH"' \
'export LD_LIBRARY_PATH="/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"'
do
@@ -53,9 +52,16 @@ then
# Some setup needs to vary between distros
case "${OS_RELEASE_ID}-${OS_RELEASE_VER}" in
ubuntu-18)
+ # Always install runc on Ubuntu
+ install_runc_from_git
envstr='export BUILDTAGS="seccomp $($GOSRC/hack/btrfs_tag.sh) $($GOSRC/hack/btrfs_installed_tag.sh) $($GOSRC/hack/ostree_tag.sh) varlink exclude_graphdriver_devicemapper"'
;;
- fedora-28) ;& # Continue to the next item
+ fedora-29) ;& # Continue to the next item
+ fedora-28)
+ RUNC="https://kojipkgs.fedoraproject.org/packages/runc/1.0.0/55.dev.git578fe65.fc${OS_RELEASE_VER}/x86_64/runc-1.0.0-55.dev.git578fe65.fc${OS_RELEASE_VER}.x86_64.rpm"
+ echo ">>>>> OVERRIDING RUNC WITH $RUNC <<<<<"
+ dnf -y install "$RUNC"
+ ;& # Continue to the next item
centos-7) ;&
rhel-7)
envstr='unset BUILDTAGS' # Use default from Makefile
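
Regarding the quoting note in the hunk above (single-quoted items are evaluated every time, double-quoted ones only once): the distinction matters because the loop writes these strings into an env file that is sourced later. A minimal bash sketch, with illustrative names:

    CIRRUS_CHANGE_IN_REPO=abc1234
    echo "export HEAD=\"$CIRRUS_CHANGE_IN_REPO\"" >> envlib  # expands now: export HEAD="abc1234"
    echo 'export PATH="$HOME/bin:$PATH"'          >> envlib  # expands each time envlib is sourced
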
diff --git a/contrib/cirrus/success.sh b/contrib/cirrus/success.sh
index d1daf9043..2b0cf4655 100755
--- a/contrib/cirrus/success.sh
+++ b/contrib/cirrus/success.sh
@@ -1,22 +1,20 @@
#!/bin/bash
set -e
+
source $(dirname $0)/lib.sh
req_env_var "
- CIRRUS_TASK_NAME $CIRRUS_TASK_NAME
CIRRUS_BRANCH $CIRRUS_BRANCH
- OS_RELEASE_ID $OS_RELEASE_ID
- OS_RELEASE_VER $OS_RELEASE_VER
- CIRRUS_REPO_CLONE_URL $CIRRUS_REPO_CLONE_URL
+ CIRRUS_BUILD_ID $CIRRUS_BUILD_ID
"
-REF_URL="$(echo $CIRRUS_REPO_CLONE_URL | sed 's/.git$//g')"
+REF=$(basename $CIRRUS_BRANCH) # PR number or branch name
+URL="https://cirrus-ci.com/build/$CIRRUS_BUILD_ID"
+
if [[ "$CIRRUS_BRANCH" =~ "pull" ]]
then
- REF_URL="$REF_URL/$CIRRUS_BRANCH" # pull request URL
+ ircmsg "Cirrus-CI testing successful for PR #$REF: $URL"
else
- REF_URL="$REF_URL/commits/$CIRRUS_BRANCH" # branch merge
+ ircmsg "Cirrus-CI testing branch $REF successful: $URL"
fi
-
-ircmsg "Cirrus-CI $CIRRUS_TASK_NAME on $OS_RELEASE_ID-$OS_RELEASE_VER successful for $REF_URL"
diff --git a/contrib/cirrus/verify_source.sh b/contrib/cirrus/system_test.sh
index 860bafc00..66974f8c6 100755
--- a/contrib/cirrus/verify_source.sh
+++ b/contrib/cirrus/system_test.sh
@@ -4,11 +4,12 @@ set -e
source $(dirname $0)/lib.sh
req_env_var "
+GOSRC $GOSRC
OS_RELEASE_ID $OS_RELEASE_ID
OS_RELEASE_VER $OS_RELEASE_VER
"
-show_env_vars
+clean_env
set -x
cd "$GOSRC"
@@ -16,15 +17,17 @@ cd "$GOSRC"
case "${OS_RELEASE_ID}-${OS_RELEASE_VER}" in
ubuntu-18)
make install.tools "BUILDTAGS=$BUILDTAGS"
- make validate "BUILDTAGS=$BUILDTAGS"
- # make lint "BUILDTAGS=$BUILDTAGS"
+ make "BUILDTAGS=$BUILDTAGS"
+ make test-binaries "BUILDTAGS=$BUILDTAGS"
;;
fedora-28) ;&
centos-7) ;&
rhel-7)
make install.tools
- make validate
- # make lint
+ make
+ make test-binaries
;;
*) bad_os_id_ver ;;
esac
+
+make localsystem
diff --git a/contrib/cirrus/unit_test.sh b/contrib/cirrus/unit_test.sh
index cacc23045..61d9dc73d 100755
--- a/contrib/cirrus/unit_test.sh
+++ b/contrib/cirrus/unit_test.sh
@@ -9,22 +9,23 @@ OS_RELEASE_ID $OS_RELEASE_ID
OS_RELEASE_VER $OS_RELEASE_VER
"
-show_env_vars
+clean_env
set -x
cd "$GOSRC"
case "${OS_RELEASE_ID}-${OS_RELEASE_VER}" in
ubuntu-18)
+ make install.tools "BUILDTAGS=$BUILDTAGS"
make localunit "BUILDTAGS=$BUILDTAGS"
make "BUILDTAGS=$BUILDTAGS"
;;
- fedora-28)
+ fedora-29) ;& # Continue to the next item
+ fedora-28) ;&
+ centos-7) ;&
+ rhel-7)
+ make install.tools
make localunit
make
;;
- centos-7) ;& # Continue to the next item
- rhel-7)
- stub 'unit testing not working on $OS_RELEASE_ID'
- ;;
*) bad_os_id_ver ;;
esac
diff --git a/contrib/gate/Dockerfile b/contrib/gate/Dockerfile
new file mode 100644
index 000000000..f9b57a6da
--- /dev/null
+++ b/contrib/gate/Dockerfile
@@ -0,0 +1,69 @@
+FROM fedora:29
+RUN dnf -y install \
+ atomic-registries \
+ btrfs-progs-devel \
+ buildah \
+ bzip2 \
+ conmon \
+ container-selinux \
+ containernetworking-cni \
+ containernetworking-cni-devel \
+ device-mapper-devel \
+ findutils \
+ git \
+ glib2-devel \
+ glibc-static \
+ gnupg \
+ golang \
+ gpgme-devel \
+ iptables \
+ libassuan-devel \
+ libseccomp-devel \
+ libselinux-devel \
+ lsof \
+ make \
+ nmap-ncat \
+ ostree-devel \
+ procps-ng \
+ python \
+ python3-dateutil \
+ python3-psutil \
+ python3-pytoml \
+ python3-varlink \
+ skopeo-containers \
+ slirp4netns \
+ rsync \
+ which \
+ xz \
+ && dnf clean all
+
+ENV GOPATH="/go" \
+ PATH="/go/bin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin" \
+ SRCPATH="/usr/src/libpod" \
+ GOSRC="/go/src/github.com/containers/libpod"
+
+# Only needed for installing build-time dependencies
+COPY / $GOSRC
+
+WORKDIR $GOSRC
+
+# Install dependencies
+RUN set -x && \
+ go get -u github.com/mailru/easyjson/... && \
+ install -D -m 755 "$GOPATH"/bin/easyjson /usr/bin/ && \
+ make install.tools && \
+ install -D -m 755 $GOSRC/contrib/gate/entrypoint.sh /usr/local/bin/ && \
+ rm -rf "$GOSRC"
+
+# Install cni config
+#RUN make install.cni
+RUN mkdir -p /etc/cni/net.d/
+COPY cni/87-podman-bridge.conflist /etc/cni/net.d/87-podman-bridge.conflist
+
+# Make sure we have some policy for pulling images
+RUN mkdir -p /etc/containers
+COPY test/policy.json /etc/containers/policy.json
+COPY test/redhat_sigstore.yaml /etc/containers/registries.d/registry.access.redhat.com.yaml
+
+VOLUME ["/usr/src/libpod"]
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
diff --git a/contrib/gate/README.md b/contrib/gate/README.md
new file mode 100644
index 000000000..709e6035f
--- /dev/null
+++ b/contrib/gate/README.md
@@ -0,0 +1,4 @@
+![PODMAN logo](../../logo/podman-logo-source.svg)
+
+A standard container image for running `gofmt` and lint checks against the
+libpod repository. See the [contributors guide](https://github.com/containers/libpod/blob/master/CONTRIBUTING.md#go-format-and-lint) for usage documentation.
diff --git a/contrib/gate/entrypoint.sh b/contrib/gate/entrypoint.sh
new file mode 100755
index 000000000..e16094cc0
--- /dev/null
+++ b/contrib/gate/entrypoint.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+[[ -n "$SRCPATH" ]] || \
+ ( echo "ERROR: \$SRCPATH must be non-empty" && exit 1 )
+[[ -n "$GOSRC" ]] || \
+ ( echo "ERROR: \$GOSRC must be non-empty" && exit 2 )
+[[ -r "${SRCPATH}/contrib/gate/Dockerfile" ]] || \
+ ( echo "ERROR: Expecting libpod repository root at $SRCPATH" && exit 3 )
+
+# Working from a copy avoids needing to perturb the actual source files
+mkdir -p "$GOSRC"
+/usr/bin/rsync --recursive --links --quiet --safe-links \
+ --perms --times "${SRCPATH}/" "${GOSRC}/"
+cd "$GOSRC"
+make "$@"
diff --git a/contrib/python/podman/Makefile b/contrib/python/podman/Makefile
index 6ec4159f2..0cbfe2fb3 100644
--- a/contrib/python/podman/Makefile
+++ b/contrib/python/podman/Makefile
@@ -1,9 +1,10 @@
PYTHON ?= $(shell command -v python3 2>/dev/null || command -v python)
DESTDIR ?= /
-PODMAN_VERSION ?= '0.0.4'
+PODMAN_VERSION ?= '0.11.1.1'
.PHONY: python-podman
python-podman:
+ PODMAN_VERSION=$(PODMAN_VERSION) \
$(PYTHON) setup.py sdist bdist
.PHONY: lint
@@ -16,12 +17,13 @@ integration:
.PHONY: install
install:
+ PODMAN_VERSION=$(PODMAN_VERSION) \
$(PYTHON) setup.py install --root ${DESTDIR}
.PHONY: upload
upload:
- $(PODMAN_VERSION) $(PYTHON) setup.py sdist bdist_wheel
- twine upload --repository-url https://test.pypi.org/legacy/ dist/*
+ PODMAN_VERSION=$(PODMAN_VERSION) $(PYTHON) setup.py sdist bdist_wheel
+ twine upload --verbose --repository-url https://test.pypi.org/legacy/ dist/*
.PHONY: clobber
clobber: uninstall clean
diff --git a/contrib/python/podman/podman/libs/_containers_attach.py b/contrib/python/podman/podman/libs/_containers_attach.py
index f2dad573b..94247d349 100644
--- a/contrib/python/podman/podman/libs/_containers_attach.py
+++ b/contrib/python/podman/podman/libs/_containers_attach.py
@@ -19,9 +19,13 @@ class Mixin:
"""
if stdin is None:
stdin = sys.stdin.fileno()
+ elif hasattr(stdin, 'fileno'):
+ stdin = stdin.fileno()
if stdout is None:
stdout = sys.stdout.fileno()
+ elif hasattr(stdout, 'fileno'):
+ stdout = stdout.fileno()
with self._client() as podman:
attach = podman.GetAttachSockets(self._id)
@@ -49,7 +53,7 @@ class Mixin:
def resize_handler(self):
"""Send the new window size to conmon."""
- def wrapped(signum, frame):
+ def wrapped(signum, frame): # pylint: disable=unused-argument
packed = fcntl.ioctl(self.pseudo_tty.stdout, termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0))
rows, cols, _, _ = struct.unpack('HHHH', packed)
@@ -67,7 +71,7 @@ class Mixin:
def log_handler(self):
"""Send command to reopen log to conmon."""
- def wrapped(signum, frame):
+ def wrapped(signum, frame): # pylint: disable=unused-argument
with open(self.pseudo_tty.control_socket, 'w') as skt:
# send conmon reopen log message
skt.write('2\n')
diff --git a/contrib/python/podman/podman/libs/containers.py b/contrib/python/podman/podman/libs/containers.py
index e211a284e..7adecea8f 100644
--- a/contrib/python/podman/podman/libs/containers.py
+++ b/contrib/python/podman/podman/libs/containers.py
@@ -1,12 +1,12 @@
"""Models for manipulating containers and storage."""
import collections
-import functools
import getpass
import json
import logging
import signal
import time
+from . import fold_keys
from ._containers_attach import Mixin as AttachMixin
from ._containers_start import Mixin as StartMixin
@@ -14,25 +14,27 @@ from ._containers_start import Mixin as StartMixin
class Container(AttachMixin, StartMixin, collections.UserDict):
"""Model for a container."""
- def __init__(self, client, id, data):
+ def __init__(self, client, ident, data, refresh=True):
"""Construct Container Model."""
super(Container, self).__init__(data)
-
self._client = client
- self._id = id
+ self._id = ident
- with client() as podman:
- self._refresh(podman)
+ if refresh:
+ with client() as podman:
+ self._refresh(podman)
+ else:
+ for k, v in self.data.items():
+ setattr(self, k, v)
+ if 'containerrunning' in self.data:
+ setattr(self, 'running', self.data['containerrunning'])
+ self.data['running'] = self.data['containerrunning']
assert self._id == data['id'],\
'Requested container id({}) does not match store id({})'.format(
self._id, data['id']
)
- def __getitem__(self, key):
- """Get items from parent dict."""
- return super().__getitem__(key)
-
def _refresh(self, podman, tries=1):
try:
ctnr = podman.GetContainer(self._id)
@@ -71,18 +73,18 @@ class Container(AttachMixin, StartMixin, collections.UserDict):
results = podman.ListContainerChanges(self._id)
return results['container']
- def kill(self, signal=signal.SIGTERM, wait=25):
+ def kill(self, sig=signal.SIGTERM, wait=25):
"""Send signal to container.
default signal is signal.SIGTERM.
wait n of seconds, 0 waits forever.
"""
with self._client() as podman:
- podman.KillContainer(self._id, signal)
+ podman.KillContainer(self._id, sig)
timeout = time.time() + wait
while True:
self._refresh(podman)
- if self.status != 'running':
+ if self.status != 'running': # pylint: disable=no-member
return self
if wait and timeout < time.time():
@@ -90,20 +92,11 @@ class Container(AttachMixin, StartMixin, collections.UserDict):
time.sleep(0.5)
- def _lower_hook(self):
- """Convert all keys to lowercase."""
-
- @functools.wraps(self._lower_hook)
- def wrapped(input_):
- return {k.lower(): v for (k, v) in input_.items()}
-
- return wrapped
-
def inspect(self):
"""Retrieve details about containers."""
with self._client() as podman:
results = podman.InspectContainer(self._id)
- obj = json.loads(results['container'], object_hook=self._lower_hook())
+ obj = json.loads(results['container'], object_hook=fold_keys())
return collections.namedtuple('ContainerInspect', obj.keys())(**obj)
def export(self, target):
@@ -115,19 +108,16 @@ class Container(AttachMixin, StartMixin, collections.UserDict):
results = podman.ExportContainer(self._id, target)
return results['tarfile']
- def commit(self,
- image_name,
- *args,
- changes=[],
- message='',
- pause=True,
- **kwargs):
+ def commit(self, image_name, **kwargs):
"""Create image from container.
- All changes overwrite existing values.
- See inspect() to obtain current settings.
+ Keyword arguments:
+ author -- change image's author
+ message -- change image's message, docker format only.
+ pause -- pause container during commit
+ change -- Additional properties to change
- Changes:
+ Change examples:
CMD=/usr/bin/zsh
ENTRYPOINT=/bin/sh date
ENV=TEST=test_containers.TestContainers.test_commit
@@ -136,21 +126,23 @@ class Container(AttachMixin, StartMixin, collections.UserDict):
USER=bozo:circus
VOLUME=/data
WORKDIR=/data/application
+
+ All changes overwrite existing values.
+ See inspect() to obtain current settings.
"""
- # TODO: Clean up *args, **kwargs after Commit() is complete
- try:
- author = kwargs.get('author', getpass.getuser())
- except Exception: # pylint: disable=broad-except
- author = ''
+ author = kwargs.get('author', None) or getpass.getuser()
+ change = kwargs.get('change', None) or []
+ message = kwargs.get('message', None) or ''
+ pause = kwargs.get('pause', None) or True
- for c in changes:
+ for c in change:
if c.startswith('LABEL=') and c.count('=') < 2:
raise ValueError(
'LABEL should have the format: LABEL=label=value, not {}'.
format(c))
with self._client() as podman:
- results = podman.Commit(self._id, image_name, changes, author,
+ results = podman.Commit(self._id, image_name, change, author,
message, pause)
return results['image']
@@ -175,7 +167,7 @@ class Container(AttachMixin, StartMixin, collections.UserDict):
podman.RestartContainer(self._id, timeout)
return self._refresh(podman)
- def rename(self, target):
+ def rename(self, target): # pylint: disable=unused-argument
"""Rename container, return id on success."""
with self._client() as podman:
# TODO: Need arguments
@@ -183,7 +175,7 @@ class Container(AttachMixin, StartMixin, collections.UserDict):
# TODO: fixup objects cached information
return results['container']
- def resize_tty(self, width, height):
+ def resize_tty(self, width, height): # pylint: disable=unused-argument
"""Resize container tty."""
with self._client() as podman:
# TODO: magic re: attach(), arguments
@@ -201,7 +193,8 @@ class Container(AttachMixin, StartMixin, collections.UserDict):
podman.UnpauseContainer(self._id)
return self._refresh(podman)
- def update_container(self, *args, **kwargs):
+ def update_container(self, *args, **kwargs): \
+ # pylint: disable=unused-argument
"""TODO: Update container..., return id on success."""
with self._client() as podman:
podman.UpdateContainer()
@@ -220,7 +213,7 @@ class Container(AttachMixin, StartMixin, collections.UserDict):
obj = results['container']
return collections.namedtuple('StatDetail', obj.keys())(**obj)
- def logs(self, *args, **kwargs):
+ def logs(self, *args, **kwargs): # pylint: disable=unused-argument
"""Retrieve container logs."""
with self._client() as podman:
results = podman.GetContainerLogs(self._id)
@@ -239,7 +232,7 @@ class Containers():
with self._client() as podman:
results = podman.ListContainers()
for cntr in results['containers']:
- yield Container(self._client, cntr['id'], cntr)
+ yield Container(self._client, cntr['id'], cntr, refresh=False)
def delete_stopped(self):
"""Delete all stopped containers."""
diff --git a/contrib/python/podman/podman/libs/images.py b/contrib/python/podman/podman/libs/images.py
index 9453fb416..ae1b86390 100644
--- a/contrib/python/podman/podman/libs/images.py
+++ b/contrib/python/podman/podman/libs/images.py
@@ -27,9 +27,10 @@ class Image(collections.UserDict):
@staticmethod
def _split_token(values=None, sep='='):
+ if not values:
+ return {}
return {
- k: v1
- for k, v1 in (v0.split(sep, 1) for v0 in values if values)
+ k: v1 for k, v1 in (v0.split(sep, 1) for v0 in values)
}
def create(self, *args, **kwargs):
@@ -74,7 +75,7 @@ class Image(collections.UserDict):
obj = json.loads(results['image'], object_hook=fold_keys())
return collections.namedtuple('ImageInspect', obj.keys())(**obj)
- def push(self, target, tlsverify=False):
+ def push(self, target, tlsverify=True):
"""Copy image to target, return id on success."""
with self._client() as podman:
results = podman.PushImage(self._id, target, tlsverify)
diff --git a/contrib/python/podman/test/test_containers.py b/contrib/python/podman/test/test_containers.py
index 3de1e54bc..a7a6ac304 100644
--- a/contrib/python/podman/test/test_containers.py
+++ b/contrib/python/podman/test/test_containers.py
@@ -152,7 +152,7 @@ class TestContainers(PodmanTestCase):
changes.append('WORKDIR=/data/application')
id = self.alpine_ctnr.commit(
- 'alpine3', author='Bozo the clown', changes=changes, pause=True)
+ 'alpine3', author='Bozo the clown', change=changes, pause=True)
img = self.pclient.images.get(id)
self.assertIsNotNone(img)
diff --git a/contrib/python/podman/test/test_images.py b/contrib/python/podman/test/test_images.py
index f97e13b4c..45f0a2964 100644
--- a/contrib/python/podman/test/test_images.py
+++ b/contrib/python/podman/test/test_images.py
@@ -102,7 +102,7 @@ class TestImages(PodmanTestCase):
def test_push(self):
path = '{}/alpine_push'.format(self.tmpdir)
target = 'dir:{}'.format(path)
- self.alpine_image.push(target)
+ self.alpine_image.push(target, tlsverify=False)
self.assertTrue(os.path.isfile(os.path.join(path, 'manifest.json')))
self.assertTrue(os.path.isfile(os.path.join(path, 'version')))
diff --git a/contrib/python/podman/test/test_runner.sh b/contrib/python/podman/test/test_runner.sh
index bf097e2b2..651b2e74f 100755
--- a/contrib/python/podman/test/test_runner.sh
+++ b/contrib/python/podman/test/test_runner.sh
@@ -41,6 +41,7 @@ export TMPDIR=`mktemp -d /tmp/podman.XXXXXXXXXX`
trap "cleanup $TMPDIR" EXIT
function umount {
+ set +xeuo pipefail
# xargs -r always ran once, so write any mount points to file first
mount |awk "/$1/"' { print $3 }' >${TMPDIR}/mounts
if [[ -s ${TMPDIR}/mounts ]]; then
diff --git a/contrib/python/pypodman/Makefile b/contrib/python/pypodman/Makefile
index cd0fcf1de..230eee44d 100644
--- a/contrib/python/pypodman/Makefile
+++ b/contrib/python/pypodman/Makefile
@@ -1,9 +1,10 @@
PYTHON ?= $(shell command -v python3 2>/dev/null || command -v python)
DESTDIR := /
-PODMAN_VERSION ?= '0.0.4'
+PODMAN_VERSION ?= '0.11.1.1'
.PHONY: python-pypodman
python-pypodman:
+ PODMAN_VERSION=$(PODMAN_VERSION) \
$(PYTHON) setup.py sdist bdist
.PHONY: lint
@@ -16,11 +17,12 @@ integration:
.PHONY: install
install:
+ PODMAN_VERSION=$(PODMAN_VERSION) \
$(PYTHON) setup.py install --root ${DESTDIR}
.PHONY: upload
upload:
- $(PODMAN_VERSION) $(PYTHON) setup.py sdist bdist_wheel
+ PODMAN_VERSION=$(PODMAN_VERSION) $(PYTHON) setup.py sdist bdist_wheel
twine upload --repository-url https://test.pypi.org/legacy/ dist/*
.PHONY: clobber
diff --git a/contrib/python/pypodman/docs/man1/pypodman.1 b/contrib/python/pypodman/docs/man1/pypodman.1
index 09acb205b..45472dab0 100644
--- a/contrib/python/pypodman/docs/man1/pypodman.1
+++ b/contrib/python/pypodman/docs/man1/pypodman.1
@@ -85,7 +85,7 @@ overwriting earlier. Any missing items are ignored.
.IP \[bu] 2
From \f[C]\-\-config\-home\f[] command line option + \f[C]pypodman/pypodman.conf\f[]
.IP \[bu] 2
-From environment variable, for example: RUN_DIR
+From environment variable prefixed with PODMAN_, for example: PODMAN_RUN_DIR
.IP \[bu] 2
From command line option, for example: \[en]run\-dir
.PP
diff --git a/contrib/python/pypodman/pypodman/lib/__init__.py b/contrib/python/pypodman/pypodman/lib/__init__.py
index be1b5f467..d9a434254 100644
--- a/contrib/python/pypodman/pypodman/lib/__init__.py
+++ b/contrib/python/pypodman/pypodman/lib/__init__.py
@@ -3,18 +3,17 @@ import sys
import podman
from pypodman.lib.action_base import AbstractActionBase
-from pypodman.lib.parser_actions import (BooleanAction, BooleanValidate,
- ChangeAction, PathAction,
- PositiveIntAction, UnitAction)
+from pypodman.lib.parser_actions import (ChangeAction, PathAction,
+ PositiveIntAction, SignalAction,
+ UnitAction)
from pypodman.lib.podman_parser import PodmanArgumentParser
from pypodman.lib.report import Report, ReportColumn
# Silence pylint overlording...
-assert BooleanAction
-assert BooleanValidate
assert ChangeAction
assert PathAction
assert PositiveIntAction
+assert SignalAction
assert UnitAction
__all__ = [
diff --git a/contrib/python/pypodman/pypodman/lib/action_base.py b/contrib/python/pypodman/pypodman/lib/action_base.py
index a950c362b..5cba7ac5c 100644
--- a/contrib/python/pypodman/pypodman/lib/action_base.py
+++ b/contrib/python/pypodman/pypodman/lib/action_base.py
@@ -17,29 +17,21 @@ class AbstractActionBase(abc.ABC):
Use set_defaults() to set attributes "class_" and "method". These will
be invoked as class_(parsed_args).method()
"""
- parent.add_argument(
+ parent.add_flag(
'--all',
- action='store_true',
- help=('list all items.'
- ' (default: no-op, included for compatibility.)'))
- parent.add_argument(
- '--no-trunc',
- '--notruncate',
- action='store_false',
- dest='truncate',
+ help='list all items.')
+ parent.add_flag(
+ '--truncate',
+ '--trunc',
default=True,
- help='Display extended information. (default: False)')
- parent.add_argument(
- '--noheading',
- action='store_false',
- dest='heading',
+ help="Truncate id's and other long fields.")
+ parent.add_flag(
+ '--heading',
default=True,
- help=('Omit the table headings from the output.'
- ' (default: False)'))
- parent.add_argument(
+ help='Include table headings in the output.')
+ parent.add_flag(
'--quiet',
- action='store_true',
- help='List only the IDs. (default: %(default)s)')
+ help='List only the IDs.')
def __init__(self, args):
"""Construct class."""
diff --git a/contrib/python/pypodman/pypodman/lib/actions/__init__.py b/contrib/python/pypodman/pypodman/lib/actions/__init__.py
index 2668cd8ff..c0d77ddb1 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/__init__.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/__init__.py
@@ -22,6 +22,8 @@ from pypodman.lib.actions.rm_action import Rm
from pypodman.lib.actions.rmi_action import Rmi
from pypodman.lib.actions.run_action import Run
from pypodman.lib.actions.search_action import Search
+from pypodman.lib.actions.start_action import Start
+from pypodman.lib.actions.version_action import Version
__all__ = [
'Attach',
@@ -47,4 +49,6 @@ __all__ = [
'Rmi',
'Run',
'Search',
+ 'Start',
+ 'Version',
]
diff --git a/contrib/python/pypodman/pypodman/lib/actions/_create_args.py b/contrib/python/pypodman/pypodman/lib/actions/_create_args.py
index 207f52796..8ab4292e8 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/_create_args.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/_create_args.py
@@ -1,6 +1,6 @@
"""Implement common create container arguments together."""
-from pypodman.lib import BooleanAction, UnitAction
+from pypodman.lib import SignalAction, UnitAction
class CreateArguments():
@@ -108,11 +108,9 @@ class CreateArguments():
metavar='NODES',
help=('Memory nodes (MEMs) in which to allow execution (0-3, 0,1).'
' Only effective on NUMA systems'))
- parser.add_argument(
+ parser.add_flag(
'--detach',
'-d',
- action=BooleanAction,
- default=False,
help='Detached mode: run the container in the background and'
' print the new container ID. (default: False)')
parser.add_argument(
@@ -218,7 +216,7 @@ class CreateArguments():
# only way for argparse to handle these options.
vol_args = {
- 'choices': ['bind', 'tmpfs', 'ignore'],
+ 'choices': ('bind', 'tmpfs', 'ignore'),
'metavar': 'MODE',
'type': str.lower,
'help': 'Tells podman how to handle the builtin image volumes',
@@ -228,12 +226,10 @@ class CreateArguments():
volume_group.add_argument('--image-volume', **vol_args)
volume_group.add_argument('--builtin-volume', **vol_args)
- parser.add_argument(
+ parser.add_flag(
'--interactive',
'-i',
- action=BooleanAction,
- default=False,
- help='Keep STDIN open even if not attached. (default: False)')
+ help='Keep STDIN open even if not attached.')
parser.add_argument('--ipc', help='Create namespace')
parser.add_argument(
'--kernel-memory', action=UnitAction, help='Kernel memory limit')
@@ -278,10 +274,9 @@ class CreateArguments():
metavar='BRIDGE',
help='Set the Network mode for the container.'
' (format: bridge, host, container:UUID, ns:PATH, none)')
- parser.add_argument(
+ parser.add_flag(
'--oom-kill-disable',
- action=BooleanAction,
- help='Whether to disable OOM Killer for the container or not')
+ help='Whether to disable OOM Killer for the container or not.')
parser.add_argument(
'--oom-score-adj',
choices=range(-1000, 1000),
@@ -298,41 +293,33 @@ class CreateArguments():
help=("Tune the container's pids limit."
" Set -1 to have unlimited pids for the container."))
parser.add_argument('--pod', help='Run container in an existing pod')
- parser.add_argument(
+ parser.add_flag(
'--privileged',
- action=BooleanAction,
help='Give extended privileges to this container.')
parser.add_argument(
'--publish',
'-p',
metavar='RANGE',
help="Publish a container's port, or range of ports, to the host")
- parser.add_argument(
+ parser.add_flag(
'--publish-all',
'-P',
- action=BooleanAction,
help='Publish all exposed ports to random'
- ' ports on the host interfaces'
- '(default: False)')
- parser.add_argument(
+ ' ports on the host interfaces.')
+ parser.add_flag(
'--quiet',
'-q',
- action='store_true',
help='Suppress output information when pulling images')
- parser.add_argument(
+ parser.add_flag(
'--read-only',
- action=BooleanAction,
help="Mount the container's root filesystem as read only.")
- parser.add_argument(
+ parser.add_flag(
'--rm',
- action=BooleanAction,
- default=False,
help='Automatically remove the container when it exits.')
parser.add_argument(
'--rootfs',
- action='store_true',
- help=('If specified, the first argument refers to an'
- ' exploded container on the file system of remote host.'))
+ help='If specified, the first argument refers to an'
+ ' exploded container on the file system of remote host.')
parser.add_argument(
'--security-opt',
action='append',
@@ -340,15 +327,14 @@ class CreateArguments():
help='Set security options.')
parser.add_argument(
'--shm-size', action=UnitAction, help='Size of /dev/shm')
- parser.add_argument(
+ parser.add_flag(
'--sig-proxy',
- action=BooleanAction,
- default=True,
help='Proxy signals sent to the podman run'
' command to the container process')
parser.add_argument(
'--stop-signal',
- metavar='SIGTERM',
+ action=SignalAction,
+ default='TERM',
help='Signal to stop a container')
parser.add_argument(
'--stop-timeout',
@@ -374,11 +360,9 @@ class CreateArguments():
metavar='MOUNT',
help='Create a tmpfs mount.'
' (default: rw,noexec,nosuid,nodev,size=65536k.)')
- parser.add_argument(
+ parser.add_flag(
'--tty',
'-t',
- action=BooleanAction,
- default=False,
help='Allocate a pseudo-TTY for standard input of container.')
parser.add_argument(
'--uidmap',
@@ -394,15 +378,16 @@ class CreateArguments():
parser.add_argument(
'--user',
'-u',
- help=('Sets the username or UID used and optionally'
- ' the groupname or GID for the specified command.'))
+ help='Sets the username or UID used and optionally'
+ ' the groupname or GID for the specified command.')
parser.add_argument(
'--userns',
metavar='NAMESPACE',
help='Set the user namespace mode for the container')
parser.add_argument(
'--uts',
- choices=['host', 'ns'],
+ choices=('host', 'ns'),
+ type=str.lower,
help='Set the UTS mode for the container')
parser.add_argument('--volume', '-v', help='Create a bind mount.')
parser.add_argument(
diff --git a/contrib/python/pypodman/pypodman/lib/actions/commit_action.py b/contrib/python/pypodman/pypodman/lib/actions/commit_action.py
index 21665ad0b..c166e1aff 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/commit_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/commit_action.py
@@ -2,7 +2,7 @@
import sys
import podman
-from pypodman.lib import AbstractActionBase, BooleanAction, ChangeAction
+from pypodman.lib import AbstractActionBase, ChangeAction
class Commit(AbstractActionBase):
@@ -30,7 +30,8 @@ class Commit(AbstractActionBase):
choices=('oci', 'docker'),
default='oci',
type=str.lower,
- help='Set the format of the image manifest and metadata',
+ help='Set the format of the image manifest and metadata.'
+ ' (Ignored.)',
)
parser.add_argument(
'--iidfile',
@@ -40,19 +41,17 @@ class Commit(AbstractActionBase):
parser.add_argument(
'--message',
'-m',
- help='Set commit message for committed image',
+ help='Set commit message for committed image'
+ ' (Only on docker images.)',
)
- parser.add_argument(
+ parser.add_flag(
'--pause',
'-p',
- action=BooleanAction,
- default=True,
help='Pause the container when creating an image',
)
- parser.add_argument(
+ parser.add_flag(
'--quiet',
'-q',
- action='store_true',
help='Suppress output',
)
parser.add_argument(
@@ -80,8 +79,16 @@ class Commit(AbstractActionBase):
flush=True)
return 1
else:
- ident = ctnr.commit(self.opts['image'][0], **self.opts)
- print(ident)
+ ident = ctnr.commit(
+ self.opts['image'][0],
+ change=self.opts.get('change', None),
+ message=self.opts.get('message', None),
+ pause=self.opts['pause'],
+ author=self.opts.get('author', None),
+ )
+
+ if not self.opts['quiet']:
+ print(ident)
except podman.ErrorOccurred as e:
sys.stdout.flush()
print(
diff --git a/contrib/python/pypodman/pypodman/lib/actions/create_action.py b/contrib/python/pypodman/pypodman/lib/actions/create_action.py
index d9631763a..26a312bb1 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/create_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/create_action.py
@@ -21,7 +21,7 @@ class Create(AbstractActionBase):
parser.add_argument('image', nargs=1, help='source image id')
parser.add_argument(
'command',
- nargs='*',
+ nargs=parent.REMAINDER,
help='command and args to run.',
)
parser.set_defaults(class_=cls, method='create')
diff --git a/contrib/python/pypodman/pypodman/lib/actions/history_action.py b/contrib/python/pypodman/pypodman/lib/actions/history_action.py
index f9aaa54f6..76c3ad756 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/history_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/history_action.py
@@ -5,8 +5,7 @@ from collections import OrderedDict
import humanize
import podman
-from pypodman.lib import (AbstractActionBase, BooleanAction, Report,
- ReportColumn)
+from pypodman.lib import AbstractActionBase, Report, ReportColumn
class History(AbstractActionBase):
@@ -17,13 +16,10 @@ class History(AbstractActionBase):
"""Add History command to parent parser."""
parser = parent.add_parser('history', help='report image history')
super().subparser(parser)
- parser.add_argument(
+ parser.add_flag(
'--human',
'-H',
- action=BooleanAction,
- default='True',
- help='Display sizes and dates in human readable format.'
- ' (default: %(default)s)')
+ help='Display sizes and dates in human readable format.')
parser.add_argument(
'--format',
choices=('json', 'table'),
diff --git a/contrib/python/pypodman/pypodman/lib/actions/images_action.py b/contrib/python/pypodman/pypodman/lib/actions/images_action.py
index 29bf90dd2..21376eeeb 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/images_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/images_action.py
@@ -24,11 +24,9 @@ class Images(AbstractActionBase):
help=('Change sort ordered of displayed images.'
' (default: %(default)s)'))
- group = parser.add_mutually_exclusive_group()
- group.add_argument(
+ parser.add_flag(
'--digests',
- action='store_true',
- help='Include digests with images. (default: %(default)s)')
+ help='Include digests with images.')
parser.set_defaults(class_=cls, method='list')
def __init__(self, args):
diff --git a/contrib/python/pypodman/pypodman/lib/actions/info_action.py b/contrib/python/pypodman/pypodman/lib/actions/info_action.py
index 988284541..3c854a358 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/info_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/info_action.py
@@ -22,10 +22,6 @@ class Info(AbstractActionBase):
" (default: yaml)")
parser.set_defaults(class_=cls, method='info')
- def __init__(self, args):
- """Construct Info class."""
- super().__init__(args)
-
def info(self):
"""Report on Podman Service."""
try:
diff --git a/contrib/python/pypodman/pypodman/lib/actions/inspect_action.py b/contrib/python/pypodman/pypodman/lib/actions/inspect_action.py
index 514b4702a..ca5ad2215 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/inspect_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/inspect_action.py
@@ -22,12 +22,9 @@ class Inspect(AbstractActionBase):
type=str.lower,
help='Type of object to inspect',
)
- parser.add_argument(
- 'size',
- action='store_true',
- default=True,
- help='Display the total file size if the type is a container.'
- ' Always True.')
+ parser.add_flag(
+ '--size',
+ help='Display the total file size if the type is a container.')
parser.add_argument(
'objects',
nargs='+',
@@ -35,10 +32,6 @@ class Inspect(AbstractActionBase):
)
parser.set_defaults(class_=cls, method='inspect')
- def __init__(self, args):
- """Construct Inspect class."""
- super().__init__(args)
-
def _get_container(self, ident):
try:
logging.debug("Getting container %s", ident)
@@ -59,7 +52,7 @@ class Inspect(AbstractActionBase):
def inspect(self):
"""Inspect provided podman objects."""
- output = {}
+ output = []
try:
for ident in self._args.objects:
obj = None
@@ -78,7 +71,13 @@ class Inspect(AbstractActionBase):
msg = 'Object "{}" not found'.format(ident)
print(msg, file=sys.stderr, flush=True)
else:
- output.update(obj._asdict())
+ fields = obj._asdict()
+ if not self._args.size:
+ try:
+ del fields['sizerootfs']
+ except KeyError:
+ pass
+ output.append(fields)
except podman.ErrorOccurred as e:
sys.stdout.flush()
print(
diff --git a/contrib/python/pypodman/pypodman/lib/actions/kill_action.py b/contrib/python/pypodman/pypodman/lib/actions/kill_action.py
index cb3d3f035..e8fb4e74d 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/kill_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/kill_action.py
@@ -1,9 +1,8 @@
"""Remote client command for signaling podman containers."""
-import signal
import sys
import podman
-from pypodman.lib import AbstractActionBase
+from pypodman.lib import AbstractActionBase, SignalAction
class Kill(AbstractActionBase):
@@ -16,10 +15,9 @@ class Kill(AbstractActionBase):
parser.add_argument(
'--signal',
'-s',
- choices=range(1, signal.NSIG),
- metavar='[1,{}]'.format(signal.NSIG),
+ action=SignalAction,
default=9,
- help='Signal to send to the container. (default: 9)')
+ help='Signal to send to the container. (default: %(default)s)')
parser.add_argument(
'containers',
nargs='+',
@@ -27,10 +25,6 @@ class Kill(AbstractActionBase):
)
parser.set_defaults(class_=cls, method='kill')
- def __init__(self, args):
- """Construct Kill class."""
- super().__init__(args)
-
def kill(self):
"""Signal provided containers."""
try:
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pause_action.py b/contrib/python/pypodman/pypodman/lib/actions/pause_action.py
index ab64d8b81..7dc02f7fe 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pause_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pause_action.py
@@ -19,10 +19,6 @@ class Pause(AbstractActionBase):
)
parser.set_defaults(class_=cls, method='pause')
- def __init__(self, args):
- """Construct Pause class."""
- super().__init__(args)
-
def pause(self):
"""Pause provided containers."""
try:
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/create_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/create_parser.py
index 46c1e3e51..4e0bde777 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/create_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/create_parser.py
@@ -2,7 +2,7 @@
import sys
import podman
-from pypodman.lib import AbstractActionBase, BooleanAction
+from pypodman.lib import AbstractActionBase
class CreatePod(AbstractActionBase):
@@ -20,12 +20,9 @@ class CreatePod(AbstractActionBase):
type=str,
help='Path to cgroups under which the'
' cgroup for the pod will be created.')
- parser.add_argument(
+ parser.add_flag(
'--infra',
- action=BooleanAction,
- default=True,
- help='Create an infra container and associate it with the pod'
- '(default: %(default)s)')
+ help='Create an infra container and associate it with the pod.')
parser.add_argument(
'-l',
'--label',
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/kill_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/kill_parser.py
index 430ec34e0..9b6229939 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/kill_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/kill_parser.py
@@ -3,7 +3,7 @@ import signal
import sys
import podman
-from pypodman.lib import AbstractActionBase
+from pypodman.lib import AbstractActionBase, SignalAction
from pypodman.lib import query_model as query_pods
@@ -15,18 +15,16 @@ class KillPod(AbstractActionBase):
"""Add Pod Kill command to parent parser."""
parser = parent.add_parser('kill', help='signal containers in pod')
- parser.add_argument(
- '-a',
+ parser.add_flag(
'--all',
- action='store_true',
- help='Sends signal to all pods')
+ '-a',
+ help='Sends signal to all pods.')
parser.add_argument(
'-s',
'--signal',
- choices=range(1, signal.NSIG),
- metavar='[1,{}]'.format(signal.NSIG),
+ action=SignalAction,
default=9,
- help='Signal to send to the pod. (default: 9)')
+ help='Signal to send to the pod. (default: %(default)s)')
parser.add_argument('pod', nargs='*', help='pod(s) to signal')
parser.set_defaults(class_=cls, method='kill')
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/pause_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/pause_parser.py
index daae028d4..c751314ca 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/pause_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/pause_parser.py
@@ -13,8 +13,10 @@ class PausePod(AbstractActionBase):
def subparser(cls, parent):
"""Add Pod Pause command to parent parser."""
parser = parent.add_parser('pause', help='pause containers in pod')
- parser.add_argument(
- '-a', '--all', action='store_true', help='Pause all pods')
+ parser.add_flag(
+ '--all',
+ '-a',
+ help='Pause all pods.')
parser.add_argument('pod', nargs='*', help='pod(s) to pause.')
parser.set_defaults(class_=cls, method='pause')
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py
index ecfcb883a..855e313c7 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py
@@ -14,18 +14,15 @@ class ProcessesPod(AbstractActionBase):
parser = parent.add_parser('ps', help='list processes of pod')
super().subparser(parser)
- parser.add_argument(
+ parser.add_flag(
'--ctr-names',
- action='store_true',
- help='Include container name in the info field')
- parser.add_argument(
+ help='Include container name in the info field.')
+ parser.add_flag(
'--ctr-ids',
- action='store_true',
- help='Include container ID in the info field')
- parser.add_argument(
+ help='Include container ID in the info field.')
+ parser.add_flag(
'--ctr-status',
- action='store_true',
- help='Include container status in the info field')
+ help='Include container status in the info field.')
parser.add_argument(
'--format',
choices=('json'),
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/remove_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/remove_parser.py
index 40eeb7203..289325d14 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/remove_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/remove_parser.py
@@ -13,13 +13,14 @@ class RemovePod(AbstractActionBase):
def subparser(cls, parent):
"""Add Pod Rm command to parent parser."""
parser = parent.add_parser('rm', help='Delete pod and container(s)')
- parser.add_argument(
- '-a', '--all', action='store_true', help='Remove all pods')
- parser.add_argument(
- '-f',
+ parser.add_flag(
+ '--all',
+ '-a',
+ help='Remove all pods.')
+ parser.add_flag(
'--force',
- action='store_true',
- help='Stop and remove container(s) then delete pod')
+ '-f',
+ help='Stop and remove container(s) then delete pod.')
parser.add_argument(
'pod', nargs='*', help='Pod to remove. Or, use --all')
parser.set_defaults(class_=cls, method='remove')
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/restart_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/restart_parser.py
index af489ad28..53f45b6de 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/restart_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/restart_parser.py
@@ -13,8 +13,10 @@ class RestartPod(AbstractActionBase):
def subparser(cls, parent):
"""Add Pod Restart command to parent parser."""
parser = parent.add_parser('restart', help='restart containers in pod')
- parser.add_argument(
- '-a', '--all', action='store_true', help='Restart all pods')
+ parser.add_flag(
+ '--all',
+ '-a',
+ help='Restart all pods.')
parser.add_argument(
'pod', nargs='*', help='Pod to restart. Or, use --all')
parser.set_defaults(class_=cls, method='restart')
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/start_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/start_parser.py
index 0ddc336bf..ff62b839e 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/start_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/start_parser.py
@@ -14,8 +14,10 @@ class StartPod(AbstractActionBase):
def subparser(cls, parent):
"""Add Pod Start command to parent parser."""
parser = parent.add_parser('start', help='start pod')
- parser.add_argument(
- '-a', '--all', action='store_true', help='Start all pods')
+ parser.add_flag(
+ '--all',
+ '-a',
+ help='Start all pods.')
parser.add_argument(
'pod', nargs='*', help='Pod to start. Or, use --all')
parser.set_defaults(class_=cls, method='start')
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/stop_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/stop_parser.py
index 7054fd38a..cbf2bf1e7 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/stop_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/stop_parser.py
@@ -13,8 +13,10 @@ class StopPod(AbstractActionBase):
def subparser(cls, parent):
"""Add Pod Stop command to parent parser."""
parser = parent.add_parser('stop', help='stop pod')
- parser.add_argument(
- '-a', '--all', action='store_true', help='Stop all pods')
+ parser.add_flag(
+ '--all',
+ '-a',
+ help='Stop all pods.')
parser.add_argument(
'pod', nargs='*', help='Pod to stop. Or, use --all')
parser.set_defaults(class_=cls, method='stop')
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/unpause_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/unpause_parser.py
index 90e1ddbe2..5186cf9cc 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/unpause_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/unpause_parser.py
@@ -13,8 +13,10 @@ class UnpausePod(AbstractActionBase):
def subparser(cls, parent):
"""Add Pod Unpause command to parent parser."""
parser = parent.add_parser('unpause', help='unpause pod')
- parser.add_argument(
- '-a', '--all', action='store_true', help='Unpause all pods')
+ parser.add_flag(
+ '--all',
+ '-a',
+ help='Unpause all pods.')
parser.add_argument(
'pod', nargs='*', help='Pod to unpause. Or, use --all')
parser.set_defaults(class_=cls, method='unpause')
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod_action.py b/contrib/python/pypodman/pypodman/lib/actions/pod_action.py
index 046af34bb..4b8997a05 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod_action.py
@@ -5,6 +5,8 @@ import sys
from pypodman.lib import AbstractActionBase
+# pylint: disable=wildcard-import
+# pylint: disable=unused-wildcard-import
from .pod import *
diff --git a/contrib/python/pypodman/pypodman/lib/actions/port_action.py b/contrib/python/pypodman/pypodman/lib/actions/port_action.py
index d2a8ded46..6913f3813 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/port_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/port_action.py
@@ -13,16 +13,13 @@ class Port(AbstractActionBase):
"""Add Port command to parent parser."""
parser = parent.add_parser(
'port', help='retrieve ports from containers')
- parser.add_argument(
+ parser.add_flag(
'--all',
'-a',
- action='store_true',
- default=False,
help='List all known port mappings for running containers')
parser.add_argument(
'containers',
nargs='*',
- default=None,
help='containers to list ports',
)
parser.set_defaults(class_=cls, method='port')
@@ -61,3 +58,4 @@ class Port(AbstractActionBase):
file=sys.stderr,
flush=True)
return 1
+ return 0
diff --git a/contrib/python/pypodman/pypodman/lib/actions/ps_action.py b/contrib/python/pypodman/pypodman/lib/actions/ps_action.py
index cd7a7947d..62ceb2e67 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/ps_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/ps_action.py
@@ -16,6 +16,7 @@ class Ps(AbstractActionBase):
"""Add Images command to parent parser."""
parser = parent.add_parser('ps', help='list containers')
super().subparser(parser)
+
parser.add_argument(
'--sort',
choices=('createdat', 'id', 'image', 'names', 'runningfor', 'size',
@@ -32,9 +33,9 @@ class Ps(AbstractActionBase):
self.columns = OrderedDict({
'id':
- ReportColumn('id', 'CONTAINER ID', 14),
+ ReportColumn('id', 'CONTAINER ID', 12),
'image':
- ReportColumn('image', 'IMAGE', 30),
+ ReportColumn('image', 'IMAGE', 31),
'command':
ReportColumn('column', 'COMMAND', 20),
'createdat':
@@ -49,10 +50,15 @@ class Ps(AbstractActionBase):
def list(self):
"""List containers."""
+ if self._args.all:
+ ictnrs = self.client.containers.list()
+ else:
+ ictnrs = filter(
+ lambda c: podman.FoldedString(c['status']) == 'running',
+ self.client.containers.list())
+
# TODO: Verify sorting on dates and size
- ctnrs = sorted(
- self.client.containers.list(),
- key=operator.attrgetter(self._args.sort))
+ ctnrs = sorted(ictnrs, key=operator.attrgetter(self._args.sort))
if not ctnrs:
return
@@ -65,9 +71,6 @@ class Ps(AbstractActionBase):
'createdat':
humanize.naturaldate(podman.datetime_parse(ctnr.createdat)),
})
-
- if self._args.truncate:
- fields.update({'image': ctnr.image[-30:]})
rows.append(fields)
with Report(self.columns, heading=self._args.heading) as report:
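
The reworked `list()` above only includes non-running containers when `--all` is given, then sorts with `operator.attrgetter`. A small sketch of that filter-then-sort pattern; the `Ctnr` namedtuple and sample data are made up for illustration, and `casefold()` stands in for `podman.FoldedString`:

```python
# Sketch of the --all filter plus attrgetter sort used by Ps.list().
import operator
from collections import namedtuple

Ctnr = namedtuple('Ctnr', ('id', 'image', 'status', 'createdat'))

containers = [
    Ctnr('aaa', 'alpine', 'exited', '2018-12-01'),
    Ctnr('bbb', 'fedora', 'running', '2018-12-02'),
]

show_all = False
if show_all:
    candidates = containers
else:
    # pypodman folds case via podman.FoldedString; casefold() approximates that
    candidates = [c for c in containers if c.status.casefold() == 'running']

for ctnr in sorted(candidates, key=operator.attrgetter('createdat')):
    print(ctnr.id, ctnr.image, ctnr.status)
```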
diff --git a/contrib/python/pypodman/pypodman/lib/actions/push_action.py b/contrib/python/pypodman/pypodman/lib/actions/push_action.py
index 0030cb5b9..8e86ca335 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/push_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/push_action.py
@@ -15,12 +15,10 @@ class Push(AbstractActionBase):
'push',
help='push image elsewhere',
)
- parser.add_argument(
+ parser.add_flag(
'--tlsverify',
- action='store_true',
- default=True,
help='Require HTTPS and verify certificates when'
- ' contacting registries (default: %(default)s)')
+ ' contacting registries.')
parser.add_argument(
'image', nargs=1, help='name or id of image to push')
parser.add_argument(
@@ -30,10 +28,6 @@ class Push(AbstractActionBase):
)
parser.set_defaults(class_=cls, method='push')
- def __init__(self, args):
- """Construct Push class."""
- super().__init__(args)
-
def pull(self):
"""Store image elsewhere."""
try:
diff --git a/contrib/python/pypodman/pypodman/lib/actions/restart_action.py b/contrib/python/pypodman/pypodman/lib/actions/restart_action.py
index d99d1ad65..415594920 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/restart_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/restart_action.py
@@ -23,10 +23,6 @@ class Restart(AbstractActionBase):
'targets', nargs='+', help='container id(s) to restart')
parser.set_defaults(class_=cls, method='restart')
- def __init__(self, args):
- """Construct Restart class."""
- super().__init__(args)
-
def restart(self):
"""Restart container(s)."""
try:
diff --git a/contrib/python/pypodman/pypodman/lib/actions/rm_action.py b/contrib/python/pypodman/pypodman/lib/actions/rm_action.py
index e8074ef4e..99ff6c460 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/rm_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/rm_action.py
@@ -12,20 +12,14 @@ class Rm(AbstractActionBase):
def subparser(cls, parent):
"""Add Rm command to parent parser."""
parser = parent.add_parser('rm', help='delete container(s)')
- parser.add_argument(
- '-f',
+ parser.add_flag(
'--force',
- action='store_true',
- help=('force delete of running container(s).'
- ' (default: %(default)s)'))
+ '-f',
+ help='force delete of running container(s).')
parser.add_argument(
'targets', nargs='+', help='container id(s) to delete')
parser.set_defaults(class_=cls, method='remove')
- def __init__(self, args):
- """Construct Rm class."""
- super().__init__(args)
-
def remove(self):
"""Remove container(s)."""
for ident in self._args.targets:
diff --git a/contrib/python/pypodman/pypodman/lib/actions/rmi_action.py b/contrib/python/pypodman/pypodman/lib/actions/rmi_action.py
index c6ba835cb..7c3d0bd79 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/rmi_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/rmi_action.py
@@ -12,19 +12,13 @@ class Rmi(AbstractActionBase):
def subparser(cls, parent):
"""Add Rmi command to parent parser."""
parser = parent.add_parser('rmi', help='delete image(s)')
- parser.add_argument(
- '-f',
+ parser.add_flag(
'--force',
- action='store_true',
- help=('force delete of image(s) and associated containers.'
- ' (default: %(default)s)'))
+ '-f',
+ help='force delete of image(s) and associated containers.')
parser.add_argument('targets', nargs='+', help='image id(s) to delete')
parser.set_defaults(class_=cls, method='remove')
- def __init__(self, args):
- """Construct Rmi class."""
- super().__init__(args)
-
def remove(self):
"""Remove image(s)."""
for ident in self._args.targets:
diff --git a/contrib/python/pypodman/pypodman/lib/actions/run_action.py b/contrib/python/pypodman/pypodman/lib/actions/run_action.py
index a63eb7917..6a6b3cb2c 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/run_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/run_action.py
@@ -21,7 +21,7 @@ class Run(AbstractActionBase):
parser.add_argument('image', nargs=1, help='source image id.')
parser.add_argument(
'command',
- nargs='*',
+ nargs=parent.REMAINDER,
help='command and args to run.',
)
parser.set_defaults(class_=cls, method='run')
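
Using `nargs=parent.REMAINDER` (wired up to `argparse.REMAINDER` in the `podman_parser.py` hunk later in this patch) keeps flags that follow the image from being parsed as pypodman options, so they reach the container command untouched. A quick standalone demonstration of the difference:

```python
# Demonstrates why run/create/exec want argparse.REMAINDER for the trailing
# command: everything after the image is left alone instead of being parsed.
import argparse

parser = argparse.ArgumentParser(prog='run-sketch')
parser.add_argument('--tty', action='store_true')
parser.add_argument('image', nargs=1)
parser.add_argument('command', nargs=argparse.REMAINDER)

args = parser.parse_args(['--tty', 'alpine', 'ls', '-l', '/'])
print(args.command)  # ['ls', '-l', '/'] -- the '-l' is not treated as a flag
```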
diff --git a/contrib/python/pypodman/pypodman/lib/actions/search_action.py b/contrib/python/pypodman/pypodman/lib/actions/search_action.py
index d2a585d92..b7b8b465d 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/search_action.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/search_action.py
@@ -4,8 +4,8 @@ import sys
from collections import OrderedDict
import podman
-from pypodman.lib import (AbstractActionBase, BooleanValidate,
- PositiveIntAction, Report, ReportColumn)
+from pypodman.lib import (AbstractActionBase, PositiveIntAction, Report,
+ ReportColumn)
class FilterAction(argparse.Action):
@@ -58,16 +58,16 @@ class FilterAction(argparse.Action):
if val < 0:
parser.error(msg)
elif opt == 'is-automated':
- try:
- val = BooleanValidate()(val)
- except ValueError:
+ if val.capitalize() in ('True', 'False'):
+                val = val.capitalize() == 'True'
+ else:
msg = ('{} option "is-automated"'
' must be True or False.'.format(self.dest))
parser.error(msg)
elif opt == 'is-official':
- try:
- val = BooleanValidate()(val)
- except ValueError:
+ if val.capitalize() in ('True', 'False'):
+                val = val.capitalize() == 'True'
+ else:
msg = ('{} option "is-official"'
' must be True or False.'.format(self.dest))
parser.error(msg)
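
Note that `bool()` on a string is not a boolean parser: any non-empty string is truthy, which is why the `is-automated`/`is-official` checks above compare the capitalized value against `'True'` directly. A two-line illustration of the pitfall:

```python
# Why bool() on a string is not a parser: any non-empty string is truthy.
print(bool('False'))                    # True -- not what the user meant
print('False'.capitalize() == 'True')   # False -- explicit comparison works
```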
diff --git a/contrib/python/pypodman/pypodman/lib/actions/start_action.py b/contrib/python/pypodman/pypodman/lib/actions/start_action.py
new file mode 100644
index 000000000..5f88731dc
--- /dev/null
+++ b/contrib/python/pypodman/pypodman/lib/actions/start_action.py
@@ -0,0 +1,71 @@
+"""Remote client command for starting containers."""
+import sys
+
+import podman
+from pypodman.lib import AbstractActionBase
+
+
+class Start(AbstractActionBase):
+ """Class for starting container."""
+
+ @classmethod
+ def subparser(cls, parent):
+ """Add Start command to parent parser."""
+ parser = parent.add_parser('start', help='start container')
+ parser.add_flag(
+ '--attach',
+ '-a',
+ help="Attach container's STDOUT and STDERR.")
+ parser.add_argument(
+ '--detach-keys',
+ metavar='KEY(s)',
+ default=4,
+ help='Override the key sequence for detaching a container.'
+ ' (format: a single character [a-Z] or ctrl-<value> where'
+ ' <value> is one of: a-z, @, ^, [, , or _) (default: ^D)')
+ parser.add_flag(
+ '--interactive',
+ '-i',
+ help="Attach container's STDIN.")
+ # TODO: Implement sig-proxy
+ parser.add_flag(
+ '--sig-proxy',
+ help="Proxy received signals to the process."
+ )
+ parser.add_argument(
+ 'containers',
+ nargs='+',
+ help='containers to start',
+ )
+ parser.set_defaults(class_=cls, method='start')
+
+ def start(self):
+ """Start provided containers."""
+ stdin = sys.stdin if self.opts['interactive'] else None
+ stdout = sys.stdout if self.opts['attach'] else None
+
+ try:
+ for ident in self._args.containers:
+ try:
+ ctnr = self.client.containers.get(ident)
+ ctnr.attach(
+ eot=self.opts['detach_keys'],
+ stdin=stdin,
+ stdout=stdout)
+ ctnr.start()
+ except podman.ContainerNotFound as e:
+ sys.stdout.flush()
+ print(
+ 'Container "{}" not found'.format(e.name),
+ file=sys.stderr,
+ flush=True)
+ else:
+ print(ident)
+ except podman.ErrorOccurred as e:
+ sys.stdout.flush()
+ print(
+ '{}'.format(e.reason).capitalize(),
+ file=sys.stderr,
+ flush=True)
+ return 1
+ return 0
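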
diff --git a/contrib/python/pypodman/pypodman/lib/actions/version_action.py b/contrib/python/pypodman/pypodman/lib/actions/version_action.py
new file mode 100644
index 000000000..29a0cabe4
--- /dev/null
+++ b/contrib/python/pypodman/pypodman/lib/actions/version_action.py
@@ -0,0 +1,35 @@
+"""Remote client command for reporting on Podman service."""
+import sys
+
+import podman
+from pypodman.lib import AbstractActionBase
+
+
+class Version(AbstractActionBase):
+ """Class for reporting on Podman Service."""
+
+ @classmethod
+ def subparser(cls, parent):
+ """Add Version command to parent parser."""
+ parser = parent.add_parser(
+ 'version', help='report version on podman service')
+ parser.set_defaults(class_=cls, method='version')
+
+ def version(self):
+ """Report on Podman Service."""
+ try:
+ info = self.client.system.info()
+ except podman.ErrorOccurred as e:
+ sys.stdout.flush()
+ print(
+ '{}'.format(e.reason).capitalize(),
+ file=sys.stderr,
+ flush=True)
+ return 1
+ else:
+ version = info._asdict()['podman']
+ host = info._asdict()['host']
+ print("Version {}".format(version['podman_version']))
+ print("Go Version {}".format(version['go_version']))
+ print("Git Commit {}".format(version['git_commit']))
+ print("OS/Arch {}/{}".format(host["os"], host["arch"]))
diff --git a/contrib/python/pypodman/pypodman/lib/parser_actions.py b/contrib/python/pypodman/pypodman/lib/parser_actions.py
index c10b85495..3ff12cab8 100644
--- a/contrib/python/pypodman/pypodman/lib/parser_actions.py
+++ b/contrib/python/pypodman/pypodman/lib/parser_actions.py
@@ -6,6 +6,7 @@ The constructors are very verbose but remain for IDE support.
import argparse
import copy
import os
+import signal
# API defined by argparse.Action therefore shut up pylint
# pragma pylint: disable=redefined-builtin
@@ -13,22 +14,8 @@ import os
# pragma pylint: disable=too-many-arguments
-class BooleanValidate():
- """Validate value is boolean string."""
-
- def __call__(self, value):
- """Return True, False or raise ValueError."""
- val = value.capitalize()
- if val == 'False':
- return False
- elif val == 'True':
- return True
- else:
- raise ValueError('"{}" is not True or False'.format(value))
-
-
-class BooleanAction(argparse.Action):
- """Convert and validate bool argument."""
+class ChangeAction(argparse.Action):
+ """Convert and validate change argument."""
def __init__(self,
option_strings,
@@ -37,11 +24,16 @@ class BooleanAction(argparse.Action):
const=None,
default=None,
type=None,
- choices=('True', 'False'),
+ choices=None,
required=False,
help=None,
- metavar='{True,False}'):
- """Create BooleanAction object."""
+ metavar='OPT=VALUE'):
+ """Create ChangeAction object."""
+ help = (help or '') + ('Apply change(s) to the new image.'
+ ' May be given multiple times.')
+ if default is None:
+ default = []
+
super().__init__(
option_strings=option_strings,
dest=dest,
@@ -56,32 +48,37 @@ class BooleanAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
"""Convert and Validate input."""
- try:
- val = BooleanValidate()(values)
- except ValueError:
- parser.error('{} must be True or False.'.format(self.dest))
- else:
- setattr(namespace, self.dest, val)
+ items = getattr(namespace, self.dest, None) or []
+ items = copy.copy(items)
+ choices = ('CMD', 'ENTRYPOINT', 'ENV', 'EXPOSE', 'LABEL', 'ONBUILD',
+ 'STOPSIGNAL', 'USER', 'VOLUME', 'WORKDIR')
-class ChangeAction(argparse.Action):
- """Convert and validate change argument."""
+ opt, _ = values.split('=', 1)
+ if opt not in choices:
+ parser.error('Option "{}" is not supported by argument "{}",'
+ ' valid options are: {}'.format(
+ opt, option_string, ', '.join(choices)))
+ items.append(values)
+ setattr(namespace, self.dest, items)
+
+
+class SignalAction(argparse.Action):
+ """Validate input as a signal."""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
- default=[],
- type=None,
+ default=None,
+ type=str,
choices=None,
required=False,
- help=None,
- metavar='OPT=VALUE'):
- """Create ChangeAction object."""
- help = (help or '') + ('Apply change(s) to the new image.'
- ' May be given multiple times.')
-
+ help='The signal to send.'
+ ' It may be given as a name or a number.',
+ metavar='SIGNAL'):
+ """Create SignalAction object."""
super().__init__(
option_strings=option_strings,
dest=dest,
@@ -94,22 +91,40 @@ class ChangeAction(argparse.Action):
help=help,
metavar=metavar)
- def __call__(self, parser, namespace, values, option_string=None):
- """Convert and Validate input."""
- print(self.dest)
- items = getattr(namespace, self.dest, None) or []
- items = copy.copy(items)
+ if hasattr(signal, "Signals"):
- choices = ('CMD', 'ENTRYPOINT', 'ENV', 'EXPOSE', 'LABEL', 'ONBUILD',
- 'STOPSIGNAL', 'USER', 'VOLUME', 'WORKDIR')
+ def _signal_number(signame):
+ cooked = 'SIG{}'.format(signame)
+ try:
+ return signal.Signals[cooked].value
+                except KeyError:
+ pass
+ else:
- opt, val = values.split('=', 1)
- if opt not in choices:
- parser.error('{} is not a supported "--change" option,'
- ' valid options are: {}'.format(
- opt, ', '.join(choices)))
- items.append(values)
- setattr(namespace, self.dest, items)
+ def _signal_number(signame):
+ cooked = 'SIG{}'.format(signame)
+ for n, v in sorted(signal.__dict__.items()):
+ if n != cooked:
+ continue
+ if n.startswith("SIG") and not n.startswith("SIG_"):
+ return v
+
+ self._signal_number = _signal_number
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ """Validate input is a signal for platform."""
+ if values.isdigit():
+ signum = int(values)
+            if not 1 <= signum < signal.NSIG:
+                parser.error('"{}" is not a valid signal number (1-{}).'.format(
+                    values, signal.NSIG - 1))
+ else:
+ signum = self._signal_number(values)
+ if signum is None:
+ parser.error(
+ '"{}" is not a valid signal,'
+ ' see your platform documentation.'.format(values))
+ setattr(namespace, self.dest, signum)
class UnitAction(argparse.Action):
@@ -127,8 +142,8 @@ class UnitAction(argparse.Action):
help=None,
metavar='UNIT'):
"""Create UnitAction object."""
- help = (help or metavar or dest
- ) + ' (format: <number>[<unit>], where unit = b, k, m or g)'
+ help = (help or metavar or dest)\
+ + ' (format: <number>[<unit>], where unit = b, k, m or g)'
super().__init__(
option_strings=option_strings,
dest=dest,
@@ -148,15 +163,15 @@ class UnitAction(argparse.Action):
except ValueError:
if not values[:-1].isdigit():
msg = ('{} must be a positive integer,'
- ' with optional suffix').format(self.dest)
+ ' with optional suffix').format(option_string)
parser.error(msg)
if not values[-1] in ('b', 'k', 'm', 'g'):
msg = '{} only supports suffices of: b, k, m, g'.format(
- self.dest)
+ option_string)
parser.error(msg)
else:
if val <= 0:
- msg = '{} must be a positive integer'.format(self.dest)
+ msg = '{} must be a positive integer'.format(option_string)
parser.error(msg)
setattr(namespace, self.dest, values)
@@ -174,19 +189,16 @@ class PositiveIntAction(argparse.Action):
type=int,
choices=None,
required=False,
- help=None,
+ help='Must be a positive integer.',
metavar=None):
"""Create PositiveIntAction object."""
- self.message = '{} must be a positive integer'.format(dest)
- help = help or self.message
-
super().__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
- type=int,
+ type=type,
choices=choices,
required=required,
help=help,
@@ -198,7 +210,8 @@ class PositiveIntAction(argparse.Action):
setattr(namespace, self.dest, values)
return
- parser.error(self.message)
+ msg = '{} must be a positive integer'.format(option_string)
+ parser.error(msg)
class PathAction(argparse.Action):
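
`SignalAction` resolves names through `signal.Signals` where available and falls back to scanning `signal.__dict__` on older interpreters. A condensed, standalone version of that lookup (simplified for illustration, not the exact class):

```python
# Condensed version of the name-to-number lookup behind SignalAction.
import signal


def signal_number(signame):
    """Return the numeric value for a bare signal name such as 'HUP'."""
    cooked = 'SIG{}'.format(signame)
    if hasattr(signal, 'Signals'):  # Python 3.5+
        try:
            return signal.Signals[cooked].value
        except KeyError:
            return None
    # Older interpreters: walk the module namespace.
    for name, value in signal.__dict__.items():
        if name == cooked and name.startswith('SIG') and not name.startswith('SIG_'):
            return value
    return None


print(signal_number('HUP'))    # 1 on Linux
print(signal_number('BOGUS'))  # None
```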
diff --git a/contrib/python/pypodman/pypodman/lib/podman_parser.py b/contrib/python/pypodman/pypodman/lib/podman_parser.py
index d3c84224f..913546a91 100644
--- a/contrib/python/pypodman/pypodman/lib/podman_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/podman_parser.py
@@ -48,6 +48,18 @@ class PodmanArgumentParser(argparse.ArgumentParser):
super().__init__(**kwargs)
+ def add_flag(self, *args, **kwargs):
+ """Add flag to parser."""
+ flags = [a for a in args if a[0] in self.prefix_chars]
+ dest = flags[0].lstrip(self.prefix_chars)
+ no_flag = '{0}{0}no-{1}'.format(self.prefix_chars, dest)
+
+ group = self.add_mutually_exclusive_group(required=False)
+ group.add_argument(*flags, action='store_true', dest=dest, **kwargs)
+ group.add_argument(no_flag, action='store_false', dest=dest, **kwargs)
+ default = kwargs.get('default', False)
+ self.set_defaults(**{dest: default})
+
def initialize_parser(self):
"""Initialize parser without causing recursion meltdown."""
self.add_argument(
@@ -97,6 +109,8 @@ class PodmanArgumentParser(argparse.ArgumentParser):
actions_parser = self.add_subparsers(
dest='subparser_name', help='commands')
+ # For create/exec/run: don't process options intended for subcommand
+ actions_parser.REMAINDER = argparse.REMAINDER
# import buried here to prevent import loops
import pypodman.lib.actions # pylint: disable=cyclic-import
@@ -152,7 +166,7 @@ class PodmanArgumentParser(argparse.ArgumentParser):
reqattr(
'run_dir',
getattr(args, 'run_dir')
- or os.environ.get('RUN_DIR')
+ or os.environ.get('PODMAN_RUN_DIR')
or config['default'].get('run_dir')
or str(Path(args.xdg_runtime_dir, 'pypodman'))
) # yapf: disable
@@ -161,23 +175,24 @@ class PodmanArgumentParser(argparse.ArgumentParser):
args,
'host',
getattr(args, 'host')
- or os.environ.get('HOST')
+ or os.environ.get('PODMAN_HOST')
or config['default'].get('host')
) # yapf:disable
reqattr(
'username',
getattr(args, 'username')
+ or os.environ.get('PODMAN_USER')
+ or config['default'].get('username')
or os.environ.get('USER')
or os.environ.get('LOGNAME')
- or config['default'].get('username')
or getpass.getuser()
) # yapf:disable
reqattr(
'port',
getattr(args, 'port')
- or os.environ.get('PORT')
+ or os.environ.get('PODMAN_PORT')
or config['default'].get('port', None)
or 22
) # yapf:disable
@@ -185,7 +200,7 @@ class PodmanArgumentParser(argparse.ArgumentParser):
reqattr(
'remote_socket_path',
getattr(args, 'remote_socket_path')
- or os.environ.get('REMOTE_SOCKET_PATH')
+ or os.environ.get('PODMAN_REMOTE_SOCKET_PATH')
or config['default'].get('remote_socket_path')
or '/run/podman/io.podman'
) # yapf:disable
@@ -193,7 +208,7 @@ class PodmanArgumentParser(argparse.ArgumentParser):
reqattr(
'log_level',
getattr(args, 'log_level')
- or os.environ.get('LOG_LEVEL')
+ or os.environ.get('PODMAN_LOG_LEVEL')
or config['default'].get('log_level')
or logging.WARNING
) # yapf:disable
@@ -202,7 +217,7 @@ class PodmanArgumentParser(argparse.ArgumentParser):
args,
'identity_file',
getattr(args, 'identity_file')
- or os.environ.get('IDENTITY_FILE')
+ or os.environ.get('PODMAN_IDENTITY_FILE')
or config['default'].get('identity_file')
or os.path.expanduser('~{}/.ssh/id_dsa'.format(args.username))
) # yapf:disable
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index c2d8fc59d..20e2a84ea 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -39,7 +39,7 @@
%global shortcommit_conmon %(c=%{commit_conmon}; echo ${c:0:7})
Name: podman
-Version: 0.10.2
+Version: 0.12.2
Release: #COMMITDATE#.git%{shortcommit0}%{?dist}
Summary: Manage Pods, Containers and Container Images
License: ASL 2.0
@@ -378,10 +378,6 @@ providing packages with %{import_path} prefix.
%prep
%autosetup -Sgit -n %{repo}-%{shortcommit0}
-sed -i '/\/bin\/env/d' completions/bash/%{name}
-sed -i 's/0.0.0/%{version}/' contrib/python/%{name}/setup.py
-sed -i 's/0.0.0/%{version}/' contrib/python/py%{name}/setup.py
-mv pkg/hooks/README.md pkg/hooks/README-hooks.md
# untar cri-o
tar zxf %{SOURCE1}
@@ -416,15 +412,17 @@ popd
%install
install -dp %{buildroot}%{_unitdir}
-%{__make} PREFIX=%{buildroot}%{_prefix} ETCDIR=%{buildroot}%{_sysconfdir} \
+PODMAN_VERSION=%{version} %{__make} PREFIX=%{buildroot}%{_prefix} ETCDIR=%{buildroot}%{_sysconfdir} \
install.bin \
install.man \
install.cni \
install.systemd \
install.completions
+mv pkg/hooks/README.md pkg/hooks/README-hooks.md
+
%if %{with varlink}
-%{__make} DESTDIR=%{buildroot} install.python
+PODMAN_VERSION=%{version} %{__make} DESTDIR=%{buildroot} install.python
%endif # varlink
# install libpod.conf
diff --git a/docs/libpod.conf.5.md b/docs/libpod.conf.5.md
index 198e927ee..d63baeb88 100644
--- a/docs/libpod.conf.5.md
+++ b/docs/libpod.conf.5.md
@@ -24,6 +24,18 @@ libpod to manage containers.
**cgroup_manager**=""
Specify the CGroup Manager to use; valid values are "systemd" and "cgroupfs"
+**hooks_dir**=["*path*", ...]
+
+ Each `*.json` file in the path configures a hook for Podman containers. For more details on the syntax of the JSON files and the semantics of hook injection, see `oci-hooks(5)`. Podman and libpod currently support both the 1.0.0 and 0.1.0 hook schemas, although the 0.1.0 schema is deprecated.
+
+  Paths listed later in the array have higher precedence (`oci-hooks(5)` discusses directory precedence).
+
+ For the annotation conditions, libpod uses any annotations set in the generated OCI configuration.
+
+ For the bind-mount conditions, only mounts explicitly requested by the caller via `--volume` are considered. Bind mounts that libpod inserts by default (e.g. `/dev/shm`) are not considered.
+
+ If `hooks_dir` is unset for root callers, Podman and libpod will currently default to `/usr/share/containers/oci/hooks.d` and `/etc/containers/oci/hooks.d` in order of increasing precedence. Using these defaults is deprecated, and callers should migrate to explicitly setting `hooks_dir`.
+
**static_dir**=""
Directory for persistent libpod files (database, etc)
By default this will be configured relative to where containers/storage
diff --git a/docs/podman-container-checkpoint.1.md b/docs/podman-container-checkpoint.1.md
index 4906e0e12..94e52dc78 100644
--- a/docs/podman-container-checkpoint.1.md
+++ b/docs/podman-container-checkpoint.1.md
@@ -17,6 +17,25 @@ are not deleted if checkpointing fails for further debugging. If checkpointing s
files are theoretically not needed, but if these files are needed Podman can keep the files
for further analysis.
+**--all, -a**
+
+Checkpoint all running containers.
+
+**--latest, -l**
+
+Instead of providing the container name or ID, checkpoint the last created container.
+
+**--leave-running, -R**
+
+Leave the container running after checkpointing instead of stopping it.
+
+**--tcp-established**
+
+Checkpoint a container with established TCP connections. If the checkpoint
+image contains established TCP connections, this option is required during
+restore. Defaults to not checkpointing containers with established TCP
+connections.
+
## EXAMPLE
podman container checkpoint mywebserver
diff --git a/docs/podman-container-exists.1.md b/docs/podman-container-exists.1.md
new file mode 100644
index 000000000..76701e2c2
--- /dev/null
+++ b/docs/podman-container-exists.1.md
@@ -0,0 +1,40 @@
+% PODMAN(1) Podman Man Pages
+% Brent Baude
+% November 2018
+# NAME
+podman-container-exists - Check if a container exists in local storage
+
+# SYNOPSIS
+**podman container exists**
+[**-h**|**--help**]
+CONTAINER
+
+# DESCRIPTION
+**podman container exists** checks if a container exists in local storage. The **ID** or **Name**
+of the container may be used as input. Podman will return an exit code
+of `0` when the container is found. A `1` will be returned otherwise. An exit code of `125` indicates there
+was an issue accessing the local storage.
+
+## Examples ##
+
+Check if a container called `webclient` exists in local storage (the container does actually exist).
+```
+$ sudo podman container exists webclient
+$ echo $?
+0
+$
+```
+
+Check if a container called `webbackend` exists in local storage (the container does not actually exist).
+```
+$ sudo podman container exists webbackend
+$ echo $?
+1
+$
+```
+
+## SEE ALSO
+podman(1)
+
+# HISTORY
+November 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/podman-container-prune.1.md b/docs/podman-container-prune.1.md
new file mode 100644
index 000000000..1f3ef1d41
--- /dev/null
+++ b/docs/podman-container-prune.1.md
@@ -0,0 +1,31 @@
+% PODMAN(1) Podman Man Pages
+% Brent Baude
+% December 2018
+# NAME
+podman-container-prune - Remove all stopped containers
+
+# SYNOPSIS
+**podman container prune**
+[**-h**|**--help**]
+
+# DESCRIPTION
+**podman container prune** removes all stopped containers from local storage.
+
+## Examples ##
+
+Remove all stopped containers from local storage
+```
+$ sudo podman container prune
+878392adf2e6c5c9bb1fc19b69d37d2e98c8abf9d539c0bce4b15b46bbcce471
+37664467fbe3618bf9479c34393ac29c02696675addf1750f9e346581636cde7
+ed0c6468b8e1cb641b4621d1fe30cb477e1fefc5c0bceb66feaf2f7cb50e5962
+6ac6c8f0067b7a4682e6b8e18902665b57d1a0e07e885d9abcd382232a543ccd
+fff1c5b6c3631746055ec40598ce8ecaa4b82aef122f9e3a85b03b55c0d06c23
+602d343cd47e7cb3dfc808282a9900a3e4555747787ec6723bb68cedab8384d5
+```
+
+## SEE ALSO
+podman(1), podman-ps
+
+# HISTORY
+December 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/podman-container-restore.1.md b/docs/podman-container-restore.1.md
index 6360bccb0..44219f3ef 100644
--- a/docs/podman-container-restore.1.md
+++ b/docs/podman-container-restore.1.md
@@ -24,6 +24,22 @@ processes in the checkpointed container.
Without the **-k**, **--keep** option the checkpoint will be consumed and cannot be used
again.
+**--all, -a**
+
+Restore all checkpointed containers.
+
+**--latest, -l**
+
+Instead of providing the container name or ID, restore the last created container.
+
+**--tcp-established**
+
+Restore a container with established TCP connections. If the checkpoint image
+contains established TCP connections, this option is required during restore.
+If the checkpoint image does not contain established TCP connections this
+option is ignored. Defaults to not restoring containers with established TCP
+connections.
+
## EXAMPLE
podman container restore mywebserver
diff --git a/docs/podman-container-runlabel.1.md b/docs/podman-container-runlabel.1.md
index 73b7d7e15..6f7b4dae8 100644
--- a/docs/podman-container-runlabel.1.md
+++ b/docs/podman-container-runlabel.1.md
@@ -95,8 +95,8 @@ option be used, as the default behavior of using the system-wide default policy
**--tls-verify**
Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
-then tls verification will be used, If set to false then tls verification will not be used. If not specified
-tls verification will be used unless the target registry is listed as an insecure registry in registries.conf
+then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
+TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf
## Examples ##
diff --git a/docs/podman-container.1.md b/docs/podman-container.1.md
index eac3343d5..3675d9719 100644
--- a/docs/podman-container.1.md
+++ b/docs/podman-container.1.md
@@ -20,6 +20,7 @@ The container command allows you to manage containers
| create | [podman-create(1)](podman-create.1.md) | Create a new container. |
| diff | [podman-diff(1)](podman-diff.1.md) | Inspect changes on a container or image's filesystem. |
| exec | [podman-exec(1)](podman-exec.1.md) | Execute a command in a running container. |
+| exists | [podman-container-exists(1)](podman-container-exists.1.md) | Check if a container exists in local storage |
| export | [podman-export(1)](podman-export.1.md) | Export a container's filesystem contents as a tar archive. |
| inspect | [podman-inspect(1)](podman-inspect.1.md) | Display a container or image's configuration. |
| kill | [podman-kill(1)](podman-kill.1.md) | Kill the main process in one or more containers. |
@@ -28,6 +29,7 @@ The container command allows you to manage containers
| mount | [podman-mount(1)](podman-mount.1.md) | Mount a working container's root filesystem. |
| pause | [podman-pause(1)](podman-pause.1.md) | Pause one or more containers. |
| port | [podman-port(1)](podman-port.1.md) | List port mappings for the container. |
+| prune | [podman-container-prune(1)](podman-container-prune.1.md) | Remove all stopped containers from local storage |
| refresh | [podman-refresh(1)](podman-container-refresh.1.md) | Refresh the state of all containers |
| restart | [podman-restart(1)](podman-restart.1.md) | Restart one or more containers. |
| restore | [podman-container-restore(1)](podman-container-restore.1.md) | Restores one or more containers from a checkpoint. |
@@ -38,7 +40,6 @@ The container command allows you to manage containers
| stop | [podman-stop(1)](podman-stop.1.md) | Stop one or more running containers. |
| top | [podman-top(1)](podman-top.1.md) | Display the running processes of a container. |
| umount | [podman-umount(1)](podman-umount.1.md) | Unmount a working container's root filesystem. |
-| unmount | [podman-umount(1)](podman-umount.1.md) | Unmount a working container's root filesystem. |
| unpause | [podman-unpause(1)](podman-unpause.1.md) | Unpause one or more containers. |
| wait | [podman-wait(1)](podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes. |
diff --git a/docs/podman-create.1.md b/docs/podman-create.1.md
index 68c00685b..97d6e77b1 100644
--- a/docs/podman-create.1.md
+++ b/docs/podman-create.1.md
@@ -426,7 +426,8 @@ Set the Network mode for the container
'container:<name|id>': reuse another container's network stack
'host': use the podman host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
'<network-name>|<network-id>': connect to a user-defined network
- 'ns:<path>' path to a network namespace to join
+ 'ns:<path>': path to a network namespace to join
+ 'slirp4netns': use slirp4netns to create a user network stack. This is the default for rootless containers
**--network-alias**=[]
@@ -454,7 +455,8 @@ Tune the container's pids limit. Set `-1` to have unlimited pids for the contain
**--pod**=""
-Run container in an existing pod
+Run container in an existing pod. If you want podman to make the pod for you, prefix the pod name with `new:`.
+To make a pod with more granular options, use the `podman pod create` command before creating a container.
**--privileged**=*true*|*false*
@@ -465,9 +467,10 @@ By default, podman containers are
This is because by default a container is not allowed to access any devices.
A “privileged” container is given access to all devices.
-When the operator executes **podman run --privileged**, podman enables access
-to all devices on the host as well as set turn off most of the security measures
-protecting the host from the container.
+When the operator executes a privileged container, podman enables access
+to all devices on the host, turns off graphdriver mount options, and turns
+off most of the security measures protecting the host from the
+container.
**-p**, **--publish**=[]
@@ -505,6 +508,14 @@ By default a container will have its root filesystem writable allowing processes
to write files anywhere. By specifying the `--read-only` flag the container will have
its root filesystem mounted as read only prohibiting any writes.
+**--restart=""**
+
+Not implemented.
+
+Restart should be handled via systemd unit files. Please add your podman
+commands to a unit file and allow systemd or your init system to handle the
+restarting of the container processes. See example below.
+
**--rm**=*true*|*false*
Automatically remove the container when it exits. The default is *false*.
@@ -764,13 +775,28 @@ the uid and gid from the host.
$ podman create --uidmap 0:30000:7000 --gidmap 0:30000:7000 fedora echo hello
```
+### Running a podman container to restart inside of a systemd unit file
+
+
+```
+[Unit]
+Description=My App
+[Service]
+Restart=always
+ExecStart=/usr/bin/podman start -a my_app
+ExecStop=/usr/bin/podman stop -t 10 my_app
+KillMode=process
+[Install]
+WantedBy=multi-user.target
+```
+
## FILES
**/etc/subuid**
**/etc/subgid**
## SEE ALSO
-subgid(5), subuid(5), libpod.conf(5)
+subgid(5), subuid(5), libpod.conf(5), systemd.unit(5)
## HISTORY
October 2017, converted from Docker documentation to podman by Dan Walsh for podman <dwalsh@redhat.com>
diff --git a/docs/podman-generate-kube.1.md b/docs/podman-generate-kube.1.md
new file mode 100644
index 000000000..396f69615
--- /dev/null
+++ b/docs/podman-generate-kube.1.md
@@ -0,0 +1,151 @@
+% podman-generate Podman Man Pages
+% Brent Baude
+% December 2018
+# NAME
+podman-generate-kube - Generate Kubernetes YAML
+
+# SYNOPSIS
+**podman generate kube**
+[**-h**|**--help**]
+[**-s**|**--service**]
+CONTAINER|POD
+
+# DESCRIPTION
+**podman generate kube** will generate Kubernetes Pod YAML (v1 specification) from a podman container or pod. Whether
+the input is for a container or pod, Podman will always generate the specification as a Pod. The input may be in the form
+of a pod or container name or ID.
+
+The **service** option can be used to generate a Service specification for the corresponding Pod output. In particular,
+if the object has portmap bindings, the service specification will include a NodePort declaration to expose the service. A
+random port is assigned by Podman in the specification.
+
+# OPTIONS:
+
+**-s** **--service**
+Generate a Kubernetes service object in addition to the Pods.
+
+## Examples ##
+
+Create Kubernetes Pod YAML for a container called `some-mariadb`.
+```
+$ sudo podman generate kube some-mariadb
+# Generation of Kubernetes YAML is still under development!
+#
+# Save the output of this file and use kubectl create -f to import
+# it into Kubernetes.
+#
+# Created with podman-0.11.2-dev
+apiVersion: v1
+kind: Pod
+metadata:
+ creationTimestamp: 2018-12-03T19:07:59Z
+ labels:
+ app: some-mariadb
+ name: some-mariadb-libpod
+spec:
+ containers:
+ - command:
+ - docker-entrypoint.sh
+ - mysqld
+ env:
+ - name: PATH
+ value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ - name: TERM
+ value: xterm
+ - name: HOSTNAME
+ - name: container
+ value: podman
+ - name: GOSU_VERSION
+ value: "1.10"
+ - name: GPG_KEYS
+ value: "199369E5404BD5FC7D2FE43BCBCB082A1BB943DB \t177F4010FE56CA3336300305F1656F24C74CD1D8
+ \t430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A \t4D1BB29D63D98E422B2113B19334A25F8507EFA5"
+ - name: MARIADB_MAJOR
+ value: "10.3"
+ - name: MARIADB_VERSION
+ value: 1:10.3.10+maria~bionic
+ - name: MYSQL_ROOT_PASSWORD
+ value: x
+ image: quay.io/baude/demodb:latest
+ name: some-mariadb
+ ports:
+ - containerPort: 3306
+ hostPort: 36533
+ protocol: TCP
+ resources: {}
+ securityContext:
+ allowPrivilegeEscalation: true
+ privileged: false
+ readOnlyRootFilesystem: false
+ tty: true
+ workingDir: /
+status: {}
+```
+
+Create Kubernetes Pod YAML for a pod called `demoweb` and include a service.
+```
+$ sudo podman generate kube -s demoweb
+# Generation of Kubernetes YAML is still under development!
+#
+# Save the output of this file and use kubectl create -f to import
+# it into Kubernetes.
+#
+# Created with podman-0.12.2-dev
+apiVersion: v1
+kind: Pod
+metadata:
+ creationTimestamp: 2018-12-18T15:16:06Z
+ labels:
+ app: demoweb
+ name: demoweb-libpod
+spec:
+ containers:
+ - command:
+ - python3
+ - /root/code/graph.py
+ env:
+ - name: PATH
+ value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ - name: TERM
+ value: xterm
+ - name: HOSTNAME
+ - name: container
+ value: podman
+ image: quay.io/baude/demoweb:latest
+ name: practicalarchimedes
+ resources: {}
+ securityContext:
+ allowPrivilegeEscalation: true
+ capabilities: {}
+ privileged: false
+ readOnlyRootFilesystem: false
+ tty: true
+ workingDir: /root/code
+status: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ creationTimestamp: 2018-12-18T15:16:06Z
+ labels:
+ app: demoweb
+ name: demoweb-libpod
+spec:
+ ports:
+ - name: "8050"
+ nodePort: 31269
+ port: 8050
+ protocol: TCP
+ targetPort: 0
+ selector:
+ app: demoweb
+ type: NodePort
+status:
+ loadBalancer: {}
+```
+
+## SEE ALSO
+podman(1), podman-container, podman-pod
+
+# HISTORY
+December 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/podman-generate.1.md b/docs/podman-generate.1.md
new file mode 100644
index 000000000..f19f48511
--- /dev/null
+++ b/docs/podman-generate.1.md
@@ -0,0 +1,19 @@
+% podman-generate(1)
+
+## NAME
+podman\-generate - Generate structured data based on containers and pods
+
+## SYNOPSIS
+**podman generate** *subcommand*
+
+## DESCRIPTION
+The generate command will create structured output (like YAML) based on a container or pod.
+
+## COMMANDS
+
+| Command | Man Page | Description |
+| ------- | --------------------------------------------------- | ---------------------------------------------------------------------------- |
+| kube | [podman-generate-kube(1)](podman-generate-kube.1.md) | Generate Kubernetes YAML based on a pod or container |
+
+## SEE ALSO
+podman, podman-pod, podman-container
diff --git a/docs/podman-image-exists.1.md b/docs/podman-image-exists.1.md
new file mode 100644
index 000000000..e04c23721
--- /dev/null
+++ b/docs/podman-image-exists.1.md
@@ -0,0 +1,40 @@
+% PODMAN(1) Podman Man Pages
+% Brent Baude
+% November 2018
+# NAME
+podman-image-exists - Check if an image exists in local storage
+
+# SYNOPSIS
+**podman image exists**
+[**-h**|**--help**]
+IMAGE
+
+# DESCRIPTION
+**podman image exists** checks if an image exists in local storage. The **ID** or **Name**
+of the image may be used as input. Podman will return an exit code
+of `0` when the image is found. A `1` will be returned otherwise. An exit code of `125` indicates there
+was an issue accessing the local storage.
+
+## Examples ##
+
+Check if an image called `webclient` exists in local storage (the image does actually exist).
+```
+$ sudo podman image exists webclient
+$ echo $?
+0
+$
+```
+
+Check if an image called `webbackend` exists in local storage (the image does not actually exist).
+```
+$ sudo podman image exists webbackend
+$ echo $?
+1
+$
+```
+
+## SEE ALSO
+podman(1)
+
+# HISTORY
+November 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/podman-image-prune.1.md b/docs/podman-image-prune.1.md
new file mode 100644
index 000000000..db76b26e0
--- /dev/null
+++ b/docs/podman-image-prune.1.md
@@ -0,0 +1,32 @@
+% PODMAN(1) Podman Man Pages
+% Brent Baude
+% December 2018
+# NAME
+podman-image-prune - Remove all unused images
+
+# SYNOPSIS
+**podman image prune**
+[**-h**|**--help**]
+
+# DESCRIPTION
+**podman image prune** removes all unused images from local storage. An unused image
+is defined as an image that does not have any containers based on it.
+
+## Examples ##
+
+Remove all unused images from local storage
+```
+$ sudo podman image prune
+f3e20dc537fb04cb51672a5cb6fdf2292e61d411315549391a0d1f64e4e3097e
+324a7a3b2e0135f4226ffdd473e4099fd9e477a74230cdc35de69e84c0f9d907
+6125002719feb1ddf3030acab1df6156da7ce0e78e571e9b6e9c250424d6220c
+91e732da5657264c6f4641b8d0c4001c218ae6c1adb9dcef33ad00cafd37d8b6
+e4e5109420323221f170627c138817770fb64832da7d8fe2babd863148287fca
+77a57fa8285e9656dbb7b23d9efa837a106957409ddd702f995605af27a45ebe
+```
+
+## SEE ALSO
+podman(1), podman-images
+
+# HISTORY
+December 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/podman-image-trust.1.md b/docs/podman-image-trust.1.md
new file mode 100644
index 000000000..24209698c
--- /dev/null
+++ b/docs/podman-image-trust.1.md
@@ -0,0 +1,81 @@
+% podman-image-trust "1"
+
+# NAME
+podman\-trust - Manage container image trust policy
+
+
+# SYNOPSIS
+**podman image trust set|show**
+[**-h**|**--help**]
+[**-j**|**--json**]
+[**--raw**]
+[**-f**|**--pubkeysfile** KEY1 [**-f**|**--pubkeysfile** KEY2,...]]
+[**-t**|**--type** signedBy|accept|reject]
+REGISTRY[/REPOSITORY]
+
+# DESCRIPTION
+Manages the trust policy of the host system. Trust policy describes
+a registry scope (registry and/or repository) that must be signed by public keys. Trust
+is defined in **/etc/containers/policy.json**. Trust is enforced when a user attempts to pull
+an image from a registry.
+
+Trust scope is evaluated by most specific to least specific. In other words, policy may
+be defined for an entire registry, but refined for a particular repository in that
+registry. See below for examples.
+
+Trust **type** provides a way to whitelist ("accept") or blacklist
+("reject") registries.
+
+Trust may be updated using the command **podman image trust set** for an existing trust scope.
+
+# OPTIONS
+**-h** **--help**
+ Print usage statement.
+
+**-f** **--pubkeysfile**
+ A path to an exported public key on the local system. Key paths
+ will be referenced in policy.json. Any path may be used but path
+ **/etc/pki/containers** is recommended. Option may be used multiple times to
+  require an image be signed by multiple keys. One of **--pubkeys** or
+ **--pubkeysfile** is required for **signedBy** type.
+
+**-t** **--type**
+ The trust type for this policy entry. Accepted values:
+ **signedBy** (default): Require signatures with corresponding list of
+ public keys
+ **accept**: do not require any signatures for this
+ registry scope
+ **reject**: do not accept images for this registry scope
+
+# show OPTIONS
+
+**--raw**
+ Output trust policy file as raw JSON
+
+**-j** **--json**
+ Output trust as JSON for machine parsing
+
+# EXAMPLES
+
+Accept all unsigned images from a registry
+
+ podman image trust set --type accept docker.io
+
+Modify default trust policy
+
+ podman image trust set -t reject default
+
+Display system trust policy
+
+ podman image trust show
+
+Display trust policy file
+
+ podman image trust show --raw
+
+Display trust as JSON
+
+ podman image trust show --json
+
+# HISTORY
+December 2018, originally compiled by Qi Wang (qiwan at redhat dot com)
diff --git a/docs/podman-image.1.md b/docs/podman-image.1.md
index 33de0456f..8aa7cee64 100644
--- a/docs/podman-image.1.md
+++ b/docs/podman-image.1.md
@@ -14,16 +14,19 @@ The image command allows you to manage images
| Command | Man Page | Description |
| -------- | ----------------------------------------- | ------------------------------------------------------------------------------ |
| build | [podman-build(1)](podman-build.1.md) | Build a container using a Dockerfile. |
+| exists | [podman-image-exists(1)](podman-image-exists.1.md) | Check if an image exists in local storage |
| history | [podman-history(1)](podman-history.1.md) | Show the history of an image. |
| import | [podman-import(1)](podman-import.1.md) | Import a tarball and save it as a filesystem image. |
| inspect | [podman-inspect(1)](podman-inspect.1.md) | Display a image or image's configuration. |
| load | [podman-load(1)](podman-load.1.md) | Load an image from the docker archive. |
| ls | [podman-images(1)](podman-images.1.md) | Prints out information about images. |
| pull | [podman-pull(1)](podman-pull.1.md) | Pull an image from a registry. |
+| prune | [podman-image-prune(1)](podman-image-prune.1.md) | Remove all unused images from the local store |
| push | [podman-push(1)](podman-push.1.md) | Push an image from local storage to elsewhere. |
| rm | [podman-rm(1)](podman-rmi.1.md) | Removes one or more locally stored images. |
| save | [podman-save(1)](podman-save.1.md) | Save an image to docker-archive or oci. |
| tag | [podman-tag(1)](podman-tag.1.md) | Add an additional name to a local image. |
+| trust | [podman-image-trust(1)](podman-image-trust.1.md) | Manage container image trust policy. |
## SEE ALSO
podman
diff --git a/docs/podman-images.1.md b/docs/podman-images.1.md
index 0bd829a8e..832df0e23 100644
--- a/docs/podman-images.1.md
+++ b/docs/podman-images.1.md
@@ -49,9 +49,9 @@ Sort by created, id, repository, size or tag (default: created)
```
# podman images
REPOSITORY TAG IMAGE ID CREATED SIZE
-docker.io/kubernetes/pause latest e3d42bcaf643 3 years ago 251kB
-<none> <none> ebb91b73692b 4 weeks ago 27.2MB
-docker.io/library/ubuntu latest 4526339ae51c 6 weeks ago 126MB
+docker.io/kubernetes/pause latest e3d42bcaf643 3 years ago 251 kB
+<none> <none> ebb91b73692b 4 weeks ago 27.2 MB
+docker.io/library/ubuntu latest 4526339ae51c 6 weeks ago 126 MB
```
```
@@ -63,17 +63,17 @@ ebb91b73692b
```
# podman images --noheading
-docker.io/kubernetes/pause latest e3d42bcaf643 3 years ago 251kB
-<none> <none> ebb91b73692b 4 weeks ago 27.2MB
-docker.io/library/ubuntu latest 4526339ae51c 6 weeks ago 126MB
+docker.io/kubernetes/pause latest e3d42bcaf643 3 years ago 251 kB
+<none> <none> ebb91b73692b 4 weeks ago 27.2 MB
+docker.io/library/ubuntu latest 4526339ae51c 6 weeks ago 126 MB
```
```
# podman images --no-trunc
REPOSITORY TAG IMAGE ID CREATED SIZE
-docker.io/kubernetes/pause latest sha256:e3d42bcaf643097dd1bb0385658ae8cbe100a80f773555c44690d22c25d16b27 3 years ago 251kB
-<none> <none> sha256:ebb91b73692bd27890685846412ae338d13552165eacf7fcd5f139bfa9c2d6d9 4 weeks ago 27.2MB
-docker.io/library/ubuntu latest sha256:4526339ae51c3cdc97956a7a961c193c39dfc6bd9733b0d762a36c6881b5583a 6 weeks ago 126MB
+docker.io/kubernetes/pause latest sha256:e3d42bcaf643097dd1bb0385658ae8cbe100a80f773555c44690d22c25d16b27 3 years ago 251 kB
+<none> <none> sha256:ebb91b73692bd27890685846412ae338d13552165eacf7fcd5f139bfa9c2d6d9 4 weeks ago 27.2 MB
+docker.io/library/ubuntu latest sha256:4526339ae51c3cdc97956a7a961c193c39dfc6bd9733b0d762a36c6881b5583a 6 weeks ago 126 MB
```
```
@@ -87,7 +87,7 @@ ebb91b73692b <none> <none>
```
# podman images --filter dangling=true
REPOSITORY TAG IMAGE ID CREATED SIZE
-<none> <none> ebb91b73692b 4 weeks ago 27.2MB
+<none> <none> ebb91b73692b 4 weeks ago 27.2 MB
```
```
@@ -126,25 +126,25 @@ REPOSITORY TAG IMAGE ID CREATED SIZE
```
# podman images --sort repository
REPOSITORY TAG IMAGE ID CREATED SIZE
-<none> <none> 2460217d76fc About a minute ago 4.41MB
-docker.io/library/alpine latest 3fd9065eaf02 5 months ago 4.41MB
-localhost/myapp latest b2e0ad03474a About a minute ago 4.41MB
-registry.access.redhat.com/rhel7 latest 7a840db7f020 2 weeks ago 211MB
-registry.fedoraproject.org/fedora 27 801894bc0e43 6 weeks ago 246MB
+<none> <none> 2460217d76fc About a minute ago 4.41 MB
+docker.io/library/alpine latest 3fd9065eaf02 5 months ago 4.41 MB
+localhost/myapp latest b2e0ad03474a About a minute ago 4.41 MB
+registry.access.redhat.com/rhel7 latest 7a840db7f020 2 weeks ago 211 MB
+registry.fedoraproject.org/fedora 27 801894bc0e43 6 weeks ago 246 MB
```
```
# podman images
REPOSITORY TAG IMAGE ID CREATED SIZE
-localhost/test latest 18f0c080cd72 4 seconds ago 4.42MB
-docker.io/library/alpine latest 3fd9065eaf02 5 months ago 4.41MB
+localhost/test latest 18f0c080cd72 4 seconds ago 4.42 MB
+docker.io/library/alpine latest 3fd9065eaf02 5 months ago 4.41 MB
# podman images -a
REPOSITORY TAG IMAGE ID CREATED SIZE
-localhost/test latest 18f0c080cd72 6 seconds ago 4.42MB
-<none> <none> 270e70dc54c0 7 seconds ago 4.42MB
-<none> <none> 4ed6fbe43414 8 seconds ago 4.41MB
-<none> <none> 6b0df8e71508 8 seconds ago 4.41MB
-docker.io/library/alpine latest 3fd9065eaf02 5 months ago 4.41MB
+localhost/test latest 18f0c080cd72 6 seconds ago 4.42 MB
+<none> <none> 270e70dc54c0 7 seconds ago 4.42 MB
+<none> <none> 4ed6fbe43414 8 seconds ago 4.41 MB
+<none> <none> 6b0df8e71508 8 seconds ago 4.41 MB
+docker.io/library/alpine latest 3fd9065eaf02 5 months ago 4.41 MB
```
## SEE ALSO
diff --git a/docs/podman-login.1.md b/docs/podman-login.1.md
index a3ee2929c..e72d1deca 100644
--- a/docs/podman-login.1.md
+++ b/docs/podman-login.1.md
@@ -36,6 +36,10 @@ Path of the authentication file. Default is ${XDG_\RUNTIME\_DIR}/containers/auth
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
+**--get-login**
+
+Return the logged-in user for the registry. Return an error if no login is found.
+
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
@@ -43,7 +47,9 @@ Default certificates directory is _/etc/containers/certs.d_.
**--tls-verify**
-Require HTTPS and verify certificates when contacting registries (default: true)
+Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
+then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
+TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf.
**--help**, **-h**
diff --git a/docs/podman-pod-create.1.md b/docs/podman-pod-create.1.md
index 673ad9a8c..a63b12d73 100644
--- a/docs/podman-pod-create.1.md
+++ b/docs/podman-pod-create.1.md
@@ -51,6 +51,15 @@ Assign a name to the pod
Write the pod ID to the file
+**-p**, **--publish**=[]
+
+Publish a port or range of ports from the pod to the host
+
+Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort`
+Both hostPort and containerPort can be specified as a range of ports.
+When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range.
+Use `podman port` to see the actual mapping: `podman port CONTAINER $CONTAINERPORT`
+
**--share**=""
A comma deliminated list of kernel namespaces to share. If none or "" is specified, no namespaces will be shared. The namespaces to choose from are ipc, net, pid, user, uts.
diff --git a/docs/podman-pod-exists.1.md b/docs/podman-pod-exists.1.md
new file mode 100644
index 000000000..8fb2fc90e
--- /dev/null
+++ b/docs/podman-pod-exists.1.md
@@ -0,0 +1,40 @@
+% podman-pod-exists(1) Podman Man Pages
+% Brent Baude
+% December 2018
+# NAME
+podman-pod-exists - Check if a pod exists in local storage
+
+# SYNOPSIS
+**podman pod exists**
+[**-h**|**--help**]
+POD
+
+# DESCRIPTION
+**podman pod exists** checks if a pod exists in local storage. The **ID** or **Name**
+of the pod may be used as input. Podman will return an exit code
+of `0` when the pod is found. A `1` will be returned otherwise. An exit code of `125` indicates there
+was an issue accessing the local storage.
+
+## Examples ##
+
+Check if a pod called `web` exists in local storage (the pod does actually exist).
+```
+$ sudo podman pod exists web
+$ echo $?
+0
+$
+```
+
+Check if a pod called `backend` exists in local storage (the pod does not actually exist).
+```
+$ sudo podman pod exists backend
+$ echo $?
+1
+$
+```
+
+## SEE ALSO
+podman-pod(1), podman(1)
+
+# HISTORY
+December 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/podman-pod-stop.1.md b/docs/podman-pod-stop.1.md
index 74799273e..7544f8bf7 100644
--- a/docs/podman-pod-stop.1.md
+++ b/docs/podman-pod-stop.1.md
@@ -19,26 +19,48 @@ Stops all pods
Instead of providing the pod name or ID, stop the last created pod.
+**--timeout, --time, t**
+
+Timeout to wait before forcibly stopping the containers in the pod.
+
## EXAMPLE
-podman pod stop mywebserverpod
+Stop a pod called *mywebserverpod*
+```
+$ podman pod stop mywebserverpod
cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907
+```
-podman pod stop 490eb 3557fb
+Stop two pods by their short IDs.
+```
+$ podman pod stop 490eb 3557fb
490eb241aaf704d4dd2629904410fe4aa31965d9310a735f8755267f4ded1de5
3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab
+```
+Stop the most recently created pod
+```
+$ podman pod stop --latest
3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab
+```
-podman pod stop --latest
-3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab
-
-podman pod stop --all
+Stop all pods
+```
+$ podman pod stop --all
19456b4cd557eaf9629825113a552681a6013f8c8cad258e36ab825ef536e818
3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab
490eb241aaf704d4dd2629904410fe4aa31965d9310a735f8755267f4ded1de5
70c358daecf71ef9be8f62404f926080ca0133277ef7ce4f6aa2d5af6bb2d3e9
cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907
+```
+
+Stop all pods with a timeout of 1 second.
+```
+$ podman pod stop -a -t 1
+3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab
+490eb241aaf704d4dd2629904410fe4aa31965d9310a735f8755267f4ded1de5
+70c358daecf71ef9be8f62404f926080ca0133277ef7ce4f6aa2d5af6bb2d3e9
+```
## SEE ALSO
podman-pod(1), podman-pod-start(1), podman-stop(1)
diff --git a/docs/podman-ps.1.md b/docs/podman-ps.1.md
index 2cb77ffed..8b86703d8 100644
--- a/docs/podman-ps.1.md
+++ b/docs/podman-ps.1.md
@@ -24,7 +24,7 @@ all the containers information. By default it lists:
Show all the containers, default is only running containers
-**--pod**
+**--pod, -p**
Display the pods the containers are associated with
@@ -103,6 +103,13 @@ Valid filters are listed below:
Print usage statement
+**--sync**
+
+Force a sync of container state with the OCI runtime.
+In some cases, a container's state in the runtime can become out of sync with Podman's state.
+This will update Podman's state based on what the OCI runtime reports.
+Forcibly syncing is much slower, but can resolve inconsistent state issues.
+
## EXAMPLES
```
diff --git a/docs/podman-pull.1.md b/docs/podman-pull.1.md
index 86c6823af..2196e251e 100644
--- a/docs/podman-pull.1.md
+++ b/docs/podman-pull.1.md
@@ -77,8 +77,8 @@ option be used, as the default behavior of using the system-wide default policy
**--tls-verify**
Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
-then tls verification will be used, If set to false then tls verification will not be used. If not specified
-tls verification will be used unless the target registry is listed as an insecure registry in registries.conf.
+then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
+TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf.
**--help**, **-h**
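To illustrate the --tls-verify wording above, a pull from a registry that does not present a valid certificate might look like this; the registry address and image name are placeholders:
```
# Explicitly skip TLS verification for a local, insecure registry
$ podman pull --tls-verify=false localhost:5000/myimage:latest
```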
diff --git a/docs/podman-push.1.md b/docs/podman-push.1.md
index 537988ea0..3ce156010 100644
--- a/docs/podman-push.1.md
+++ b/docs/podman-push.1.md
@@ -93,7 +93,9 @@ Add a signature at the destination using the specified key
**--tls-verify**
-Require HTTPS and verify certificates when contacting registries (default: true)
+Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
+then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
+TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf.
## EXAMPLE
diff --git a/docs/podman-rm.1.md b/docs/podman-rm.1.md
index 7474a0d1f..56664a8c1 100644
--- a/docs/podman-rm.1.md
+++ b/docs/podman-rm.1.md
@@ -13,7 +13,7 @@ podman\-rm - Remove one or more containers
**--force, f**
-Force the removal of a running container
+Force the removal of running and paused containers
**--all, a**
@@ -29,16 +29,29 @@ to run containers such as CRI-O, the last started container could be from either
Remove the volumes associated with the container. (Not yet implemented)
## EXAMPLE
-
+Remove a container by its name *mywebserver*
+```
podman rm mywebserver
-
+```
+Remove several containers by name and container ID.
+```
podman rm mywebserver myflaskserver 860a4b23
+```
+Forcibly remove a container by container ID.
+```
podman rm -f 860a4b23
+```
+Remove all containers regardless of their run state.
+```
podman rm -f -a
+```
+Forcibly remove the latest container created.
+```
podman rm -f --latest
+```
## SEE ALSO
podman(1), podman-rmi(1)
diff --git a/docs/podman-rmi.1.md b/docs/podman-rmi.1.md
index f035897ee..9c080c9f1 100644
--- a/docs/podman-rmi.1.md
+++ b/docs/podman-rmi.1.md
@@ -19,15 +19,25 @@ Remove all images in the local storage.
This option will cause podman to remove all containers that are using the image before removing the image from the system.
-## EXAMPLE
-
-podman rmi imageID
+## EXAMPLE
+
+Remove an image by its short ID
+```
+podman rmi c0ed59d05ff7
+```
+Remove an image and its associated containers.
+```
podman rmi --force imageID
+```
-podman rmi imageID1 imageID2 imageID3
+Remove multiple images by their shortened IDs.
+```
+podman rmi c4dfb1609ee2 93fd78260bd1 c0ed59d05ff7
+```
+Remove all images and containers.
+```
podman rmi -a -f
+```
## SEE ALSO
podman(1)
diff --git a/docs/podman-run.1.md b/docs/podman-run.1.md
index 912026a55..c0a466a9c 100644
--- a/docs/podman-run.1.md
+++ b/docs/podman-run.1.md
@@ -408,7 +408,8 @@ Set the Network mode for the container:
- `container:<name|id>`: reuse another container's network stack
- `host`: use the podman host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
- `<network-name>|<network-id>`: connect to a user-defined network
-- `ns:<path>` path to a network namespace to join
+- `ns:<path>`: path to a network namespace to join
+- `slirp4netns`: use slirp4netns to create a user network stack. This is the default for rootless containers
**--network-alias**=[]
@@ -438,7 +439,8 @@ Tune the container's pids limit. Set `-1` to have unlimited pids for the contain
**--pod**=""
-Run container in an existing pod
+Run container in an existing pod. If you want podman to make the pod for you, prefix the pod name with `new:`.
+To make a pod with more granular options, use the `podman pod create` command before creating a container.
**--privileged**=*true*|*false*
@@ -450,8 +452,9 @@ container is not allowed to access any devices. A “privileged” container
is given access to all devices.
When the operator executes **podman run --privileged**, podman enables access
-to all devices on the host as well as set turn off most of the security measures
-protecting the host from the container.
+to all devices on the host, turns off graphdriver mount options, and turns
+off most of the security measures protecting the host from the
+container.
**-p**, **--publish**=[]
@@ -494,6 +497,14 @@ By default a container will have its root filesystem writable allowing processes
to write files anywhere. By specifying the `--read-only` flag the container will have
its root filesystem mounted as read only prohibiting any writes.
+**--restart=""**
+
+Not implemented.
+
+Restart should be handled via systemd unit files. Please add your podman
+commands to a unit file and allow systemd or your init system to handle the
+restarting of the container processes. See example below.
+
**--rm**=*true*|*false*
Automatically remove the container when it exits. The default is *false*.
@@ -1056,13 +1067,28 @@ the uid and gid from the host.
$ podman run --uidmap 0:30000:7000 --gidmap 0:30000:7000 fedora echo hello
```
+### Restarting a podman container with a systemd unit file
+
+
+```
+[Unit]
+Description=My App
+[Service]
+Restart=always
+ExecStart=/usr/bin/podman start -a my_app
+ExecStop=/usr/bin/podman stop -t 10 my_app
+KillMode=process
+[Install]
+WantedBy=multi-user.target
+```
+
## FILES
**/etc/subuid**
**/etc/subgid**
## SEE ALSO
-subgid(5), subuid(5), libpod.conf(5)
+subgid(5), subuid(5), libpod.conf(5), systemd.unit(5)
## HISTORY
September 2018, updated by Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
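A brief sketch of the `new:` pod syntax documented above; the pod name is arbitrary and the image is the alpine image already referenced elsewhere in this patch:
```
# Create the pod "mywebpod" on the fly and run the container inside it
$ podman run --pod new:mywebpod -dt docker.io/library/alpine
# The pod can then be managed with the podman pod subcommands
$ podman pod ps
```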
diff --git a/docs/podman-search.1.md b/docs/podman-search.1.md
index ea1228f94..61f50f1dc 100644
--- a/docs/podman-search.1.md
+++ b/docs/podman-search.1.md
@@ -72,8 +72,8 @@ Do not truncate the output
**--tls-verify**
Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
-then tls verification will be used. If set to false then tls verification will not be used if needed. If not specified
-default registries will be searched through (in /etc/containers/registries.conf), and tls will be skipped if a default
+then TLS verification will be used. If set to false, then TLS verification will not be used if needed. If not specified,
+default registries will be searched through (in /etc/containers/registries.conf), and TLS will be skipped if a default
registry is listed in the insecure registries.
**--help**, **-h**
diff --git a/docs/podman-version.1.md b/docs/podman-version.1.md
index 0c9b9ceed..749a33afd 100644
--- a/docs/podman-version.1.md
+++ b/docs/podman-version.1.md
@@ -16,8 +16,31 @@ OS, and Architecture.
Print usage statement
+**--format**
+
+Change output format to "json" or a Go template.
+
+## Example
+
+A sample output of the `version` command:
+```
+$ podman version
+Version: 0.11.1
+Go Version: go1.11
+Git Commit: "8967a1d691ed44896b81ad48c863033f23c65eb0-dirty"
+Built: Thu Nov 8 22:35:40 2018
+OS/Arch: linux/amd64
+```
+
+Filtering out only the version:
+```
+$ podman version --format '{{.Version}}'
+0.11.2
+```
+
## SEE ALSO
podman(1), crio(8)
## HISTORY
+November 2018, Added --format flag by Tomas Tomecek <ttomecek@redhat.com>
July 2017, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/podman-volume-create.1.md b/docs/podman-volume-create.1.md
new file mode 100644
index 000000000..795d7b449
--- /dev/null
+++ b/docs/podman-volume-create.1.md
@@ -0,0 +1,48 @@
+% podman-volume-create(1)
+
+## NAME
+podman\-volume\-create - Create a new volume
+
+## SYNOPSIS
+**podman volume create** [*options*]
+
+## DESCRIPTION
+
+Creates an empty volume and prepares it to be used by containers. The volume
+can be created with a specific name; if a name is not given, a random name is
+generated. You can add metadata to the volume by using the **--label** flag, and
+driver options can be set using the **--opt** flag.
+
+## OPTIONS
+
+**--driver**=""
+
+Specify the volume driver name (default local).
+
+**--help**
+
+Print usage statement
+
+**-l**, **--label**=[]
+
+Set metadata for a volume (e.g., --label mykey=value).
+
+**-o**, **--opt**=[]
+
+Set driver specific options.
+
+## EXAMPLES
+
+```
+$ podman volume create myvol
+
+$ podman volume create
+
+$ podman volume create --label foo=bar myvol
+```
+
+## SEE ALSO
+podman-volume(1)
+
+## HISTORY
+November 2018, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
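A possible follow-up to the examples above, showing that a label set at creation time can be checked afterwards; the volume name, label, and echoed output are illustrative:
```
$ podman volume create --label purpose=cache cachevol
cachevol
$ podman volume inspect cachevol
```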
diff --git a/docs/podman-volume-inspect.1.md b/docs/podman-volume-inspect.1.md
new file mode 100644
index 000000000..6d5b184ee
--- /dev/null
+++ b/docs/podman-volume-inspect.1.md
@@ -0,0 +1,45 @@
+% podman-volume-inspect(1)
+
+## NAME
+podman\-volume\-inspect - Inspect one or more volumes
+
+## SYNOPSIS
+**podman volume inspect** [*options*]
+
+## DESCRIPTION
+
+Display detailed information on one or more volumes. The output can be formatted using
+the **--format** flag and a Go template. To get detailed information about all the
+existing volumes, use the **--all** flag.
+
+
+## OPTIONS
+
+**-a**, **--all**=""
+
+Inspect all volumes.
+
+**--format**=""
+
+Format volume output using Go template
+
+**--help**
+
+Print usage statement
+
+
+## EXAMPLES
+
+```
+$ podman volume inspect myvol
+
+$ podman volume inspect --all
+
+$ podman volume inspect --format "{{.Driver}} {{.Scope}}" myvol
+```
+
+## SEE ALSO
+podman-volume(1)
+
+## HISTORY
+November 2018, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/podman-volume-ls.1.md b/docs/podman-volume-ls.1.md
new file mode 100644
index 000000000..c061e27fe
--- /dev/null
+++ b/docs/podman-volume-ls.1.md
@@ -0,0 +1,49 @@
+% podman-volume-ls(1)
+
+## NAME
+podman\-volume\-ls - List volumes
+
+## SYNOPSIS
+**podman volume ls** [*options*]
+
+## DESCRIPTION
+
+Lists all the volumes that exist. The output can be filtered using the **--filter**
+flag and can be formatted to either JSON or a Go template using the **--format**
+flag. Use the **--quiet** flag to print only the volume names.
+
+## OPTIONS
+
+**--filter**=""
+
+Filter volume output.
+
+**--format**=""
+
+Format volume output using Go template.
+
+**--help**
+
+Print usage statement.
+
+**-q**, **--quiet**=[]
+
+Print volume output in quiet mode. Only print the volume names.
+
+## EXAMPLES
+
+```
+$ podman volume ls
+
+$ podman volume ls --format json
+
+$ podman volume ls --format "{{.Driver}} {{.Scope}}"
+
+$ podman volume ls --filter name=foo,label=blue
+```
+
+## SEE ALSO
+podman-volume(1)
+
+## HISTORY
+November 2018, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/podman-volume-prune.1.md b/docs/podman-volume-prune.1.md
new file mode 100644
index 000000000..a06bb2fa4
--- /dev/null
+++ b/docs/podman-volume-prune.1.md
@@ -0,0 +1,38 @@
+% podman-volume-prune(1)
+
+## NAME
+podman\-volume\-prune - Remove all unused volumes
+
+## SYNOPSIS
+**podman volume prune** [*options*]
+
+## DESCRIPTION
+
+Removes all unused volumes. You will be prompted to confirm the removal of all the
+unused volumes. To bypass the confirmation, use the **--force** flag.
+
+
+## OPTIONS
+
+**-f**, **--force**=""
+
+Do not prompt for confirmation.
+
+**--help**
+
+Print usage statement
+
+
+## EXAMPLES
+
+```
+$ podman volume prune
+
+$ podman volume prune --force
+```
+
+## SEE ALSO
+podman-volume(1)
+
+## HISTORY
+November 2018, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/podman-volume-rm.1.md b/docs/podman-volume-rm.1.md
new file mode 100644
index 000000000..c23d7675c
--- /dev/null
+++ b/docs/podman-volume-rm.1.md
@@ -0,0 +1,45 @@
+% podman-volume-rm(1)
+
+## NAME
+podman\-volume\-rm - Remove one or more volumes
+
+## SYNOPSIS
+**podman volume rm** [*options*]
+
+## DESCRIPTION
+
+Removes one or more volumes. Only volumes that are not being used will be removed.
+If a volume is being used by a container, an error is returned unless the **--force**
+flag is given. To remove all the volumes, use the **--all** flag.
+
+
+## OPTIONS
+
+**-a**, **--all**=""
+
+Remove all volumes.
+
+**-f**, **--force**=""
+
+Remove a volume by force, even if it is being used by a container
+
+**--help**
+
+Print usage statement
+
+
+## EXAMPLES
+
+```
+$ podman volume rm myvol1 myvol2
+
+$ podman volume rm --all
+
+$ podman volume rm --force myvol
+```
+
+## SEE ALSO
+podman-volume(1)
+
+## HISTORY
+November 2018, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/podman-volume.1.md b/docs/podman-volume.1.md
new file mode 100644
index 000000000..ac32abbd6
--- /dev/null
+++ b/docs/podman-volume.1.md
@@ -0,0 +1,23 @@
+% podman-volume(1)
+
+## NAME
+podman\-volume - Simple management tool for volumes.
+
+## SYNOPSIS
+**podman volume** *subcommand*
+
+## DESCRIPTION
+podman volume is a set of subcommands that manage volumes.
+
+## SUBCOMMANDS
+
+| Subcommand | Description |
+| ------------------------------------------------- | ------------------------------------------------------------------------------ |
+| [podman-volume-create(1)](podman-volume-create.1.md) | Create a new volume. |
+| [podman-volume-inspect(1)](podman-volume-inspect.1.md) | Get detailed information on one or more volumes. |
+| [podman-volume-ls(1)](podman-volume-ls.1.md) | List all the available volumes. |
+| [podman-volume-rm(1)](podman-volume-rm.1.md) | Remove one or more volumes. |
+| [podman-volume-prune(1)](podman-volume-prune.1.md) | Remove all unused volumes. |
+
+## HISTORY
+November 2018, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
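A short end-to-end sketch of the subcommands listed above; the volume name is arbitrary and the output is abbreviated:
```
$ podman volume create myvol
myvol
$ podman volume ls -q
myvol
$ podman volume rm myvol
```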
diff --git a/docs/podman.1.md b/docs/podman.1.md
index 085af97ff..bde349e6f 100644
--- a/docs/podman.1.md
+++ b/docs/podman.1.md
@@ -31,6 +31,18 @@ CGroup manager to use for container cgroups. Supported values are cgroupfs or sy
Path to where the cpu performance results should be written
+**--hooks-dir**=**path**
+
+Each `*.json` file in the path configures a hook for Podman containers. For more details on the syntax of the JSON files and the semantics of hook injection, see `oci-hooks(5)`. Podman and libpod currently support both the 1.0.0 and 0.1.0 hook schemas, although the 0.1.0 schema is deprecated.
+
+This option may be set multiple times; paths from later options have higher precedence (`oci-hooks(5)` discusses directory precedence).
+
+For the annotation conditions, libpod uses any annotations set in the generated OCI configuration.
+
+For the bind-mount conditions, only mounts explicitly requested by the caller via `--volume` are considered. Bind mounts that libpod inserts by default (e.g. `/dev/shm`) are not considered.
+
+If `--hooks-dir` is unset for root callers, Podman and libpod will currently default to `/usr/share/containers/oci/hooks.d` and `/etc/containers/oci/hooks.d` in order of increasing precedence. Using these defaults is deprecated, and callers should migrate to explicitly setting `--hooks-dir`.
+
**--log-level**
Log messages above specified level: debug, info, warn, error (default), fatal or panic
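A hedged sketch of how the new flag might be used; the directories are the defaults named above and the container command is arbitrary:
```
# Later --hooks-dir options take precedence over earlier ones
$ sudo podman --hooks-dir /usr/share/containers/oci/hooks.d \
              --hooks-dir /etc/containers/oci/hooks.d \
              run --rm docker.io/library/alpine true
```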
@@ -56,7 +68,7 @@ Path to the OCI compatible binary used to run containers
**--storage-driver, -s**=**value**
-Storage driver. The default storage driver for UID 0 is configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode), and is *vfs* for other users. The `STORAGE_DRIVER` environment variable overrides the default. The --storage-driver specified driver overrides all.
+Storage driver. The default storage driver for UID 0 is configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode), and is *vfs* for non-root users when *fuse-overlayfs* is not available. The `STORAGE_DRIVER` environment variable overrides the default, and a driver specified with `--storage-driver` overrides both.
Overriding this option will cause the *storage-opt* settings in /etc/containers/storage.conf to be ignored. The user must
specify additional options via the `--storage-opt` flag.
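As a sketch of the override order described above (the image name is arbitrary): images pulled under one driver are only visible when the same driver is selected again:
```
$ sudo podman --storage-driver=vfs pull docker.io/library/alpine
$ sudo podman --storage-driver=vfs images
```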
@@ -161,18 +173,6 @@ the exit codes follow the `chroot` standard, see below:
The mounts.conf file specifies volume mount directories that are automatically mounted inside containers when executing the `podman run` or `podman start` commands. When Podman runs in rootless mode, the file `$HOME/.config/containers/mounts.conf` is also used. Please refer to containers-mounts.conf(5) for further details.
-**OCI hooks JSON** (`/etc/containers/oci/hooks.d/*.json`, `/usr/share/containers/oci/hooks.d/*.json`)
-
- Each `*.json` file in `/etc/containers/oci/hooks.d` and `/usr/share/containers/oci/hooks.d` configures a hook for Podman containers, with `/etc/containers/oci/hooks.d` having higher precedence. For more details on the syntax of the JSON files and the semantics of hook injection, see `oci-hooks(5)`.
-
- Podman and libpod currently support both the 1.0.0 and 0.1.0 hook schemas, although the 0.1.0 schema is deprecated.
-
- For the annotation conditions, libpod uses any annotations set in the generated OCI configuration.
-
- For the bind-mount conditions, only mounts explicitly requested by the caller via `--volume` are considered. Bind mounts that libpod inserts by default (e.g. `/dev/shm`) are not considered.
-
- Hooks are not used when running in rootless mode.
-
**policy.json** (`/etc/containers/policy.json`)
Signature verification policy files are used to specify policy, e.g. trusted keys, applicable when deciding whether to accept an image, or individual signatures of that image, as valid.
@@ -192,7 +192,7 @@ the exit codes follow the `chroot` standard, see below:
When Podman runs in rootless mode, the file `$HOME/.config/containers/storage.conf` is also loaded.
## Rootless mode
-Podman can also be used as non-root user. When podman runs in rootless mode, an user namespace is automatically created.
+Podman can also be used as a non-root user. When podman runs in rootless mode, a user namespace is automatically created for the user, based on the ranges defined in /etc/subuid and /etc/subgid.
Containers created by a non-root user are not visible to other users and are not seen or managed by podman running as root.
@@ -209,13 +209,14 @@ Or just add the content manually.
$ echo USERNAME:10000:65536 >> /etc/subuid
$ echo USERNAME:10000:65536 >> /etc/subgid
+See the `subuid(5)` and `subgid(5)` man pages for more information.
+
Images are pulled under `XDG_DATA_HOME` when specified, otherwise in the home directory of the user under `.local/share/containers/storage`.
-Currently it is not possible to create a network device, so rootless containers need to run in the host network namespace. If a rootless container creates a network namespace,
-then only the loopback device will be available.
+Currently the slirp4netns package must be installed to create a network device; otherwise rootless containers need to run in the network namespace of the host.
## SEE ALSO
-`containers-mounts.conf(5)`, `containers-registries.conf(5)`, `containers-storage.conf(5)`, `crio(8)`, `libpod.conf(5)`, `oci-hooks(5)`, `policy.json(5)`
+`containers-mounts.conf(5)`, `containers-registries.conf(5)`, `containers-storage.conf(5)`, `crio(8)`, `libpod.conf(5)`, `oci-hooks(5)`, `policy.json(5)`, `subuid(5)`, `subgid(5)`, `slirp4netns(1)`
## HISTORY
Dec 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com>
diff --git a/docs/tutorials/podman_tutorial.md b/docs/tutorials/podman_tutorial.md
index f11083a9e..f8332c820 100644
--- a/docs/tutorials/podman_tutorial.md
+++ b/docs/tutorials/podman_tutorial.md
@@ -5,10 +5,13 @@ Podman is a utility provided as part of the libpod library. It can be used to c
containers. The following tutorial will teach you how to set up Podman and perform some basic
commands with Podman.
+**NOTE**: the code samples are intended to be run as a non-root user, and use `sudo` where
+root escalation is required.
+
## Install Podman on Fedora from RPM Repositories
Fedora 27 and later provide Podman via the package manager.
```console
-$ sudo dnf install -y podman
+sudo dnf install -y podman
```
## Install Podman on Fedora from Source
@@ -18,10 +21,10 @@ acquire the source, and build it.
### Installing build and runtime dependencies
```console
-$ sudo dnf install -y git runc libassuan-devel golang golang-github-cpuguy83-go-md2man glibc-static \
- gpgme-devel glib2-devel device-mapper-devel libseccomp-devel \
- atomic-registries iptables skopeo-containers containernetworking-cni \
- conmon
+sudo dnf install -y git runc libassuan-devel golang golang-github-cpuguy83-go-md2man glibc-static \
+ gpgme-devel glib2-devel device-mapper-devel libseccomp-devel \
+ atomic-registries iptables skopeo-containers containernetworking-cni \
+ conmon ostree-devel
```
### Building and installing podman
@@ -29,12 +32,12 @@ First, configure a `GOPATH` (if you are using go1.8 or later, this defaults to `
and make libpod.
```console
-$ export GOPATH=~/go
-$ mkdir -p $GOPATH
-$ git clone https://github.com/containers/libpod/ $GOPATH/src/github.com/containers/libpod
-$ cd $GOPATH/src/github.com/containers/libpod
-$ make
-$ sudo make install PREFIX=/usr
+export GOPATH=~/go
+mkdir -p $GOPATH
+git clone https://github.com/containers/libpod/ $GOPATH/src/github.com/containers/libpod
+cd $GOPATH/src/github.com/containers/libpod
+make
+sudo make install PREFIX=/usr
```
You now have a working podman environment. Jump to [Familiarizing yourself with Podman](#familiarizing-yourself-with-podman)
@@ -50,8 +53,8 @@ tutorial. For this tutorial, the Ubuntu **artful-server-cloudimg** image was use
#### Installing base packages
```console
-$ sudo apt-get update
-$ sudo apt-get install libdevmapper-dev libglib2.0-dev libgpgme11-dev golang libseccomp-dev \
+sudo apt-get update
+sudo apt-get install libdevmapper-dev libglib2.0-dev libgpgme11-dev golang libseccomp-dev libostree-dev \
go-md2man libprotobuf-dev libprotobuf-c0-dev libseccomp-dev python3-setuptools
```
#### Building and installing conmon
@@ -59,27 +62,27 @@ First, configure a `GOPATH` (if you are using go1.8 or later, this defaults to `
and make libpod.
```console
-$ export GOPATH=~/go
-$ mkdir -p $GOPATH
-$ git clone https://github.com/kubernetes-sigs/cri-o $GOPATH/src/github.com/kubernetes-sigs/cri-o
-$ cd $GOPATH/src/github.com/kubernetes-sigs/cri-o
-$ mkdir bin
-$ make bin/conmon
-$ sudo install -D -m 755 bin/conmon /usr/libexec/podman/conmon
+export GOPATH=~/go
+mkdir -p $GOPATH
+git clone https://github.com/kubernetes-sigs/cri-o $GOPATH/src/github.com/kubernetes-sigs/cri-o
+cd $GOPATH/src/github.com/kubernetes-sigs/cri-o
+mkdir bin
+make bin/conmon
+sudo install -D -m 755 bin/conmon /usr/libexec/podman/conmon
```
#### Adding required configuration files
```console
-$ sudo mkdir -p /etc/containers
-$ sudo curl https://raw.githubusercontent.com/projectatomic/registries/master/registries.fedora -o /etc/containers/registries.conf
-$ sudo curl https://raw.githubusercontent.com/containers/skopeo/master/default-policy.json -o /etc/containers/policy.json
+sudo mkdir -p /etc/containers
+sudo curl https://raw.githubusercontent.com/projectatomic/registries/master/registries.fedora -o /etc/containers/registries.conf
+sudo curl https://raw.githubusercontent.com/containers/skopeo/master/default-policy.json -o /etc/containers/policy.json
```
#### Installing CNI plugins
```console
-$ git clone https://github.com/containernetworking/plugins.git $GOPATH/src/github.com/containernetworking/plugins
-$ cd $GOPATH/src/github.com/containernetworking/plugins
-$ ./build.sh
-$ sudo mkdir -p /usr/libexec/cni
-$ sudo cp bin/* /usr/libexec/cni
+git clone https://github.com/containernetworking/plugins.git $GOPATH/src/github.com/containernetworking/plugins
+cd $GOPATH/src/github.com/containernetworking/plugins
+./build_linux.sh
+sudo mkdir -p /usr/libexec/cni
+sudo cp bin/* /usr/libexec/cni
```
#### Installing CNI config
Add a most basic network config
@@ -89,18 +92,18 @@ curl -qsSL https://raw.githubusercontent.com/containers/libpod/master/cni/87-pod
```
#### Installing runc
```console
-$ git clone https://github.com/opencontainers/runc.git $GOPATH/src/github.com/opencontainers/runc
-$ cd $GOPATH/src/github.com/opencontainers/runc
-$ make BUILDTAGS="seccomp"
-$ sudo cp runc /usr/bin/runc
+git clone https://github.com/opencontainers/runc.git $GOPATH/src/github.com/opencontainers/runc
+cd $GOPATH/src/github.com/opencontainers/runc
+make BUILDTAGS="seccomp"
+sudo cp runc /usr/bin/runc
```
### Building and installing Podman
```console
-$ git clone https://github.com/containers/libpod/ $GOPATH/src/github.com/containers/libpod
-$ cd $GOPATH/src/github.com/containers/libpod
-$ make
-$ sudo make install PREFIX=/usr
+git clone https://github.com/containers/libpod/ $GOPATH/src/github.com/containers/libpod
+cd $GOPATH/src/github.com/containers/libpod
+make
+sudo make install PREFIX=/usr
```
## Familiarizing yourself with Podman
@@ -109,10 +112,10 @@ $ sudo make install PREFIX=/usr
This sample container will run a very basic httpd server that serves only its index
page.
```console
-$ sudo podman run -dt -e HTTPD_VAR_RUN=/var/run/httpd -e HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \
- -e HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \
- -e HTTPD_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/httpd/ \
- registry.fedoraproject.org/f27/httpd /usr/bin/run-httpd
+sudo podman run -dt -e HTTPD_VAR_RUN=/var/run/httpd -e HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \
+ -e HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \
+ -e HTTPD_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/httpd/ \
+ registry.fedoraproject.org/f27/httpd /usr/bin/run-httpd
```
Because the container is being run in detached mode, represented by the *-d* in the podman run command, podman
will print the container ID after it has run.
@@ -120,7 +123,7 @@ will print the container ID after it has run.
### Listing running containers
The Podman *ps* command is used to list creating and running containers.
```console
-$ sudo podman ps
+sudo podman ps
```
Note: If you add *-a* to the *ps* command, Podman will show all containers.
@@ -132,7 +135,7 @@ $ sudo podman inspect -l | grep IPAddress\":
"IPAddress": "10.88.6.140",
```
-Note: The -l is convenience arguement for **latest container**. You can also use the container's ID instead
+Note: The -l is a convenience argument for **latest container**. You can also use the container's ID instead
of -l.
### Testing the httpd server
@@ -140,7 +143,7 @@ Now that we have the IP address of the container, we can test the network commun
operating system and the container using curl. The following command should display the index page of our
containerized httpd server.
```console
-# curl http://<IP_address>:8080
+curl http://<IP_address>:8080
```
### Viewing the container's logs
@@ -169,7 +172,7 @@ With this a container can later be restored and continue running at exactly the
checkpoint. This capability requires CRIU 3.11 or later installed on the system.
To checkpoint the container use:
```console
-$ sudo podman container checkpoint <container_id>
+sudo podman container checkpoint <container_id>
```
### Restoring the container
@@ -177,29 +180,29 @@ Restoring a container is only possible for a previously checkpointed container.
continue to run at exactly the same point in time it was checkpointed.
To restore the container use:
```console
-$ sudo podman container restore <container_id>
+sudo podman container restore <container_id>
```
After being restored, the container will answer requests again as it did before checkpointing.
```console
-# curl http://<IP_address>:8080
+curl http://<IP_address>:8080
```
### Stopping the container
To stop the httpd container:
```console
-$ sudo podman stop --latest
+sudo podman stop --latest
```
You can also check the status of one or more containers using the *ps* subcommand. In this case, we should
use the *-a* argument to list all containers.
```console
-$ sudo podman ps -a
+sudo podman ps -a
```
### Removing the container
To remove the httpd container:
```console
-$ sudo podman rm --latest
+sudo podman rm --latest
```
You can verify the deletion of the container by running *podman ps -a*.
diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh
new file mode 100755
index 000000000..e9a755dd4
--- /dev/null
+++ b/hack/get_ci_vm.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+set -e
+
+cd $(dirname $0)/../
+
+VMNAME="${USER}-twidling-$1"
+# TODO: Many/most of these values should come from .cirrus.yml
+ZONE="us-central1-a"
+CPUS="2"
+MEMORY="4Gb"
+DISK="200"
+PROJECT="libpod-218412"
+GOSRC="/var/tmp/go/src/github.com/containers/libpod"
+
+PGCLOUD="sudo podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER -v /home/$USER:$HOME:z quay.io/cevich/gcloud_centos:latest"
+CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image=$1 --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $VMNAME"
+SSH_CMD="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no -F /dev/null"
+CLEANUP_CMD="$PGCLOUD compute instances delete --zone $ZONE --delete-disks=all $VMNAME"
+
+# COLOR!
+RED="\e[1;36;41m"
+YEL="\e[1;33;44m"
+NOR="\e[0m"
+
+if [[ -z "$1" ]]
+then
+ echo -e "\n${RED}Error: No image-name specified. Some possible values (from .cirrus.yml).${NOR}"
+ egrep 'image_name' ".cirrus.yml" | grep -v '#' | cut -d: -f 2 | tr -d [:blank:]
+ exit 1
+fi
+
+echo -e "\n${YEL}WARNING: This will not work without local sudo access to run podman,${NOR}"
+echo -e " ${YEL}and prior authorization to use the libpod GCP project. Also,${NOR}"
+echo -e " ${YEL}possession of the proper ssh private key is required.${NOR}"
+
+if [[ "$USER" =~ "root" ]]
+then
+ echo -e "\n${RED}ERROR: This script must be run as a regular user${NOR}"
+ exit 2
+fi
+
+if [[ ! -r "$HOME/.config/gcloud/active_config" ]]
+then
+ echo -e "\n${RED}ERROR: Can't find gcloud configuration, attempting to run init.${NOR}"
+ $PGCLOUD init --project=$PROJECT
+fi
+
+cleanup() {
+ echo -e "\n${YEL}Deleting $VMNAME ${RED}(Might take a minute or two)${NOR}
++ $CLEANUP_CMD
+"
+ $CLEANUP_CMD # prompts for Yes/No
+}
+
+trap cleanup EXIT
+
+echo -e "\n${YEL}Trying to creating a VM named $VMNAME (not fatal if already exists).${NOR}"
+echo "+ $CREATE_CMD"
+$CREATE_CMD || true # allow re-running commands below when "delete: N"
+
+echo -e "\n${YEL}Attempting to retrieve IP address of existing ${VMNAME}${NOR}."
+IP=`$PGCLOUD compute instances list --filter=name=$VMNAME --limit=1 '--format=csv(networkInterfaces.accessConfigs.natIP)' | tr --complement --delete .[:digit:]`
+
+echo -e "\n${YEL}Creating $GOSRC directory.${NOR}"
+SSH_MKDIR="$SSH_CMD root@$IP mkdir -vp $GOSRC"
+echo "+ $SSH_MKDIR"
+$SSH_MKDIR
+
+echo -e "\n${YEL}Synchronizing local repository to $IP:${GOSRC}${NOR} ."
+export RSYNC_RSH="$SSH_CMD"
+RSYNC_CMD="rsync --quiet --recursive --update --links --safe-links --perms --sparse $PWD/ root@$IP:$GOSRC/"
+echo "+ export RSYNC_RSH=\"$SSH_CMD\""
+echo "+ $RSYNC_CMD"
+$RSYNC_CMD
+
+echo -e "\n${YEL}Executing environment setup${NOR}"
+ENV_CMD="$SSH_CMD root@$IP env CI=true $GOSRC/contrib/cirrus/setup_environment.sh"
+echo "+ $ENV_CMD"
+$ENV_CMD
+
+echo -e "\n${YEL}Connecting to $VMNAME ${RED}(option to delete VM upon logout).${NOR}"
+SSH_CMD="$SSH_CMD -t root@$IP"
+echo "+ $SSH_CMD"
+$SSH_CMD "cd $GOSRC ; bash -il"
diff --git a/install.md b/install.md
index 33224c810..efb568b66 100644
--- a/install.md
+++ b/install.md
@@ -83,7 +83,7 @@ Debian, Ubuntu, and related distributions will also need to do the following set
If using an older release or a long-term support release, be careful to double-check that the version of `runc` is new enough (running `runc --version` should produce `spec: 1.0.0`), or else [build](https://github.com/containers/libpod/blob/master/docs/tutorials/podman_tutorial.md#installing-runc) your own.
-Be careful to double-check that the version of golang is new enough, version 1.8.x or higher is required. If needed, golang kits are available at https://golang.org/dl/
+Be careful to double-check that the version of golang is new enough; version 1.10.x or higher is required. If needed, golang kits are available at https://golang.org/dl/
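A quick way to double-check both version requirements mentioned above (output will vary by system):
```
runc --version
go version
```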
**Optional**
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 42f029379..b154d8bda 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -3,7 +3,6 @@ package libpod
import (
"bytes"
"encoding/json"
- "os"
"strings"
"sync"
@@ -19,7 +18,6 @@ type BoltState struct {
dbLock sync.Mutex
namespace string
namespaceBytes []byte
- lockDir string
runtime *Runtime
}
@@ -52,25 +50,15 @@ type BoltState struct {
// containers/storage do not occur.
// NewBoltState creates a new bolt-backed state database
-func NewBoltState(path, lockDir string, runtime *Runtime) (State, error) {
+func NewBoltState(path string, runtime *Runtime) (State, error) {
state := new(BoltState)
state.dbPath = path
- state.lockDir = lockDir
state.runtime = runtime
state.namespace = ""
state.namespaceBytes = nil
logrus.Debugf("Initializing boltdb state at %s", path)
- // Make the directory that will hold container lockfiles
- if err := os.MkdirAll(lockDir, 0750); err != nil {
- // The directory is allowed to exist
- if !os.IsExist(err) {
- return nil, errors.Wrapf(err, "error creating lockfiles dir %s", lockDir)
- }
- }
- state.lockDir = lockDir
-
db, err := bolt.Open(path, 0600, nil)
if err != nil {
return nil, errors.Wrapf(err, "error opening database %s", path)
@@ -106,6 +94,12 @@ func NewBoltState(path, lockDir string, runtime *Runtime) (State, error) {
if _, err := tx.CreateBucketIfNotExists(allPodsBkt); err != nil {
return errors.Wrapf(err, "error creating all pods bucket")
}
+ if _, err := tx.CreateBucketIfNotExists(volBkt); err != nil {
+ return errors.Wrapf(err, "error creating volume bucket")
+ }
+ if _, err := tx.CreateBucketIfNotExists(allVolsBkt); err != nil {
+ return errors.Wrapf(err, "error creating all volumes bucket")
+ }
if _, err := tx.CreateBucketIfNotExists(runtimeConfigBkt); err != nil {
return errors.Wrapf(err, "error creating runtime-config bucket")
}
@@ -115,11 +109,6 @@ func NewBoltState(path, lockDir string, runtime *Runtime) (State, error) {
return nil, errors.Wrapf(err, "error creating initial database layout")
}
- // Check runtime configuration
- if err := checkRuntimeConfig(db, runtime); err != nil {
- return nil, err
- }
-
state.valid = true
return state, nil
@@ -240,6 +229,72 @@ func (s *BoltState) Refresh() error {
return err
}
+// GetDBConfig retrieves runtime configuration fields that were created when
+// the database was first initialized
+func (s *BoltState) GetDBConfig() (*DBConfig, error) {
+ if !s.valid {
+ return nil, ErrDBClosed
+ }
+
+ cfg := new(DBConfig)
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ configBucket, err := getRuntimeConfigBucket(tx)
+ if err != nil {
+ return nil
+ }
+
+ // Some of these may be nil
+ // When we convert to string, Go will coerce them to ""
+ // That's probably fine - we could raise an error if the key is
+ // missing, but just not including it is also OK.
+ libpodRoot := configBucket.Get(staticDirKey)
+ libpodTmp := configBucket.Get(tmpDirKey)
+ storageRoot := configBucket.Get(graphRootKey)
+ storageTmp := configBucket.Get(runRootKey)
+ graphDriver := configBucket.Get(graphDriverKey)
+
+ cfg.LibpodRoot = string(libpodRoot)
+ cfg.LibpodTmp = string(libpodTmp)
+ cfg.StorageRoot = string(storageRoot)
+ cfg.StorageTmp = string(storageTmp)
+ cfg.GraphDriver = string(graphDriver)
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return cfg, nil
+}
+
+// ValidateDBConfig validates paths in the given runtime against the database
+func (s *BoltState) ValidateDBConfig(runtime *Runtime) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ // Check runtime configuration
+ if err := checkRuntimeConfig(db, runtime); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// SetNamespace sets the namespace that will be used for container and pod
// retrieval
func (s *BoltState) SetNamespace(ns string) error {
@@ -1101,6 +1156,378 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
return ctrs, nil
}
+// AddVolume adds the given volume to the state. It also creates the sub
+// bucket that will hold the IDs of containers depending on this volume
+func (s *BoltState) AddVolume(volume *Volume) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !volume.valid {
+ return ErrVolumeRemoved
+ }
+
+ volName := []byte(volume.Name())
+
+ volConfigJSON, err := json.Marshal(volume.config)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling volume %s config to JSON", volume.Name())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ allVolsBkt, err := getAllVolsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ // Check if we already have a volume with the given name
+ volExists := allVolsBkt.Get(volName)
+ if volExists != nil {
+ return errors.Wrapf(ErrVolumeExists, "name %s is in use", volume.Name())
+ }
+
+ // We are good to add the volume
+ // Make a bucket for it
+ newVol, err := volBkt.CreateBucket(volName)
+ if err != nil {
+ return errors.Wrapf(err, "error creating bucket for volume %s", volume.Name())
+ }
+
+ // Make a subbucket for the containers using the volume. Dependent container IDs will be added/removed to
+ // this bucket in addContainer/removeContainer
+ if _, err := newVol.CreateBucket(volDependenciesBkt); err != nil {
+ return errors.Wrapf(err, "error creating bucket for containers using volume %s", volume.Name())
+ }
+
+ if err := newVol.Put(configKey, volConfigJSON); err != nil {
+ return errors.Wrapf(err, "error storing volume %s configuration in DB", volume.Name())
+ }
+
+ if err := allVolsBkt.Put(volName, volName); err != nil {
+ return errors.Wrapf(err, "error storing volume %s in all volumes bucket in DB", volume.Name())
+ }
+
+ return nil
+ })
+ return err
+}
+
+// RemoveVolCtrDep removes the given container ID from the container dependencies
+// sub bucket of the given volume, if it is present there.
+// This is important when force removing a volume and we want to get rid of the dependencies.
+func (s *BoltState) RemoveVolCtrDep(volume *Volume, ctrID string) error {
+ if ctrID == "" {
+ return nil
+ }
+
+ if !s.valid {
+ return ErrDBBadConfig
+ }
+
+ if !volume.valid {
+ return ErrVolumeRemoved
+ }
+
+ volName := []byte(volume.Name())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBkt.Bucket(volName)
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
+ }
+
+ // Get the sub bucket holding the containers using the volume
+ ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ depCtrID := []byte(ctrID)
+ if depExists := ctrDepsBkt.Get(depCtrID); depExists != nil {
+ if err := ctrDepsBkt.Delete(depCtrID); err != nil {
+ return errors.Wrapf(err, "error deleting container dependencies %q for volume %s in ctrDependencies bucket in DB", ctrID, volume.Name())
+ }
+ }
+
+ return nil
+ })
+ return err
+}
+
+// RemoveVolume removes the given volume from the state
+func (s *BoltState) RemoveVolume(volume *Volume) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !volume.valid {
+ return ErrVolumeRemoved
+ }
+
+ volName := []byte(volume.Name())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ allVolsBkt, err := getAllVolsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ // Check if the volume exists
+ volDB := volBkt.Bucket(volName)
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(ErrNoSuchVolume, "volume %s does not exist in DB", volume.Name())
+ }
+
+ // Check if volume is not being used by any container
+ // This should never be nil
+ // But if it is, we can assume that no containers are using
+ // the volume.
+ volCtrsBkt := volDB.Bucket(volDependenciesBkt)
+ if volCtrsBkt != nil {
+ var deps []string
+ err = volCtrsBkt.ForEach(func(id, value []byte) error {
+ deps = append(deps, string(id))
+ return nil
+ })
+ if err != nil {
+ return errors.Wrapf(err, "error getting list of dependencies from dependencies bucket for volumes %q", volume.Name())
+ }
+ if len(deps) > 0 {
+ return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by container(s) %s", volume.Name(), strings.Join(deps, ","))
+ }
+ }
+
+ // volume is ready for removal
+ // Let's kick it out
+ if err := allVolsBkt.Delete(volName); err != nil {
+ return errors.Wrapf(err, "error removing volume %s from all volumes bucket in DB", volume.Name())
+ }
+ if err := volBkt.DeleteBucket(volName); err != nil {
+ return errors.Wrapf(err, "error removing volume %s from DB", volume.Name())
+ }
+
+ return nil
+ })
+ return err
+}
+
+// AllVolumes returns all volumes present in the state
+func (s *BoltState) AllVolumes() ([]*Volume, error) {
+ if !s.valid {
+ return nil, ErrDBClosed
+ }
+
+ volumes := []*Volume{}
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ allVolsBucket, err := getAllVolsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volBucket, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+ err = allVolsBucket.ForEach(func(id, name []byte) error {
+ volExists := volBucket.Bucket(id)
+ // This check can be removed if performance becomes an
+ // issue, but much less helpful errors will be produced
+ if volExists == nil {
+ return errors.Wrapf(ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
+ }
+
+ volume := new(Volume)
+ volume.config = new(VolumeConfig)
+
+ if err := s.getVolumeFromDB(id, volume, volBucket); err != nil {
+ if errors.Cause(err) != ErrNSMismatch {
+ logrus.Errorf("Error retrieving volume %s from the database: %v", string(id), err)
+ }
+ } else {
+ volumes = append(volumes, volume)
+ }
+
+ return nil
+ })
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return volumes, nil
+}
+
+// Volume retrieves a volume by its full name
+func (s *BoltState) Volume(name string) (*Volume, error) {
+ if name == "" {
+ return nil, ErrEmptyID
+ }
+
+ if !s.valid {
+ return nil, ErrDBClosed
+ }
+
+ volName := []byte(name)
+
+ volume := new(Volume)
+ volume.config = new(VolumeConfig)
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ return s.getVolumeFromDB(volName, volume, volBkt)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return volume, nil
+}
+
+// HasVolume returns true if the given volume exists in the state, otherwise it returns false
+func (s *BoltState) HasVolume(name string) (bool, error) {
+ if name == "" {
+ return false, ErrEmptyID
+ }
+
+ if !s.valid {
+ return false, ErrDBClosed
+ }
+
+ volName := []byte(name)
+
+ exists := false
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return false, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBkt.Bucket(volName)
+ if volDB != nil {
+ exists = true
+ }
+
+ return nil
+ })
+ if err != nil {
+ return false, err
+ }
+
+ return exists, nil
+}
+
+// VolumeInUse checks if any container is using the volume
+// It returns a slice of the IDs of the containers using the given
+// volume. If the slice is empty, no containers use the given volume
+func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
+ if !s.valid {
+ return nil, ErrDBClosed
+ }
+
+ if !volume.valid {
+ return nil, ErrVolumeRemoved
+ }
+
+ depCtrs := []string{}
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ volBucket, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBucket.Bucket([]byte(volume.Name()))
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in DB", volume.Name())
+ }
+
+ dependsBkt := volDB.Bucket(volDependenciesBkt)
+ if dependsBkt == nil {
+ return errors.Wrapf(ErrInternal, "volume %s has no dependencies bucket", volume.Name())
+ }
+
+ // Iterate through and add dependencies
+ err = dependsBkt.ForEach(func(id, value []byte) error {
+ depCtrs = append(depCtrs, string(id))
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return depCtrs, nil
+}
+
// AddPod adds the given pod to the state.
func (s *BoltState) AddPod(pod *Pod) error {
if !s.valid {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index cc7d106cc..06f8dcb24 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -21,15 +21,25 @@ const (
allCtrsName = "all-ctrs"
podName = "pod"
allPodsName = "allPods"
+ volName = "vol"
+ allVolsName = "allVolumes"
runtimeConfigName = "runtime-config"
- configName = "config"
- stateName = "state"
- dependenciesName = "dependencies"
- netNSName = "netns"
- containersName = "containers"
- podIDName = "pod-id"
- namespaceName = "namespace"
+ configName = "config"
+ stateName = "state"
+ dependenciesName = "dependencies"
+ volCtrDependencies = "vol-dependencies"
+ netNSName = "netns"
+ containersName = "containers"
+ podIDName = "pod-id"
+ namespaceName = "namespace"
+
+ staticDirName = "static-dir"
+ tmpDirName = "tmp-dir"
+ runRootName = "run-root"
+ graphRootName = "graph-root"
+ graphDriverName = "graph-driver-name"
+ osName = "os"
)
var (
@@ -40,30 +50,31 @@ var (
allCtrsBkt = []byte(allCtrsName)
podBkt = []byte(podName)
allPodsBkt = []byte(allPodsName)
+ volBkt = []byte(volName)
+ allVolsBkt = []byte(allVolsName)
runtimeConfigBkt = []byte(runtimeConfigName)
- configKey = []byte(configName)
- stateKey = []byte(stateName)
- dependenciesBkt = []byte(dependenciesName)
- netNSKey = []byte(netNSName)
- containersBkt = []byte(containersName)
- podIDKey = []byte(podIDName)
- namespaceKey = []byte(namespaceName)
+ configKey = []byte(configName)
+ stateKey = []byte(stateName)
+ dependenciesBkt = []byte(dependenciesName)
+ volDependenciesBkt = []byte(volCtrDependencies)
+ netNSKey = []byte(netNSName)
+ containersBkt = []byte(containersName)
+ podIDKey = []byte(podIDName)
+ namespaceKey = []byte(namespaceName)
+
+ staticDirKey = []byte(staticDirName)
+ tmpDirKey = []byte(tmpDirName)
+ runRootKey = []byte(runRootName)
+ graphRootKey = []byte(graphRootName)
+ graphDriverKey = []byte(graphDriverName)
+ osKey = []byte(osName)
)
// Check if the configuration of the database is compatible with the
// configuration of the runtime opening it
// If there is no runtime configuration loaded, load our own
func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
- var (
- staticDir = []byte("static-dir")
- tmpDir = []byte("tmp-dir")
- runRoot = []byte("run-root")
- graphRoot = []byte("graph-root")
- graphDriverName = []byte("graph-driver-name")
- osKey = []byte("os")
- )
-
err := db.Update(func(tx *bolt.Tx) error {
configBkt, err := getRuntimeConfigBucket(tx)
if err != nil {
@@ -74,31 +85,31 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
return err
}
- if err := validateDBAgainstConfig(configBkt, "static dir",
- rt.config.StaticDir, staticDir, ""); err != nil {
+ if err := validateDBAgainstConfig(configBkt, "libpod root directory (staticdir)",
+ rt.config.StaticDir, staticDirKey, ""); err != nil {
return err
}
- if err := validateDBAgainstConfig(configBkt, "tmp dir",
- rt.config.TmpDir, tmpDir, ""); err != nil {
+ if err := validateDBAgainstConfig(configBkt, "libpod temporary files directory (tmpdir)",
+ rt.config.TmpDir, tmpDirKey, ""); err != nil {
return err
}
- if err := validateDBAgainstConfig(configBkt, "run root",
- rt.config.StorageConfig.RunRoot, runRoot,
+ if err := validateDBAgainstConfig(configBkt, "storage temporary directory (runroot)",
+ rt.config.StorageConfig.RunRoot, runRootKey,
storage.DefaultStoreOptions.RunRoot); err != nil {
return err
}
- if err := validateDBAgainstConfig(configBkt, "graph root",
- rt.config.StorageConfig.GraphRoot, graphRoot,
+ if err := validateDBAgainstConfig(configBkt, "storage graph root directory (graphroot)",
+ rt.config.StorageConfig.GraphRoot, graphRootKey,
storage.DefaultStoreOptions.GraphRoot); err != nil {
return err
}
- return validateDBAgainstConfig(configBkt, "graph driver name",
+ return validateDBAgainstConfig(configBkt, "storage graph driver",
rt.config.StorageConfig.GraphDriverName,
- graphDriverName,
+ graphDriverKey,
storage.DefaultStoreOptions.GraphDriverName)
})
@@ -229,6 +240,22 @@ func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
return bkt, nil
}
+func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(volBkt)
+ if bkt == nil {
+ return nil, errors.Wrapf(ErrDBBadConfig, "volumes bucket not found in DB")
+ }
+ return bkt, nil
+}
+
+func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(allVolsBkt)
+ if bkt == nil {
+ return nil, errors.Wrapf(ErrDBBadConfig, "all volumes bucket not found in DB")
+ }
+ return bkt, nil
+}
+
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(runtimeConfigBkt)
if bkt == nil {
@@ -261,7 +288,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
}
// Get the lock
- lockPath := filepath.Join(s.lockDir, string(id))
+ lockPath := filepath.Join(s.runtime.lockDir, string(id))
lock, err := storage.GetLockfile(lockPath)
if err != nil {
return errors.Wrapf(err, "error retrieving lockfile for container %s", string(id))
@@ -297,7 +324,7 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
}
// Get the lock
- lockPath := filepath.Join(s.lockDir, string(id))
+ lockPath := filepath.Join(s.runtime.lockDir, string(id))
lock, err := storage.GetLockfile(lockPath)
if err != nil {
return errors.Wrapf(err, "error retrieving lockfile for pod %s", string(id))
@@ -310,6 +337,35 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
return nil
}
+func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
+ volDB := volBkt.Bucket(name)
+ if volDB == nil {
+ return errors.Wrapf(ErrNoSuchVolume, "volume with name %s not found", string(name))
+ }
+
+ volConfigBytes := volDB.Get(configKey)
+ if volConfigBytes == nil {
+ return errors.Wrapf(ErrInternal, "volume %s is missing configuration key in DB", string(name))
+ }
+
+ if err := json.Unmarshal(volConfigBytes, volume.config); err != nil {
+ return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
+ }
+
+ // Get the lock
+ lockPath := filepath.Join(s.runtime.lockDir, string(name))
+ lock, err := storage.GetLockfile(lockPath)
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
+ }
+ volume.lock = lock
+
+ volume.runtime = s.runtime
+ volume.valid = true
+
+ return nil
+}
+
// Add a container to the DB
// If pod is not nil, the container is added to the pod as well
func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
@@ -371,6 +427,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return err
}
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
// If a pod was given, check if it exists
var podDB *bolt.Bucket
var podCtrs *bolt.Bucket
@@ -503,6 +564,27 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
}
+ // Add container to volume dependencies bucket if container is using a named volume
+ if ctr.runtime.config.VolumePath == "" {
+ return nil
+ }
+ for _, vol := range ctr.config.Spec.Mounts {
+ if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
+ volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
+ volDB := volBkt.Bucket([]byte(volName))
+ if volDB == nil {
+ return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database", volName)
+ }
+
+ ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
+ if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
+ return errors.Wrapf(err, "error storing container dependencies %q for volume %s in ctrDependencies bucket in DB", ctr.ID(), volName)
+ }
+ }
+ }
+ }
+
return nil
})
return err
@@ -540,6 +622,11 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
return err
}
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
// Does the pod exist?
var podDB *bolt.Bucket
if pod != nil {
@@ -658,5 +745,25 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
}
+ // Remove container from volume dependencies bucket if container is using a named volume
+ for _, vol := range ctr.config.Spec.Mounts {
+ if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
+ volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
+
+ volDB := volBkt.Bucket([]byte(volName))
+ if volDB == nil {
+ // Let's assume the volume was already deleted and continue to remove the container
+ continue
+ }
+
+ ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ if depExists := ctrDepsBkt.Get(ctrID); depExists != nil {
+ if err := ctrDepsBkt.Delete(ctrID); err != nil {
+ return errors.Wrapf(err, "error deleting container dependencies %q for volume %s in ctrDependencies bucket in DB", ctr.ID(), volName)
+ }
+ }
+ }
+ }
+
return nil
}
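The dependency bookkeeping itself is plain nested-bucket work: each volume bucket carries a sub-bucket keyed by the IDs of the containers that mount it. A self-contained sketch against boltdb, using illustrative bucket and key names rather than libpod's constants:

package main

import (
    "fmt"
    "log"

    "github.com/boltdb/bolt"
)

func main() {
    db, err := bolt.Open("/tmp/vol-deps-demo.db", 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    volName := []byte("mydata")
    ctrID := []byte("0123abcd")

    // Record that the container depends on the volume.
    err = db.Update(func(tx *bolt.Tx) error {
        vols, err := tx.CreateBucketIfNotExists([]byte("volumes"))
        if err != nil {
            return err
        }
        vol, err := vols.CreateBucketIfNotExists(volName)
        if err != nil {
            return err
        }
        deps, err := vol.CreateBucketIfNotExists([]byte("deps"))
        if err != nil {
            return err
        }
        if deps.Get(ctrID) == nil {
            return deps.Put(ctrID, ctrID)
        }
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }

    // Drop the dependency again, e.g. when the container is removed.
    err = db.Update(func(tx *bolt.Tx) error {
        vols := tx.Bucket([]byte("volumes"))
        if vols == nil {
            return nil
        }
        vol := vols.Bucket(volName)
        if vol == nil {
            // Volume already gone; nothing to clean up.
            return nil
        }
        if deps := vol.Bucket([]byte("deps")); deps != nil {
            return deps.Delete(ctrID)
        }
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("dependency recorded and removed")
}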
diff --git a/libpod/common/common.go b/libpod/common/common.go
index 932f1f6da..5d10bee36 100644
--- a/libpod/common/common.go
+++ b/libpod/common/common.go
@@ -1,32 +1,9 @@
package common
import (
- "io"
-
- cp "github.com/containers/image/copy"
"github.com/containers/image/types"
)
-// GetCopyOptions constructs a new containers/image/copy.Options{} struct from the given parameters
-func GetCopyOptions(reportWriter io.Writer, signaturePolicyPath string, srcDockerRegistry, destDockerRegistry *DockerRegistryOptions, signing SigningOptions, authFile, manifestType string, forceCompress bool) *cp.Options {
- if srcDockerRegistry == nil {
- srcDockerRegistry = &DockerRegistryOptions{}
- }
- if destDockerRegistry == nil {
- destDockerRegistry = &DockerRegistryOptions{}
- }
- srcContext := srcDockerRegistry.GetSystemContext(signaturePolicyPath, authFile, forceCompress)
- destContext := destDockerRegistry.GetSystemContext(signaturePolicyPath, authFile, forceCompress)
- return &cp.Options{
- RemoveSignatures: signing.RemoveSignatures,
- SignBy: signing.SignBy,
- ReportWriter: reportWriter,
- SourceCtx: srcContext,
- DestinationCtx: destContext,
- ForceManifestMIMEType: manifestType,
- }
-}
-
// GetSystemContext Constructs a new containers/image/types.SystemContext{} struct from the given signaturePolicy path
func GetSystemContext(signaturePolicyPath, authFilePath string, forceCompress bool) *types.SystemContext {
sc := &types.SystemContext{}
diff --git a/libpod/common/docker_registry_options.go b/libpod/common/docker_registry_options.go
deleted file mode 100644
index f79ae0c54..000000000
--- a/libpod/common/docker_registry_options.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package common
-
-import "github.com/containers/image/types"
-
-// DockerRegistryOptions encapsulates settings that affect how we connect or
-// authenticate to a remote registry.
-type DockerRegistryOptions struct {
- // DockerRegistryCreds is the user name and password to supply in case
- // we need to pull an image from a registry, and it requires us to
- // authenticate.
- DockerRegistryCreds *types.DockerAuthConfig
- // DockerCertPath is the location of a directory containing CA
- // certificates which will be used to verify the registry's certificate
- // (all files with names ending in ".crt"), and possibly client
- // certificates and private keys (pairs of files with the same name,
- // except for ".cert" and ".key" suffixes).
- DockerCertPath string
- // DockerInsecureSkipTLSVerify turns off verification of TLS
- // certificates and allows connecting to registries without encryption.
- DockerInsecureSkipTLSVerify bool
-}
-
-// GetSystemContext constructs a new system context from the given signaturePolicy path and the
-// values in the DockerRegistryOptions
-func (o DockerRegistryOptions) GetSystemContext(signaturePolicyPath, authFile string, forceCompress bool) *types.SystemContext {
- sc := &types.SystemContext{
- SignaturePolicyPath: signaturePolicyPath,
- DockerAuthConfig: o.DockerRegistryCreds,
- DockerCertPath: o.DockerCertPath,
- DockerInsecureSkipTLSVerify: o.DockerInsecureSkipTLSVerify,
- AuthFilePath: authFile,
- DirForceCompress: forceCompress,
- }
- return sc
-}
diff --git a/libpod/common/output_interfaces.go b/libpod/common/output_interfaces.go
deleted file mode 100644
index 805d0c79a..000000000
--- a/libpod/common/output_interfaces.go
+++ /dev/null
@@ -1 +0,0 @@
-package common
diff --git a/libpod/common_test.go b/libpod/common_test.go
index b7fee2764..81c8f1920 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -74,6 +74,11 @@ func getTestContainer(id, name, locksDir string) (*Container, error) {
"/test/file.test": "/test2/file2.test",
},
},
+ runtime: &Runtime{
+ config: &RuntimeConfig{
+ VolumePath: "/does/not/exist/tmp/volumes",
+ },
+ },
valid: true,
}
diff --git a/libpod/container.go b/libpod/container.go
index 7bb5b2687..18d867f41 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -9,6 +9,7 @@ import (
"github.com/containernetworking/cni/pkg/types"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
+ "github.com/containers/libpod/pkg/namespaces"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -296,6 +297,8 @@ type ContainerConfig struct {
HostAdd []string `json:"hostsAdd,omitempty"`
// Network names (CNI) to add container to. Empty to use default network.
Networks []string `json:"networks,omitempty"`
+ // Network mode specified for the default network.
+ NetMode namespaces.NetworkMode `json:"networkMode,omitempty"`
// Image Config
@@ -826,7 +829,7 @@ func (c *Container) IPs() ([]net.IPNet, error) {
}
if !c.config.CreateNetNS {
- return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod")
+ return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID())
}
ips := make([]net.IPNet, 0)
@@ -854,7 +857,7 @@ func (c *Container) Routes() ([]types.Route, error) {
}
if !c.config.CreateNetNS {
- return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod")
+ return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID())
}
routes := make([]types.Route, 0)
@@ -996,3 +999,30 @@ func (c *Container) IsInfra() bool {
func (c *Container) IsReadOnly() bool {
return c.config.Spec.Root.Readonly
}
+
+// NetworkDisabled returns whether the container is running with a disabled network
+func (c *Container) NetworkDisabled() (bool, error) {
+ if c.config.NetNsCtr != "" {
+ container, err := c.runtime.state.Container(c.config.NetNsCtr)
+ if err != nil {
+ return false, err
+ }
+ return networkDisabled(container)
+ }
+ return networkDisabled(c)
+
+}
+
+func networkDisabled(c *Container) (bool, error) {
+ if c.config.CreateNetNS {
+ return false, nil
+ }
+ if !c.config.PostConfigureNetNS {
+ for _, ns := range c.config.Spec.Linux.Namespaces {
+ if ns.Type == spec.NetworkNamespace {
+ return ns.Path == "", nil
+ }
+ }
+ }
+ return false, nil
+}
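networkDisabled above boils down to inspecting the generated OCI spec: when the namespace will not be configured later, a network namespace entry with an empty path means networking is off. A small sketch of just that spec check (the sample spec in main is made up):

package main

import (
    "fmt"

    spec "github.com/opencontainers/runtime-spec/specs-go"
)

// specNetworkDisabled mirrors the check above: a network namespace entry
// with an empty path in the OCI spec means the container gets a fresh,
// unconfigured network namespace, i.e. networking is effectively disabled.
func specNetworkDisabled(s *spec.Spec) bool {
    if s.Linux == nil {
        return false
    }
    for _, ns := range s.Linux.Namespaces {
        if ns.Type == spec.NetworkNamespace {
            return ns.Path == ""
        }
    }
    return false
}

func main() {
    s := &spec.Spec{
        Linux: &spec.Linux{
            Namespaces: []spec.LinuxNamespace{
                {Type: spec.NetworkNamespace, Path: ""},
            },
        },
    }
    fmt.Println(specNetworkDisabled(s)) // true
}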
diff --git a/libpod/container_api.go b/libpod/container_api.go
index d99aec5b4..09bc46905 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -39,7 +39,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -93,7 +93,7 @@ func (c *Container) Start(ctx context.Context) (err error) {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -159,7 +159,7 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return nil, errors.Wrapf(err, "error checking dependencies for container %s")
+ return nil, errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -328,25 +328,25 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user string) e
if err != nil {
return errors.Wrapf(err, "error exec %s", c.ID())
}
+ chWait := make(chan error)
+ go func() {
+ chWait <- execCmd.Wait()
+ }()
+ defer close(chWait)
pidFile := c.execPidPath(sessionID)
- // 1 second seems a reasonable time to wait
- // See https://github.com/containers/libpod/issues/1495
- const pidWaitTimeout = 1000
+ // 60 seconds seems a reasonable time to wait
+ // https://github.com/containers/libpod/issues/1495
+ // https://github.com/containers/libpod/issues/1816
+ const pidWaitTimeout = 60000
// Wait until the runtime makes the pidfile
- // TODO: If runtime errors before the PID file is created, we have to
- // wait for timeout here
- if err := WaitForFile(pidFile, pidWaitTimeout*time.Millisecond); err != nil {
- logrus.Debugf("Timed out waiting for pidfile from runtime for container %s exec", c.ID())
-
- // Check if an error occurred in the process before we made a pidfile
- // TODO: Wait() here is a poor choice - is there a way to see if
- // a process has finished, instead of waiting for it to finish?
- if err := execCmd.Wait(); err != nil {
+ exited, err := WaitForFile(pidFile, chWait, pidWaitTimeout*time.Millisecond)
+ if err != nil {
+ if exited {
+ // If the runtime exited, propagate the error we got from the process.
return err
}
-
return errors.Wrapf(err, "timed out waiting for runtime to create pidfile for exec session in container %s", c.ID())
}
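The reworked exec path above races the runtime's pidfile against the child's exit status, so a runtime that fails early no longer blocks for the full timeout. A rough, stdlib-only sketch of that wait pattern (libpod's actual WaitForFile helper is defined elsewhere and differs in detail):

package main

import (
    "fmt"
    "os"
    "time"
)

// waitForFileOrExit polls until path exists, the command's Wait result
// arrives on chWait, or the timeout elapses. The boolean reports whether
// the process exited before the file appeared.
func waitForFileOrExit(path string, chWait <-chan error, timeout time.Duration) (bool, error) {
    ticker := time.NewTicker(25 * time.Millisecond)
    defer ticker.Stop()
    deadline := time.After(timeout)
    for {
        select {
        case err := <-chWait:
            return true, err
        case <-ticker.C:
            if _, err := os.Stat(path); err == nil {
                return false, nil
            }
        case <-deadline:
            return false, fmt.Errorf("timed out waiting for %s", path)
        }
    }
}

func main() {
    chWait := make(chan error, 1)
    go func() {
        // Simulate a runtime that fails before writing its pidfile.
        time.Sleep(100 * time.Millisecond)
        chWait <- fmt.Errorf("runtime exited early")
    }()
    exited, err := waitForFileOrExit("/nonexistent/pidfile", chWait, 2*time.Second)
    fmt.Println(exited, err) // true runtime exited early
}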
@@ -388,7 +388,10 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user string) e
locked = false
}
- waitErr := execCmd.Wait()
+ var waitErr error
+ if !exited {
+ waitErr = <-chWait
+ }
// Lock again
if !c.batched {
@@ -672,22 +675,27 @@ func (c *Container) Batch(batchFunc func(*Container) error) error {
return err
}
-// Sync updates the current state of the container, checking whether its state
-// has changed
-// Sync can only be used inside Batch() - otherwise, it will be done
-// automatically.
-// When called outside Batch(), Sync() is a no-op
+// Sync updates the status of a container by querying the OCI runtime.
+// If the container has not been created inside the OCI runtime, nothing will be
+// done.
+// Most of the time, Podman does not explicitly query the OCI runtime for
+// container status, and instead relies upon exit files created by conmon.
+// This can cause a disconnect between running state and what Podman sees in
+// cases where conmon was killed unexpectedly, or runc was upgraded.
+// Running a manual Sync() ensures that container state will be correct in
+// such situations.
func (c *Container) Sync() error {
if !c.batched {
- return nil
+ c.lock.Lock()
+ defer c.lock.Unlock()
}
// If runtime knows about the container, update its status in runtime
// And then save back to disk
if (c.state.State != ContainerStateUnknown) &&
- (c.state.State != ContainerStateConfigured) {
+ (c.state.State != ContainerStateConfigured) &&
+ (c.state.State != ContainerStateExited) {
oldState := c.state.State
- // TODO: optionally replace this with a stat for the exit file
if err := c.runtime.ociRuntime.updateContainerStatus(c, true); err != nil {
return err
}
@@ -715,7 +723,7 @@ func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err e
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -800,7 +808,7 @@ func (c *Container) Refresh(ctx context.Context) error {
return err
}
- logrus.Debugf("Successfully refresh container %s state")
+ logrus.Debugf("Successfully refresh container %s state", c.ID())
// Initialize the container if it was created in runc
if wasCreated || wasRunning || wasPaused {
@@ -829,9 +837,22 @@ func (c *Container) Refresh(ctx context.Context) error {
return nil
}
+// ContainerCheckpointOptions is a struct used to pass the parameters
+// for checkpointing (and restoring) to the corresponding functions
+type ContainerCheckpointOptions struct {
+ // Keep tells the API to not delete checkpoint artifacts
+ Keep bool
+ // KeepRunning tells the API to keep the container running
+ // after writing the checkpoint to disk
+ KeepRunning bool
+ // TCPEstablished tells the API to checkpoint a container
+ // even if it contains established TCP connections
+ TCPEstablished bool
+}
+
// Checkpoint checkpoints a container
-func (c *Container) Checkpoint(ctx context.Context, keep bool) error {
- logrus.Debugf("Trying to checkpoint container %s", c)
+func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
+ logrus.Debugf("Trying to checkpoint container %s", c.ID())
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -841,12 +862,12 @@ func (c *Container) Checkpoint(ctx context.Context, keep bool) error {
}
}
- return c.checkpoint(ctx, keep)
+ return c.checkpoint(ctx, options)
}
// Restore restores a container
-func (c *Container) Restore(ctx context.Context, keep bool) (err error) {
- logrus.Debugf("Trying to restore container %s", c)
+func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
+ logrus.Debugf("Trying to restore container %s", c.ID())
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -856,5 +877,5 @@ func (c *Container) Restore(ctx context.Context, keep bool) (err error) {
}
}
- return c.restore(ctx, keep)
+ return c.restore(ctx, options)
}
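With the bare keep flag replaced by ContainerCheckpointOptions, callers now pass a single options struct. An illustrative caller, assuming an already-obtained *libpod.Container:

package checkpointexample

import (
    "context"

    "github.com/containers/libpod/libpod"
)

// checkpointKeepingArtifacts uses the new options-based Checkpoint signature:
// keep the checkpoint artifacts on disk and allow established TCP connections
// to be checkpointed.
func checkpointKeepingArtifacts(ctx context.Context, ctr *libpod.Container) error {
    return ctr.Checkpoint(ctx, libpod.ContainerCheckpointOptions{
        Keep:           true,
        TCPEstablished: true,
    })
}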
diff --git a/libpod/container_easyjson.go b/libpod/container_easyjson.go
index 041cc08ac..8bf5cb64f 100644
--- a/libpod/container_easyjson.go
+++ b/libpod/container_easyjson.go
@@ -8,6 +8,7 @@ import (
json "encoding/json"
types "github.com/containernetworking/cni/pkg/types"
current "github.com/containernetworking/cni/pkg/types/current"
+ namespaces "github.com/containers/libpod/pkg/namespaces"
storage "github.com/containers/storage"
idtools "github.com/containers/storage/pkg/idtools"
ocicni "github.com/cri-o/ocicni/pkg/ocicni"
@@ -1550,6 +1551,8 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
}
in.Delim(']')
}
+ case "networkMode":
+ out.NetMode = namespaces.NetworkMode(in.String())
case "userVolumes":
if in.IsNull() {
in.Skip()
@@ -2177,6 +2180,16 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
out.RawByte(']')
}
}
+ if in.NetMode != "" {
+ const prefix string = ",\"networkMode\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.NetMode))
+ }
if len(in.UserVolumes) != 0 {
const prefix string = ",\"userVolumes\":"
if first {
diff --git a/libpod/container_graph_test.go b/libpod/container_graph_test.go
index bba3d7aad..25461f1f4 100644
--- a/libpod/container_graph_test.go
+++ b/libpod/container_graph_test.go
@@ -205,6 +205,7 @@ func TestBuildContainerGraphFourContainersNoEdges(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4})
assert.NoError(t, err)
@@ -241,6 +242,7 @@ func TestBuildContainerGraphFourContainersTwoInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr2.config.UserNsCtr = ctr1.config.ID
@@ -260,6 +262,7 @@ func TestBuildContainerGraphFourContainersAllInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr2.config.UserNsCtr = ctr3.config.ID
ctr3.config.NetNsCtr = ctr4.config.ID
@@ -281,6 +284,7 @@ func TestBuildContainerGraphFourContainersNoneInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr1.config.NetNsCtr = ctr3.config.ID
ctr2.config.UserNsCtr = ctr3.config.ID
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 9b07198bc..06a0c9f32 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -1,8 +1,11 @@
package libpod
import (
+ "strings"
+
"github.com/containers/libpod/pkg/inspect"
"github.com/cri-o/ocicni/pkg/ocicni"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
@@ -48,6 +51,17 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
hostnamePath = getPath
}
+ var mounts []specs.Mount
+ for i, mnt := range spec.Mounts {
+ mounts = append(mounts, mnt)
+ // We only want to show the name of the named volume in the inspect
+ // output, so split the path and get the name out of it.
+ if strings.Contains(mnt.Source, c.runtime.config.VolumePath) {
+ split := strings.Split(mnt.Source[len(c.runtime.config.VolumePath)+1:], "/")
+ mounts[i].Source = split[0]
+ }
+ }
+
data := &inspect.ContainerInspectData{
ID: config.ID,
Created: config.CreatedTime,
@@ -85,7 +99,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
AppArmorProfile: spec.Process.ApparmorProfile,
ExecIDs: execIDs,
GraphDriver: driverData,
- Mounts: spec.Mounts,
+ Mounts: mounts,
Dependencies: c.Dependencies(),
NetworkSettings: &inspect.NetworkSettings{
Bridge: "", // TODO
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index d2f48d661..af17d8495 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -18,10 +18,7 @@ import (
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
"github.com/containers/libpod/pkg/hooks/exec"
- "github.com/containers/libpod/pkg/lookup"
- "github.com/containers/libpod/pkg/resolvconf"
"github.com/containers/libpod/pkg/rootless"
- "github.com/containers/libpod/pkg/secrets"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
@@ -276,6 +273,27 @@ func (c *Container) setupStorage(ctx context.Context) error {
},
LabelOpts: c.config.LabelOpts,
}
+ if c.config.Privileged {
+ privOpt := func(opt string) bool {
+ for _, privopt := range []string{"nodev", "nosuid", "noexec"} {
+ if opt == privopt {
+ return true
+ }
+ }
+ return false
+ }
+ defOptions, err := storage.GetDefaultMountOptions()
+ if err != nil {
+ return errors.Wrapf(err, "error getting default mount options")
+ }
+ var newOptions []string
+ for _, opt := range defOptions {
+ if !privOpt(opt) {
+ newOptions = append(newOptions, opt)
+ }
+ }
+ options.MountOpts = newOptions
+ }
if c.config.Rootfs == "" {
options.IDMappingOptions = c.config.IDMappings
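For privileged containers the new code drops the restrictive flags from the storage driver's default mount options. The same filter in isolation, with example defaults:

package main

import "fmt"

// stripPrivilegedMountOpts mirrors the filter above: privileged containers
// drop the nodev, nosuid and noexec flags from the storage driver's default
// mount options. The defaults used in main are only an example.
func stripPrivilegedMountOpts(defaults []string) []string {
    restricted := map[string]bool{"nodev": true, "nosuid": true, "noexec": true}
    kept := make([]string, 0, len(defaults))
    for _, opt := range defaults {
        if !restricted[opt] {
            kept = append(kept, opt)
        }
    }
    return kept
}

func main() {
    fmt.Println(stripPrivilegedMountOpts([]string{"nodev", "nosuid", "noexec", "rw"})) // [rw]
}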
@@ -583,13 +601,17 @@ func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container
}
func (c *Container) completeNetworkSetup() error {
- if !c.config.PostConfigureNetNS {
+ netDisabled, err := c.NetworkDisabled()
+ if err != nil {
+ return err
+ }
+ if !c.config.PostConfigureNetNS || netDisabled {
return nil
}
if err := c.syncContainer(); err != nil {
return err
}
- if rootless.IsRootless() {
+ if c.config.NetMode == "slirp4netns" {
return c.runtime.setupRootlessNetNS(c)
}
return c.runtime.setupNetNS(c)
@@ -597,10 +619,6 @@ func (c *Container) completeNetworkSetup() error {
// Initialize a container, creating it in the runtime
func (c *Container) init(ctx context.Context) error {
- if err := c.makeBindMounts(); err != nil {
- return err
- }
-
// Generate the OCI spec
spec, err := c.generateSpec(ctx)
if err != nil {
@@ -613,7 +631,7 @@ func (c *Container) init(ctx context.Context) error {
}
// With the spec complete, do an OCI create
- if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, false); err != nil {
+ if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil {
return err
}
@@ -828,28 +846,22 @@ func (c *Container) mountStorage() (string, error) {
return c.state.Mountpoint, nil
}
- if !rootless.IsRootless() {
- // TODO: generalize this mount code so it will mount every mount in ctr.config.Mounts
- mounted, err := mount.Mounted(c.config.ShmDir)
- if err != nil {
- return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir)
- }
+ mounted, err := mount.Mounted(c.config.ShmDir)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir)
+ }
+ if !mounted {
+ shmOptions := fmt.Sprintf("mode=1777,size=%d", c.config.ShmSize)
+ if err := c.mountSHM(shmOptions); err != nil {
+ return "", err
+ }
if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil {
return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir)
}
-
- if !mounted {
- shmOptions := fmt.Sprintf("mode=1777,size=%d", c.config.ShmSize)
- if err := c.mountSHM(shmOptions); err != nil {
- return "", err
- }
- if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil {
- return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir)
- }
- }
}
+ // TODO: generalize this mount code so it will mount every mount in ctr.config.Mounts
mountPoint := c.config.Rootfs
if mountPoint == "" {
mountPoint, err = c.mount()
@@ -987,86 +999,6 @@ func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
return nil
}
-// Make standard bind mounts to include in the container
-func (c *Container) makeBindMounts() error {
- if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil {
- return errors.Wrapf(err, "cannot chown run directory %s", c.state.RunDir)
- }
-
- if c.state.BindMounts == nil {
- c.state.BindMounts = make(map[string]string)
- }
-
- // SHM is always added when we mount the container
- c.state.BindMounts["/dev/shm"] = c.config.ShmDir
-
- // Make /etc/resolv.conf
- if _, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
- // If it already exists, delete so we can recreate
- delete(c.state.BindMounts, "/etc/resolv.conf")
- }
- newResolv, err := c.generateResolvConf()
- if err != nil {
- return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
- }
- c.state.BindMounts["/etc/resolv.conf"] = newResolv
-
- newPasswd, err := c.generatePasswd()
- if err != nil {
- return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
- }
- if newPasswd != "" {
- // Make /etc/passwd
- if _, ok := c.state.BindMounts["/etc/passwd"]; ok {
- // If it already exists, delete so we can recreate
- delete(c.state.BindMounts, "/etc/passwd")
- }
- logrus.Debugf("adding entry to /etc/passwd for non existent default user")
- c.state.BindMounts["/etc/passwd"] = newPasswd
- }
- // Make /etc/hosts
- if _, ok := c.state.BindMounts["/etc/hosts"]; ok {
- // If it already exists, delete so we can recreate
- delete(c.state.BindMounts, "/etc/hosts")
- }
- newHosts, err := c.generateHosts()
- if err != nil {
- return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
- }
- c.state.BindMounts["/etc/hosts"] = newHosts
-
- // Make /etc/hostname
- // This should never change, so no need to recreate if it exists
- if _, ok := c.state.BindMounts["/etc/hostname"]; !ok {
- hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname())
- if err != nil {
- return errors.Wrapf(err, "error creating hostname file for container %s", c.ID())
- }
- c.state.BindMounts["/etc/hostname"] = hostnamePath
- }
-
- // Make .containerenv
- // Empty file, so no need to recreate if it exists
- if _, ok := c.state.BindMounts["/run/.containerenv"]; !ok {
- // Empty string for now, but we may consider populating this later
- containerenvPath, err := c.writeStringToRundir(".containerenv", "")
- if err != nil {
- return errors.Wrapf(err, "error creating containerenv file for container %s", c.ID())
- }
- c.state.BindMounts["/run/.containerenv"] = containerenvPath
- }
-
- // Add Secret Mounts
- secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.DestinationRunDir, c.RootUID(), c.RootGID())
- for _, mount := range secretMounts {
- if _, ok := c.state.BindMounts[mount.Destination]; !ok {
- c.state.BindMounts[mount.Destination] = mount.Source
- }
- }
-
- return nil
-}
-
// writeStringToRundir copies the provided file to the runtimedir
func (c *Container) writeStringToRundir(destFile, output string) (string, error) {
destFileName := filepath.Join(c.state.RunDir, destFile)
@@ -1095,146 +1027,7 @@ func (c *Container) writeStringToRundir(destFile, output string) (string, error)
return filepath.Join(c.state.DestinationRunDir, destFile), nil
}
-// generatePasswd generates a container specific passwd file,
-// iff g.config.User is a number
-func (c *Container) generatePasswd() (string, error) {
- var (
- groupspec string
- group *user.Group
- gid int
- )
- if c.config.User == "" {
- return "", nil
- }
- spec := strings.SplitN(c.config.User, ":", 2)
- userspec := spec[0]
- if len(spec) > 1 {
- groupspec = spec[1]
- }
- // If a non numeric User, then don't generate passwd
- uid, err := strconv.ParseUint(userspec, 10, 32)
- if err != nil {
- return "", nil
- }
- // Lookup the user to see if it exists in the container image
- _, err = lookup.GetUser(c.state.Mountpoint, userspec)
- if err != nil && err != user.ErrNoPasswdEntries {
- return "", err
- }
- if err == nil {
- return "", nil
- }
- if groupspec != "" {
- if !c.state.Mounted {
- return "", errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to translate group field for passwd record", c.ID())
- }
- group, err = lookup.GetGroup(c.state.Mountpoint, groupspec)
- if err != nil {
- if err == user.ErrNoGroupEntries {
- return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
- }
- return "", err
- }
- gid = group.Gid
- }
- originPasswdFile := filepath.Join(c.state.Mountpoint, "/etc/passwd")
- orig, err := ioutil.ReadFile(originPasswdFile)
- if err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile)
- }
-
- pwd := fmt.Sprintf("%s%d:x:%d:%d:container user:%s:/bin/sh\n", orig, uid, uid, gid, c.WorkingDir())
- passwdFile, err := c.writeStringToRundir("passwd", pwd)
- if err != nil {
- return "", errors.Wrapf(err, "failed to create temporary passwd file")
- }
- if os.Chmod(passwdFile, 0644); err != nil {
- return "", err
- }
- return passwdFile, nil
-}
-
-// generateResolvConf generates a containers resolv.conf
-func (c *Container) generateResolvConf() (string, error) {
- // Determine the endpoint for resolv.conf in case it is a symlink
- resolvPath, err := filepath.EvalSymlinks("/etc/resolv.conf")
- if err != nil {
- return "", err
- }
-
- contents, err := ioutil.ReadFile(resolvPath)
- if err != nil {
- return "", errors.Wrapf(err, "unable to read %s", resolvPath)
- }
-
- // Process the file to remove localhost nameservers
- // TODO: set ipv6 enable bool more sanely
- resolv, err := resolvconf.FilterResolvDNS(contents, true)
- if err != nil {
- return "", errors.Wrapf(err, "error parsing host resolv.conf")
- }
-
- // Make a new resolv.conf
- nameservers := resolvconf.GetNameservers(resolv.Content)
- if len(c.config.DNSServer) > 0 {
- // We store DNS servers as net.IP, so need to convert to string
- nameservers = []string{}
- for _, server := range c.config.DNSServer {
- nameservers = append(nameservers, server.String())
- }
- }
-
- search := resolvconf.GetSearchDomains(resolv.Content)
- if len(c.config.DNSSearch) > 0 {
- search = c.config.DNSSearch
- }
-
- options := resolvconf.GetOptions(resolv.Content)
- if len(c.config.DNSOption) > 0 {
- options = c.config.DNSOption
- }
-
- destPath := filepath.Join(c.state.RunDir, "resolv.conf")
-
- if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "error removing resolv.conf for container %s", c.ID())
- }
-
- // Build resolv.conf
- if _, err = resolvconf.Build(destPath, nameservers, search, options); err != nil {
- return "", errors.Wrapf(err, "error building resolv.conf for container %s")
- }
-
- // Relabel resolv.conf for the container
- if err := label.Relabel(destPath, c.config.MountLabel, false); err != nil {
- return "", err
- }
-
- return filepath.Join(c.state.DestinationRunDir, "resolv.conf"), nil
-}
-
-// generateHosts creates a containers hosts file
-func (c *Container) generateHosts() (string, error) {
- orig, err := ioutil.ReadFile("/etc/hosts")
- if err != nil {
- return "", errors.Wrapf(err, "unable to read /etc/hosts")
- }
- hosts := string(orig)
- if len(c.config.HostAdd) > 0 {
- for _, host := range c.config.HostAdd {
- // the host format has already been verified at this point
- fields := strings.SplitN(host, ":", 2)
- hosts += fmt.Sprintf("%s %s\n", fields[1], fields[0])
- }
- }
- if len(c.state.NetworkStatus) > 0 && len(c.state.NetworkStatus[0].IPs) > 0 {
- ipAddress := strings.Split(c.state.NetworkStatus[0].IPs[0].Address.String(), "/")[0]
- hosts += fmt.Sprintf("%s\t%s\n", ipAddress, c.Hostname())
- }
- return c.writeStringToRundir("hosts", hosts)
-}
-
-func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator) error {
+func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator, execUser *user.ExecUser) error {
var uid, gid int
mountPoint := c.state.Mountpoint
if !c.state.Mounted {
@@ -1260,12 +1053,8 @@ func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator)
}
if c.config.User != "" {
- if !c.state.Mounted {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to translate User field", c.ID())
- }
- execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, nil)
- if err != nil {
- return err
+ if execUser == nil {
+ return errors.Wrapf(ErrInternal, "nil pointer passed to addLocalVolumes for execUser")
}
uid = execUser.Uid
gid = execUser.Gid
@@ -1379,10 +1168,6 @@ func (c *Container) saveSpec(spec *spec.Spec) error {
}
func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (extensionStageHooks map[string][]spec.Hook, err error) {
- if len(c.runtime.config.HooksDir) == 0 {
- return nil, nil
- }
-
var locale string
var ok bool
for _, envVar := range []string{
@@ -1410,25 +1195,43 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten
}
}
- allHooks := make(map[string][]spec.Hook)
- for _, hDir := range c.runtime.config.HooksDir {
- manager, err := hooks.New(ctx, []string{hDir}, []string{"poststop"}, lang)
- if err != nil {
- if c.runtime.config.HooksDirNotExistFatal || !os.IsNotExist(err) {
- return nil, err
- }
- logrus.Warnf("failed to load hooks: {}", err)
+ if c.runtime.config.HooksDir == nil {
+ if rootless.IsRootless() {
return nil, nil
}
- hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
- if err != nil {
- return nil, err
+ allHooks := make(map[string][]spec.Hook)
+ for _, hDir := range []string{hooks.DefaultDir, hooks.OverrideDir} {
+ manager, err := hooks.New(ctx, []string{hDir}, []string{"poststop"}, lang)
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+ if err != nil {
+ return nil, err
+ }
+ if len(hooks) > 0 || config.Hooks != nil {
+ logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir)
+ }
+ for i, hook := range hooks {
+ allHooks[i] = hook
+ }
}
- for i, hook := range hooks {
- allHooks[i] = hook
+ return allHooks, nil
+ }
+
+ manager, err := hooks.New(ctx, c.runtime.config.HooksDir, []string{"poststop"}, lang)
+ if err != nil {
+ if os.IsNotExist(err) {
+ logrus.Warnf("Requested OCI hooks directory %q does not exist", c.runtime.config.HooksDir)
+ return nil, nil
}
+ return nil, err
}
- return allHooks, nil
+
+ return manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
}
// mount mounts the container's root filesystem
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 163cd75e7..93d20491e 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -11,6 +11,7 @@ import (
"os"
"path"
"path/filepath"
+ "strconv"
"strings"
"sync"
"syscall"
@@ -21,8 +22,12 @@ import (
crioAnnotations "github.com/containers/libpod/pkg/annotations"
"github.com/containers/libpod/pkg/criu"
"github.com/containers/libpod/pkg/lookup"
+ "github.com/containers/libpod/pkg/resolvconf"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/secrets"
"github.com/containers/storage/pkg/idtools"
+ "github.com/mrunalp/fileutils"
+ "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -57,7 +62,7 @@ func (c *Container) prepare() (err error) {
networkStatus []*cnitypes.Result
createNetNSErr, mountStorageErr error
mountPoint string
- saveNetworkStatus bool
+ tmpStateLock sync.Mutex
)
wg.Add(2)
@@ -66,17 +71,55 @@ func (c *Container) prepare() (err error) {
defer wg.Done()
// Set up network namespace if not already set up
if c.config.CreateNetNS && c.state.NetNS == nil && !c.config.PostConfigureNetNS {
- saveNetworkStatus = true
netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c)
+
+ tmpStateLock.Lock()
+ defer tmpStateLock.Unlock()
+
+ // Assign NetNS attributes to container
+ if createNetNSErr == nil {
+ c.state.NetNS = netNS
+ c.state.NetworkStatus = networkStatus
+ }
}
}()
// Mount storage if not mounted
go func() {
defer wg.Done()
mountPoint, mountStorageErr = c.mountStorage()
+
+ if mountStorageErr != nil {
+ return
+ }
+
+ tmpStateLock.Lock()
+ defer tmpStateLock.Unlock()
+
+ // Finish up mountStorage
+ c.state.Mounted = true
+ c.state.Mountpoint = mountPoint
+ if c.state.UserNSRoot == "" {
+ c.state.RealMountpoint = c.state.Mountpoint
+ } else {
+ c.state.RealMountpoint = filepath.Join(c.state.UserNSRoot, "mountpoint")
+ }
+
+ logrus.Debugf("Created root filesystem for container %s at %s", c.ID(), c.state.Mountpoint)
+ }()
+
+ defer func() {
+ if err != nil {
+ if err2 := c.cleanupNetwork(); err2 != nil {
+ logrus.Errorf("Error cleaning up container %s network: %v", c.ID(), err2)
+ }
+ if err2 := c.cleanupStorage(); err2 != nil {
+ logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), err2)
+ }
+ }
}()
wg.Wait()
+
if createNetNSErr != nil {
if mountStorageErr != nil {
logrus.Error(createNetNSErr)
@@ -88,28 +131,22 @@ func (c *Container) prepare() (err error) {
return mountStorageErr
}
- // Assign NetNS attributes to container
- if saveNetworkStatus {
- c.state.NetNS = netNS
- c.state.NetworkStatus = networkStatus
- }
-
- // Finish up mountStorage
- c.state.Mounted = true
- c.state.Mountpoint = mountPoint
- if c.state.UserNSRoot == "" {
- c.state.RealMountpoint = c.state.Mountpoint
- } else {
- c.state.RealMountpoint = filepath.Join(c.state.UserNSRoot, "mountpoint")
- }
-
- logrus.Debugf("Created root filesystem for container %s at %s", c.ID(), c.state.Mountpoint)
// Save the container
return c.save()
}
// cleanupNetwork unmounts and cleans up the container's network
func (c *Container) cleanupNetwork() error {
+ if c.config.NetNsCtr != "" {
+ return nil
+ }
+ netDisabled, err := c.NetworkDisabled()
+ if err != nil {
+ return err
+ }
+ if netDisabled {
+ return nil
+ }
if c.state.NetNS == nil {
logrus.Debugf("Network is already cleaned up, skipping...")
return nil
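The reworked prepare() above runs network-namespace creation and storage mounting in parallel, publishes their results under a small mutex, and relies on a deferred cleanup to roll back partial work on failure. A generic, stdlib-only sketch of that pattern (the step and rollback functions are placeholders, not libpod APIs):

package main

import (
    "errors"
    "fmt"
    "sync"
)

// prepareConcurrently runs two independent setup steps in parallel, guards
// the shared results with a mutex, and rolls back partial work via a
// deferred cleanup if either step failed.
func prepareConcurrently(setupNet, setupStorage func() error, rollback func()) (err error) {
    var (
        wg                 sync.WaitGroup
        stateLock          sync.Mutex
        netErr, storageErr error
        results            []string
    )

    defer func() {
        if err != nil {
            rollback()
        }
    }()

    wg.Add(2)
    go func() {
        defer wg.Done()
        if netErr = setupNet(); netErr == nil {
            stateLock.Lock()
            results = append(results, "network ready")
            stateLock.Unlock()
        }
    }()
    go func() {
        defer wg.Done()
        if storageErr = setupStorage(); storageErr == nil {
            stateLock.Lock()
            results = append(results, "storage mounted")
            stateLock.Unlock()
        }
    }()
    wg.Wait()

    if netErr != nil {
        return netErr
    }
    if storageErr != nil {
        return storageErr
    }
    fmt.Println(results)
    return nil
}

func main() {
    err := prepareConcurrently(
        func() error { return nil },
        func() error { return errors.New("mount failed") },
        func() { fmt.Println("rolling back partial setup") },
    )
    fmt.Println("err:", err)
}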
@@ -147,6 +184,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
}
}
+
+ if err := c.makeBindMounts(); err != nil {
+ return nil, err
+ }
// Check if the spec file mounts contain the label Relabel flags z or Z.
// If they do, relabel the source directory and then remove the option.
for _, m := range g.Mounts() {
@@ -190,23 +231,18 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
}
- if !rootless.IsRootless() {
- if c.state.ExtensionStageHooks, err = c.setupOCIHooks(ctx, g.Config); err != nil {
- return nil, errors.Wrapf(err, "error setting up OCI Hooks")
- }
+ if c.state.ExtensionStageHooks, err = c.setupOCIHooks(ctx, g.Config); err != nil {
+ return nil, errors.Wrapf(err, "error setting up OCI Hooks")
}
// Bind builtin image volumes
if c.config.Rootfs == "" && c.config.ImageVolumes {
- if err := c.addLocalVolumes(ctx, &g); err != nil {
+ if err := c.addLocalVolumes(ctx, &g, execUser); err != nil {
return nil, errors.Wrapf(err, "error mounting image volumes")
}
}
if c.config.User != "" {
- if !c.state.Mounted {
- return nil, errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to translate User field", c.ID())
- }
// User and Group must go together
g.SetProcessUID(uint32(execUser.Uid))
g.SetProcessGID(uint32(execUser.Gid))
@@ -214,9 +250,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Add addition groups if c.config.GroupAdd is not empty
if len(c.config.Groups) > 0 {
- if !c.state.Mounted {
- return nil, errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to add additional groups", c.ID())
- }
gids, _ := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, nil)
for _, gid := range gids {
g.AddProcessAdditionalGid(gid)
@@ -325,8 +358,34 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Mounts need to be sorted so paths will not cover other paths
mounts := sortMounts(g.Mounts())
g.ClearMounts()
+
+ // Determine property of RootPropagation based on volume properties. If
+ // a volume is shared, then keep root propagation shared. This should
+ // work for slave and private volumes too.
+ //
+ // For slave volumes, it can be either [r]shared/[r]slave.
+ //
+ // For private volumes any root propagation value should work.
+ rootPropagation := ""
for _, m := range mounts {
g.AddMount(m)
+ for _, opt := range m.Options {
+ switch opt {
+ case MountShared, MountRShared:
+ if rootPropagation != MountShared && rootPropagation != MountRShared {
+ rootPropagation = MountShared
+ }
+ case MountSlave, MountRSlave:
+ if rootPropagation != MountShared && rootPropagation != MountRShared && rootPropagation != MountSlave && rootPropagation != MountRSlave {
+ rootPropagation = MountRSlave
+ }
+ }
+ }
+ }
+
+ if rootPropagation != "" {
+ logrus.Debugf("set root propagation to %q", rootPropagation)
+ g.SetLinuxRootPropagation(rootPropagation)
}
return g.Config, nil
}
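The root-propagation block above picks the strongest propagation any mount asks for: shared wins over slave, and purely private mounts leave the default alone. Reduced to a standalone helper (string literals stand in for libpod's Mount* constants):

package main

import "fmt"

// rootPropagationFor mirrors the decision above: any shared or rshared mount
// option forces shared root propagation, otherwise any slave or rslave option
// forces rslave, and purely private mounts leave propagation unset.
func rootPropagationFor(mountOptions [][]string) string {
    prop := ""
    for _, opts := range mountOptions {
        for _, opt := range opts {
            switch opt {
            case "shared", "rshared":
                prop = "shared"
            case "slave", "rslave":
                if prop != "shared" {
                    prop = "rslave"
                }
            }
        }
    }
    return prop
}

func main() {
    fmt.Println(rootPropagationFor([][]string{{"rbind", "rslave"}, {"rbind", "shared"}})) // shared
    fmt.Println(rootPropagationFor([][]string{{"rbind", "ro"}}))                          // empty: keep default
}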
@@ -409,7 +468,7 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
return nil
}
-func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
+func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) (err error) {
if !criu.CheckForCriu() {
return errors.Errorf("checkpointing a container requires at least CRIU %d", criu.MinCriuVersion)
@@ -418,7 +477,7 @@ func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
if c.state.State != ContainerStateRunning {
return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
- if err := c.runtime.ociRuntime.checkpointContainer(c); err != nil {
+ if err := c.runtime.ociRuntime.checkpointContainer(c, options); err != nil {
return err
}
@@ -435,14 +494,16 @@ func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
logrus.Debugf("Checkpointed container %s", c.ID())
- c.state.State = ContainerStateStopped
+ if !options.KeepRunning {
+ c.state.State = ContainerStateStopped
- // Cleanup Storage and Network
- if err := c.cleanup(ctx); err != nil {
- return err
+ // Cleanup Storage and Network
+ if err := c.cleanup(ctx); err != nil {
+ return err
+ }
}
- if !keep {
+ if !options.Keep {
// Remove log file
os.Remove(filepath.Join(c.bundlePath(), "dump.log"))
// Remove statistic file
@@ -452,7 +513,7 @@ func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
return c.save()
}
-func (c *Container) restore(ctx context.Context, keep bool) (err error) {
+func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
if !criu.CheckForCriu() {
return errors.Errorf("restoring a container requires at least CRIU %d", criu.MinCriuVersion)
@@ -540,7 +601,7 @@ func (c *Container) restore(ctx context.Context, keep bool) (err error) {
// Cleanup for a working restore.
c.removeConmonFiles()
- if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, true); err != nil {
+ if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, &options); err != nil {
return err
}
@@ -548,7 +609,7 @@ func (c *Container) restore(ctx context.Context, keep bool) (err error) {
c.state.State = ContainerStateRunning
- if !keep {
+ if !options.Keep {
// Delete all checkpoint related files. At this point, in theory, all files
// should exist. Still ignoring errors for now as the container should be
// restored and running. Not erroring out just because some cleanup operation
@@ -569,3 +630,269 @@ func (c *Container) restore(ctx context.Context, keep bool) (err error) {
return c.save()
}
+
+// Make standard bind mounts to include in the container
+func (c *Container) makeBindMounts() error {
+ if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil {
+ return errors.Wrapf(err, "cannot chown run directory %s", c.state.RunDir)
+ }
+
+ if c.state.BindMounts == nil {
+ c.state.BindMounts = make(map[string]string)
+ }
+ netDisabled, err := c.NetworkDisabled()
+ if err != nil {
+ return err
+ }
+
+ if !netDisabled {
+ // If /etc/resolv.conf and /etc/hosts exist, delete them so we
+ // will recreate them
+ if path, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
+ if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error removing container %s resolv.conf", c.ID())
+ }
+ delete(c.state.BindMounts, "/etc/resolv.conf")
+ }
+ if path, ok := c.state.BindMounts["/etc/hosts"]; ok {
+ if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error removing container %s hosts", c.ID())
+ }
+ delete(c.state.BindMounts, "/etc/hosts")
+ }
+
+ if c.config.NetNsCtr != "" {
+ // We share a net namespace
+ // We want /etc/resolv.conf and /etc/hosts from the
+ // other container
+ depCtr, err := c.runtime.state.Container(c.config.NetNsCtr)
+ if err != nil {
+ return errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID())
+ }
+
+ // We need that container's bind mounts
+ bindMounts, err := depCtr.BindMounts()
+ if err != nil {
+ return errors.Wrapf(err, "error fetching bind mounts from dependency %s of container %s", depCtr.ID(), c.ID())
+ }
+
+ // The other container may not have a resolv.conf or /etc/hosts
+ // If it doesn't, don't copy them
+ resolvPath, exists := bindMounts["/etc/resolv.conf"]
+ if exists {
+ resolvDest := filepath.Join(c.state.RunDir, "resolv.conf")
+ if err := fileutils.CopyFile(resolvPath, resolvDest); err != nil {
+ return errors.Wrapf(err, "error copying resolv.conf from dependency container %s of container %s", depCtr.ID(), c.ID())
+ }
+ c.state.BindMounts["/etc/resolv.conf"] = resolvDest
+ }
+
+ hostsPath, exists := bindMounts["/etc/hosts"]
+ if exists {
+ hostsDest := filepath.Join(c.state.RunDir, "hosts")
+ if err := fileutils.CopyFile(hostsPath, hostsDest); err != nil {
+ return errors.Wrapf(err, "error copying hosts file from dependency container %s of container %s", depCtr.ID(), c.ID())
+ }
+ c.state.BindMounts["/etc/hosts"] = hostsDest
+ }
+ } else {
+ newResolv, err := c.generateResolvConf()
+ if err != nil {
+ return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
+ }
+ c.state.BindMounts["/etc/resolv.conf"] = newResolv
+
+ newHosts, err := c.generateHosts()
+ if err != nil {
+ return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
+ }
+ c.state.BindMounts["/etc/hosts"] = newHosts
+ }
+ }
+
+ // SHM is always added when we mount the container
+ c.state.BindMounts["/dev/shm"] = c.config.ShmDir
+
+ newPasswd, err := c.generatePasswd()
+ if err != nil {
+ return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
+ }
+ if newPasswd != "" {
+ // Make /etc/passwd
+ if _, ok := c.state.BindMounts["/etc/passwd"]; ok {
+ // If it already exists, delete so we can recreate
+ delete(c.state.BindMounts, "/etc/passwd")
+ }
+ logrus.Debugf("adding entry to /etc/passwd for non existent default user")
+ c.state.BindMounts["/etc/passwd"] = newPasswd
+ }
+
+ // Make /etc/hostname
+ // This should never change, so no need to recreate if it exists
+ if _, ok := c.state.BindMounts["/etc/hostname"]; !ok {
+ hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname())
+ if err != nil {
+ return errors.Wrapf(err, "error creating hostname file for container %s", c.ID())
+ }
+ c.state.BindMounts["/etc/hostname"] = hostnamePath
+ }
+
+ // Make .containerenv
+ // Empty file, so no need to recreate if it exists
+ if _, ok := c.state.BindMounts["/run/.containerenv"]; !ok {
+ // Empty string for now, but we may consider populating this later
+ containerenvPath, err := c.writeStringToRundir(".containerenv", "")
+ if err != nil {
+ return errors.Wrapf(err, "error creating containerenv file for container %s", c.ID())
+ }
+ c.state.BindMounts["/run/.containerenv"] = containerenvPath
+ }
+
+ // Add Secret Mounts
+ secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.DestinationRunDir, c.RootUID(), c.RootGID())
+ for _, mount := range secretMounts {
+ if _, ok := c.state.BindMounts[mount.Destination]; !ok {
+ c.state.BindMounts[mount.Destination] = mount.Source
+ }
+ }
+
+ return nil
+}
+
+// generateResolvConf generates a containers resolv.conf
+func (c *Container) generateResolvConf() (string, error) {
+ // Determine the endpoint for resolv.conf in case it is a symlink
+ resolvPath, err := filepath.EvalSymlinks("/etc/resolv.conf")
+ if err != nil {
+ return "", err
+ }
+
+ contents, err := ioutil.ReadFile(resolvPath)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to read %s", resolvPath)
+ }
+
+ // Ensure that the container's /etc/resolv.conf is compatible with its
+ // network configuration.
+ // TODO: set ipv6 enable bool more sanely
+ resolv, err := resolvconf.FilterResolvDNS(contents, true, c.config.CreateNetNS)
+ if err != nil {
+ return "", errors.Wrapf(err, "error parsing host resolv.conf")
+ }
+
+ // Make a new resolv.conf
+ nameservers := resolvconf.GetNameservers(resolv.Content)
+ if len(c.config.DNSServer) > 0 {
+ // We store DNS servers as net.IP, so need to convert to string
+ nameservers = []string{}
+ for _, server := range c.config.DNSServer {
+ nameservers = append(nameservers, server.String())
+ }
+ }
+
+ search := resolvconf.GetSearchDomains(resolv.Content)
+ if len(c.config.DNSSearch) > 0 {
+ search = c.config.DNSSearch
+ }
+
+ options := resolvconf.GetOptions(resolv.Content)
+ if len(c.config.DNSOption) > 0 {
+ options = c.config.DNSOption
+ }
+
+ destPath := filepath.Join(c.state.RunDir, "resolv.conf")
+
+ if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) {
+ return "", errors.Wrapf(err, "error removing resolv.conf for container %s", c.ID())
+ }
+
+ // Build resolv.conf
+ if _, err = resolvconf.Build(destPath, nameservers, search, options); err != nil {
+ return "", errors.Wrapf(err, "error building resolv.conf for container %s", c.ID())
+ }
+
+ // Relabel resolv.conf for the container
+ if err := label.Relabel(destPath, c.config.MountLabel, false); err != nil {
+ return "", err
+ }
+
+ return filepath.Join(c.state.DestinationRunDir, "resolv.conf"), nil
+}
+
+// generateHosts creates a containers hosts file
+func (c *Container) generateHosts() (string, error) {
+ orig, err := ioutil.ReadFile("/etc/hosts")
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to read /etc/hosts")
+ }
+ hosts := string(orig)
+ if len(c.config.HostAdd) > 0 {
+ for _, host := range c.config.HostAdd {
+ // the host format has already been verified at this point
+ fields := strings.SplitN(host, ":", 2)
+ hosts += fmt.Sprintf("%s %s\n", fields[1], fields[0])
+ }
+ }
+ if len(c.state.NetworkStatus) > 0 && len(c.state.NetworkStatus[0].IPs) > 0 {
+ ipAddress := strings.Split(c.state.NetworkStatus[0].IPs[0].Address.String(), "/")[0]
+ hosts += fmt.Sprintf("%s\t%s\n", ipAddress, c.Hostname())
+ }
+ return c.writeStringToRundir("hosts", hosts)
+}
+
+// generatePasswd generates a container specific passwd file,
+// iff g.config.User is a number
+func (c *Container) generatePasswd() (string, error) {
+ var (
+ groupspec string
+ gid int
+ )
+ if c.config.User == "" {
+ return "", nil
+ }
+ spec := strings.SplitN(c.config.User, ":", 2)
+ userspec := spec[0]
+ if len(spec) > 1 {
+ groupspec = spec[1]
+ }
+ // If a non numeric User, then don't generate passwd
+ uid, err := strconv.ParseUint(userspec, 10, 32)
+ if err != nil {
+ return "", nil
+ }
+ // Lookup the user to see if it exists in the container image
+ _, err = lookup.GetUser(c.state.Mountpoint, userspec)
+ if err != nil && err != user.ErrNoPasswdEntries {
+ return "", err
+ }
+ if err == nil {
+ return "", nil
+ }
+ if groupspec != "" {
+ ugid, err := strconv.ParseUint(groupspec, 10, 32)
+ if err == nil {
+ gid = int(ugid)
+ } else {
+ group, err := lookup.GetGroup(c.state.Mountpoint, groupspec)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
+ }
+ gid = group.Gid
+ }
+ }
+ originPasswdFile := filepath.Join(c.state.Mountpoint, "/etc/passwd")
+ orig, err := ioutil.ReadFile(originPasswdFile)
+ if err != nil && !os.IsNotExist(err) {
+ return "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile)
+ }
+
+ pwd := fmt.Sprintf("%s%d:x:%d:%d:container user:%s:/bin/sh\n", orig, uid, uid, gid, c.WorkingDir())
+ passwdFile, err := c.writeStringToRundir("passwd", pwd)
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to create temporary passwd file")
+ }
+ if os.Chmod(passwdFile, 0644); err != nil {
+ return "", err
+ }
+ return passwdFile, nil
+}
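generatePasswd now accepts a numeric group directly and only falls back to the image's group file for names. A compact sketch of the entry it produces for a numeric user[:group] spec (the working directory is an example value; group-name resolution is omitted):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// passwdLine mirrors the generation above: the user portion of user[:group]
// must be numeric for an entry to be generated; a numeric group is used
// directly, while a group name would have to be resolved against the image's
// /etc/group (not shown here).
func passwdLine(userSpec, workdir string) (string, bool) {
    parts := strings.SplitN(userSpec, ":", 2)
    uid, err := strconv.ParseUint(parts[0], 10, 32)
    if err != nil {
        // Non-numeric user: no passwd entry is generated.
        return "", false
    }
    var gid uint64
    if len(parts) == 2 {
        if g, err := strconv.ParseUint(parts[1], 10, 32); err == nil {
            gid = g
        }
    }
    return fmt.Sprintf("%d:x:%d:%d:container user:%s:/bin/sh\n", uid, uid, gid, workdir), true
}

func main() {
    line, ok := passwdLine("1234:1234", "/")
    fmt.Print(ok, " ", line) // true 1234:x:1234:1234:container user:/:/bin/sh
}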
diff --git a/libpod/container_internal_unsupported.go b/libpod/container_internal_unsupported.go
index eed0449a9..4af0cd56c 100644
--- a/libpod/container_internal_unsupported.go
+++ b/libpod/container_internal_unsupported.go
@@ -28,10 +28,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
return nil, ErrNotImplemented
}
-func (c *Container) checkpoint(ctx context.Context, keep bool) error {
+func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
return ErrNotImplemented
}
-func (c *Container) restore(ctx context.Context, keep bool) error {
+func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) error {
return ErrNotImplemented
}
diff --git a/libpod/errors.go b/libpod/errors.go
index 75b4928da..d6614141c 100644
--- a/libpod/errors.go
+++ b/libpod/errors.go
@@ -11,18 +11,24 @@ var (
ErrNoSuchPod = errors.New("no such pod")
// ErrNoSuchImage indicates the requested image does not exist
ErrNoSuchImage = errors.New("no such image")
+ // ErrNoSuchVolume indicates the requested volume does not exist
+ ErrNoSuchVolume = errors.New("no such volume")
// ErrCtrExists indicates a container with the same name or ID already
// exists
ErrCtrExists = errors.New("container already exists")
// ErrPodExists indicates a pod with the same name or ID already exists
ErrPodExists = errors.New("pod already exists")
- // ErrImageExists indicated an image with the same ID already exists
+ // ErrImageExists indicates an image with the same ID already exists
ErrImageExists = errors.New("image already exists")
+ // ErrVolumeExists indicates a volume with the same name already exists
+ ErrVolumeExists = errors.New("volume already exists")
// ErrCtrStateInvalid indicates a container is in an improper state for
// the requested operation
ErrCtrStateInvalid = errors.New("container state improper")
+ // ErrVolumeBeingUsed indicates that a volume is being used by at least one container
+ ErrVolumeBeingUsed = errors.New("volume is being used")
// ErrRuntimeFinalized indicates that the runtime has already been
// created and cannot be modified
@@ -33,6 +39,9 @@ var (
// ErrPodFinalized indicates that the pod has already been created and
// cannot be modified
ErrPodFinalized = errors.New("pod has been finalized")
+ // ErrVolumeFinalized indicates that the volume has already been created and
+ // cannot be modified
+ ErrVolumeFinalized = errors.New("volume has been finalized")
// ErrInvalidArg indicates that an invalid argument was passed
ErrInvalidArg = errors.New("invalid argument")
@@ -55,6 +64,9 @@ var (
// ErrPodRemoved indicates that the pod has already been removed and no
// further operations can be performed on it
ErrPodRemoved = errors.New("pod has already been removed")
+ // ErrVolumeRemoved indicates that the volume has already been removed and
+ // no further operations can be performed on it
+ ErrVolumeRemoved = errors.New("volume has already been removed")
// ErrDBClosed indicates that the connection to the state database has
// already been closed
diff --git a/libpod/image/docker_registry_options.go b/libpod/image/docker_registry_options.go
index 97a151396..c191a3ca2 100644
--- a/libpod/image/docker_registry_options.go
+++ b/libpod/image/docker_registry_options.go
@@ -19,8 +19,9 @@ type DockerRegistryOptions struct {
// except for ".cert" and ".key" suffixes).
DockerCertPath string
// DockerInsecureSkipTLSVerify turns off verification of TLS
- // certificates and allows connecting to registries without encryption.
- DockerInsecureSkipTLSVerify bool
+ // certificates and allows connecting to registries without encryption
+ // - or forces it on even if registries.conf has the registry configured as insecure.
+ DockerInsecureSkipTLSVerify types.OptionalBool
}
// GetSystemContext constructs a new system context from a parent context. the values in the DockerRegistryOptions, and other parameters.
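Switching DockerInsecureSkipTLSVerify to types.OptionalBool lets callers distinguish "flag not given" from an explicit choice, so registries.conf can keep deciding when the user says nothing. A possible mapping from a CLI flag, assuming the caller tracks whether the flag was actually set:

package registryopts

import (
    "github.com/containers/image/types"
)

// insecureSkipTLSVerifyFromFlag populates the tri-state field: when the user
// never set --tls-verify the value stays undefined and registries.conf
// decides, while an explicit flag overrides the configuration either way.
// The flag semantics here are illustrative.
func insecureSkipTLSVerifyFromFlag(flagChanged, tlsVerify bool) types.OptionalBool {
    if !flagChanged {
        return types.OptionalBoolUndefined
    }
    return types.NewOptionalBool(!tlsVerify)
}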
diff --git a/libpod/image/errors.go b/libpod/image/errors.go
new file mode 100644
index 000000000..4088946cb
--- /dev/null
+++ b/libpod/image/errors.go
@@ -0,0 +1,15 @@
+package image
+
+import (
+ "errors"
+)
+
+// Copied directly from libpod errors to avoid circular imports
+var (
+ // ErrNoSuchCtr indicates the requested container does not exist
+ ErrNoSuchCtr = errors.New("no such container")
+ // ErrNoSuchPod indicates the requested pod does not exist
+ ErrNoSuchPod = errors.New("no such pod")
+ // ErrNoSuchImage indicates the requested image does not exist
+ ErrNoSuchImage = errors.New("no such image")
+)
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 7e520d97e..476d28226 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -125,7 +125,7 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) {
// New creates a new image object where the image could be local
// or remote
-func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, forcePull, forceSecure bool) (*Image, error) {
+func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, forcePull bool) (*Image, error) {
// We don't know if the image is local or not ... check local first
newImage := Image{
InputName: name,
@@ -145,7 +145,7 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, forceSecure)
+ imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions)
if err != nil {
return nil, errors.Wrapf(err, "unable to pull %s", name)
}
@@ -167,7 +167,7 @@ func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.Im
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{}, false)
+ imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{})
if err != nil {
return nil, errors.Wrapf(err, "unable to pull %s", transports.ImageName(srcRef))
}
@@ -252,7 +252,7 @@ func (i *Image) getLocalImage() (*storage.Image, error) {
// The image has a registry name in it and we made sure we looked for it locally
// with a tag. It cannot be local.
if decomposedImage.hasRegistry {
- return nil, errors.Errorf("%s", imageError)
+ return nil, errors.Wrapf(ErrNoSuchImage, imageError)
}
@@ -275,7 +275,7 @@ func (i *Image) getLocalImage() (*storage.Image, error) {
return repoImage, nil
}
- return nil, errors.Wrapf(err, imageError)
+ return nil, errors.Wrapf(ErrNoSuchImage, err.Error())
}
// ID returns the image ID as a string
@@ -498,7 +498,7 @@ func (i *Image) UntagImage(tag string) error {
// PushImageToHeuristicDestination pushes the given image to "destination", which is heuristically parsed.
// Use PushImageToReference if the destination is known precisely.
-func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, forceSecure bool, additionalDockerArchiveTags []reference.NamedTagged) error {
+func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
if destination == "" {
return errors.Wrapf(syscall.EINVAL, "destination image name must be specified")
}
@@ -516,11 +516,11 @@ func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination
return err
}
}
- return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, forceSecure, additionalDockerArchiveTags)
+ return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags)
}
// PushImageToReference pushes the given image to a location described by the given path
-func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, forceSecure bool, additionalDockerArchiveTags []reference.NamedTagged) error {
+func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
sc := GetSystemContext(signaturePolicyPath, authFile, forceCompress)
policyContext, err := getPolicyContext(sc)
@@ -534,23 +534,8 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere
if err != nil {
return errors.Wrapf(err, "error getting source imageReference for %q", i.InputName)
}
- insecureRegistries, err := registries.GetInsecureRegistries()
- if err != nil {
- return err
- }
copyOptions := getCopyOptions(sc, writer, nil, dockerRegistryOptions, signingOptions, manifestMIMEType, additionalDockerArchiveTags)
- if dest.Transport().Name() == DockerTransport {
- imgRef := dest.DockerReference()
- if imgRef == nil { // This should never happen; such references can’t be created.
- return fmt.Errorf("internal error: DockerTransport reference %s does not have a DockerReference", transports.ImageName(dest))
- }
- registry := reference.Domain(imgRef)
-
- if util.StringInSlice(registry, insecureRegistries) && !forceSecure {
- copyOptions.DestinationCtx.DockerInsecureSkipTLSVerify = true
- logrus.Info(fmt.Sprintf("%s is an insecure registry; pushing with tls-verify=false", registry))
- }
- }
+ copyOptions.DestinationCtx.SystemRegistriesConfPath = registries.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
// Copy the image to the remote destination
_, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
if err != nil {
@@ -869,6 +854,7 @@ func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) {
GraphDriver: driver,
ManifestType: manifestType,
User: ociv1Img.Config.User,
+ History: ociv1Img.History,
}
return data, nil
}
diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go
index f187631b4..91bb2411b 100644
--- a/libpod/image/image_test.go
+++ b/libpod/image/image_test.go
@@ -86,9 +86,9 @@ func TestImage_NewFromLocal(t *testing.T) {
// Need images to be present for this test
ir, err := NewImageRuntimeFromOptions(so)
assert.NoError(t, err)
- bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, false, false)
+ bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, false)
assert.NoError(t, err)
- bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, false, false)
+ bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, false)
assert.NoError(t, err)
tm, err := makeLocalMatrix(bb, bbglibc)
@@ -135,7 +135,7 @@ func TestImage_New(t *testing.T) {
// Iterate over the names and delete the image
// after the pull
for _, img := range names {
- newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, false, false)
+ newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, false)
assert.NoError(t, err)
assert.NotEqual(t, newImage.ID(), "")
err = newImage.Remove(false)
@@ -163,7 +163,7 @@ func TestImage_MatchRepoTag(t *testing.T) {
}
ir, err := NewImageRuntimeFromOptions(so)
assert.NoError(t, err)
- newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, false, false)
+ newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, false)
assert.NoError(t, err)
err = newImage.TagImage("foo:latest")
assert.NoError(t, err)
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
new file mode 100644
index 000000000..6a1f160d5
--- /dev/null
+++ b/libpod/image/prune.go
@@ -0,0 +1,26 @@
+package image
+
+// GetPruneImages returns a slice of images that have no names or are not used by any container
+func (ir *Runtime) GetPruneImages() ([]*Image, error) {
+ var (
+ unamedImages []*Image
+ )
+ allImages, err := ir.GetImages()
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range allImages {
+ if len(i.Names()) == 0 {
+ unamedImages = append(unamedImages, i)
+ continue
+ }
+ containers, err := i.Containers()
+ if err != nil {
+ return nil, err
+ }
+ if len(containers) < 1 {
+ unamedImages = append(unamedImages, i)
+ }
+ }
+ return unamedImages, nil
+}
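
The prune criteria above boil down to "no names, or no containers using the image"; a self-contained sketch of that filter, using a stand-in img struct in place of the real *Image type:

    package main

    import "fmt"

    // img is a stand-in for *image.Image: just the bits the prune filter reads.
    type img struct {
        names      []string
        containers int
    }

    // pruneCandidates mirrors the two criteria above: no names, or unused.
    func pruneCandidates(all []img) []img {
        var out []img
        for _, i := range all {
            if len(i.names) == 0 || i.containers < 1 {
                out = append(out, i)
            }
        }
        return out
    }

    func main() {
        images := []img{
            {names: []string{"docker.io/library/busybox:latest"}, containers: 1},
            {names: nil, containers: 0},                                      // dangling
            {names: []string{"localhost/scratchpad:latest"}, containers: 0}, // unused
        }
        fmt.Println(len(pruneCandidates(images))) // 2
    }
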
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index bfa04d069..09935fe7c 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/image/directory"
"github.com/containers/image/docker"
dockerarchive "github.com/containers/image/docker/archive"
- "github.com/containers/image/docker/reference"
"github.com/containers/image/docker/tarfile"
ociarchive "github.com/containers/image/oci/archive"
"github.com/containers/image/pkg/sysregistries"
@@ -19,7 +18,6 @@ import (
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/libpod/pkg/registries"
- "github.com/containers/libpod/pkg/util"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -193,7 +191,7 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
// pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries.
// Use pullImageFromReference if the source is known precisely.
-func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, forceSecure bool) ([]string, error) {
+func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
var goal *pullGoal
sc := GetSystemContext(signaturePolicyPath, authfile, false)
srcRef, err := alltransports.ParseImageName(inputName)
@@ -209,48 +207,33 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
return nil, errors.Wrapf(err, "error determining pull goal for image %q", inputName)
}
}
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, forceSecure)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions)
}
// pullImageFromReference pulls an image from a types.imageReference.
-func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, forceSecure bool) ([]string, error) {
+func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
sc := GetSystemContext(signaturePolicyPath, authfile, false)
goal, err := ir.pullGoalFromImageReference(ctx, srcRef, transports.ImageName(srcRef), sc)
if err != nil {
return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef))
}
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, forceSecure)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions)
}
// doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
-func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, forceSecure bool) ([]string, error) {
+func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
policyContext, err := getPolicyContext(sc)
if err != nil {
return nil, err
}
defer policyContext.Destroy()
- insecureRegistries, err := registries.GetInsecureRegistries()
- if err != nil {
- return nil, err
- }
+ systemRegistriesConfPath := registries.SystemRegistriesConfPath()
var images []string
var pullErrors *multierror.Error
for _, imageInfo := range goal.refPairs {
copyOptions := getCopyOptions(sc, writer, dockerOptions, nil, signingOptions, "", nil)
- if imageInfo.srcRef.Transport().Name() == DockerTransport {
- imgRef := imageInfo.srcRef.DockerReference()
- if imgRef == nil { // This should never happen; such references can’t be created.
- return nil, fmt.Errorf("internal error: DockerTransport reference %s does not have a DockerReference",
- transports.ImageName(imageInfo.srcRef))
- }
- registry := reference.Domain(imgRef)
-
- if util.StringInSlice(registry, insecureRegistries) && !forceSecure {
- copyOptions.SourceCtx.DockerInsecureSkipTLSVerify = true
- logrus.Info(fmt.Sprintf("%s is an insecure registry; pulling with tls-verify=false", registry))
- }
- }
+ copyOptions.SourceCtx.SystemRegistriesConfPath = systemRegistriesConfPath // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
// Print the following statement only when pulling from a docker or atomic registry
if writer != nil && (imageInfo.srcRef.Transport().Name() == DockerTransport || imageInfo.srcRef.Transport().Name() == AtomicTransport) {
io.WriteString(writer, fmt.Sprintf("Trying to pull %s...", imageInfo.image))
@@ -271,7 +254,7 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
}
// If no image was found, we should handle that. Let's be nicer to the user and see if we can figure out why.
if len(images) == 0 {
- registryPath := sysregistries.RegistriesConfPath(&types.SystemContext{})
+ registryPath := sysregistries.RegistriesConfPath(&types.SystemContext{SystemRegistriesConfPath: systemRegistriesConfPath})
if goal.usedSearchRegistries && len(goal.searchedRegistries) == 0 {
return nil, errors.Errorf("image name provided is a short name and no search registries are defined in %s.", registryPath)
}
diff --git a/libpod/image/utils.go b/libpod/image/utils.go
index 9a75ca6dc..b944de1bb 100644
--- a/libpod/image/utils.go
+++ b/libpod/image/utils.go
@@ -2,6 +2,8 @@ package image
import (
"io"
+ "net/url"
+ "regexp"
"strings"
cp "github.com/containers/image/copy"
@@ -117,3 +119,23 @@ func GetAdditionalTags(images []string) ([]reference.NamedTagged, error) {
}
return allTags, nil
}
+
+// IsValidImageURI checks if image name has valid format
+func IsValidImageURI(imguri string) (bool, error) {
+ uri := "http://" + imguri
+ u, err := url.Parse(uri)
+ if err != nil {
+ return false, errors.Wrapf(err, "invalid image uri: %s", imguri)
+ }
+ reg := regexp.MustCompile(`^[a-zA-Z0-9-_\.]+\/?:?[0-9]*[a-z0-9-\/:]*$`)
+ ret := reg.FindAllString(u.Host, -1)
+ if len(ret) == 0 {
+ return false, errors.Errorf("invalid image uri: %s", imguri)
+ }
+ reg = regexp.MustCompile(`^[a-z0-9-:\./]*$`)
+ ret = reg.FindAllString(u.Fragment, -1)
+ if len(ret) == 0 {
+ return false, errors.Errorf("invalid image uri: %s", imguri)
+ }
+ return true, nil
+}
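
A rough standalone illustration of the host check performed by IsValidImageURI above: prefix a scheme so url.Parse can split out the host, then match it against the same character-class pattern (the sample image names here are arbitrary):

    package main

    import (
        "fmt"
        "net/url"
        "regexp"
    )

    // hostRe is the same host pattern used by IsValidImageURI above.
    var hostRe = regexp.MustCompile(`^[a-zA-Z0-9-_\.]+\/?:?[0-9]*[a-z0-9-\/:]*$`)

    func main() {
        for _, name := range []string{"quay.io/libpod/alpine:latest", "??not-a-registry??"} {
            // Prepending a scheme lets url.Parse split the host from the path.
            u, err := url.Parse("http://" + name)
            if err != nil {
                fmt.Println(name, "=> parse error:", err)
                continue
            }
            fmt.Println(name, "=> host ok:", hostRe.MatchString(u.Host))
        }
    }
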
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 78e765ccd..314799309 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -18,8 +18,10 @@ type InMemoryState struct {
pods map[string]*Pod
// Maps container ID to container struct.
containers map[string]*Container
+ volumes map[string]*Volume
// Maps container ID to a list of IDs of dependencies.
- ctrDepends map[string][]string
+ ctrDepends map[string][]string
+ volumeDepends map[string][]string
// Maps pod ID to a map of container ID to container struct.
podContainers map[string]map[string]*Container
// Global name registry - ensures name uniqueness and performs lookups.
@@ -46,8 +48,10 @@ func NewInMemoryState() (State, error) {
state.pods = make(map[string]*Pod)
state.containers = make(map[string]*Container)
+ state.volumes = make(map[string]*Volume)
state.ctrDepends = make(map[string][]string)
+ state.volumeDepends = make(map[string][]string)
state.podContainers = make(map[string]map[string]*Container)
@@ -73,6 +77,18 @@ func (s *InMemoryState) Refresh() error {
return nil
}
+// GetDBConfig is not implemented for in-memory state.
+// As we do not store a config, return an empty one.
+func (s *InMemoryState) GetDBConfig() (*DBConfig, error) {
+ return &DBConfig{}, nil
+}
+
+// ValidateDBConfig is not implemented for the in-memory state.
+// Since we do nothing just return no error.
+func (s *InMemoryState) ValidateDBConfig(runtime *Runtime) error {
+ return nil
+}
+
// SetNamespace sets the namespace for container and pod retrieval.
func (s *InMemoryState) SetNamespace(ns string) error {
s.namespace = ns
@@ -232,6 +248,14 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
s.addCtrToDependsMap(ctr.ID(), depCtr)
}
+ // Add container to volume dependencies
+ for _, vol := range ctr.config.Spec.Mounts {
+ if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
+ volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
+ s.addCtrToVolDependsMap(ctr.ID(), volName)
+ }
+ }
+
return nil
}
@@ -282,6 +306,14 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
s.removeCtrFromDependsMap(ctr.ID(), depCtr)
}
+ // Remove container from volume dependencies
+ for _, vol := range ctr.config.Spec.Mounts {
+ if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
+ volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
+ s.removeCtrFromVolDependsMap(ctr.ID(), volName)
+ }
+ }
+
return nil
}
@@ -346,6 +378,114 @@ func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
return arr, nil
}
+// Volume retrieves a volume from its full name
+func (s *InMemoryState) Volume(name string) (*Volume, error) {
+ if name == "" {
+ return nil, ErrEmptyID
+ }
+
+ vol, ok := s.volumes[name]
+ if !ok {
+ return nil, errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found", name)
+ }
+
+ return vol, nil
+}
+
+// HasVolume checks if a volume with the given name is present in the state
+func (s *InMemoryState) HasVolume(name string) (bool, error) {
+ if name == "" {
+ return false, ErrEmptyID
+ }
+
+ _, ok := s.volumes[name]
+ if !ok {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// AddVolume adds a volume to the state
+func (s *InMemoryState) AddVolume(volume *Volume) error {
+ if !volume.valid {
+ return errors.Wrapf(ErrVolumeRemoved, "volume with name %s is not valid", volume.Name())
+ }
+
+ if _, ok := s.volumes[volume.Name()]; ok {
+ return errors.Wrapf(ErrVolumeExists, "volume with name %s already exists in state", volume.Name())
+ }
+
+ s.volumes[volume.Name()] = volume
+
+ return nil
+}
+
+// RemoveVolume removes a volume from the state
+func (s *InMemoryState) RemoveVolume(volume *Volume) error {
+ // Ensure we don't remove a volume which containers depend on
+ deps, ok := s.volumeDepends[volume.Name()]
+ if ok && len(deps) != 0 {
+ depsStr := strings.Join(deps, ", ")
+ return errors.Wrapf(ErrVolumeExists, "the following containers depend on volume %s: %s", volume.Name(), depsStr)
+ }
+
+ if _, ok := s.volumes[volume.Name()]; !ok {
+ volume.valid = false
+ return errors.Wrapf(ErrVolumeRemoved, "no volume exists in state with name %s", volume.Name())
+ }
+
+ delete(s.volumes, volume.Name())
+
+ return nil
+}
+
+// RemoveVolCtrDep updates the container dependencies of the volume
+func (s *InMemoryState) RemoveVolCtrDep(volume *Volume, ctrID string) error {
+ if !volume.valid {
+ return errors.Wrapf(ErrVolumeRemoved, "volume with name %s is not valid", volume.Name())
+ }
+
+ if _, ok := s.volumes[volume.Name()]; !ok {
+ return errors.Wrapf(ErrNoSuchVolume, "volume with name %s doesn't exist in state", volume.Name())
+ }
+
+ // Remove container that is using this volume
+ s.removeCtrFromVolDependsMap(ctrID, volume.Name())
+
+ return nil
+}
+
+// VolumeInUse checks if the given volume is being used by at least one container
+func (s *InMemoryState) VolumeInUse(volume *Volume) ([]string, error) {
+ if !volume.valid {
+ return nil, ErrVolumeRemoved
+ }
+
+ // If the volume does not exist, return error
+ if _, ok := s.volumes[volume.Name()]; !ok {
+ volume.valid = false
+ return nil, errors.Wrapf(ErrNoSuchVolume, "volume with name %s not found in state", volume.Name())
+ }
+
+ arr, ok := s.volumeDepends[volume.Name()]
+ if !ok {
+ return []string{}, nil
+ }
+
+ return arr, nil
+}
+
+// AllVolumes returns all volumes that exist in the state
+func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
+ allVols := make([]*Volume, 0, len(s.volumes))
+ for _, v := range s.volumes {
+ allVols = append(allVols, v)
+ }
+
+ return allVols, nil
+}
+
// AllContainers retrieves all containers from the state
func (s *InMemoryState) AllContainers() ([]*Container, error) {
ctrs := make([]*Container, 0, len(s.containers))
@@ -933,6 +1073,44 @@ func (s *InMemoryState) removeCtrFromDependsMap(ctrID, dependsID string) {
}
}
+// Add a container to the dependency mappings for the volume
+func (s *InMemoryState) addCtrToVolDependsMap(depCtrID, volName string) {
+ if volName != "" {
+ arr, ok := s.volumeDepends[volName]
+ if !ok {
+ // Do not have a mapping for that volume yet
+ s.volumeDepends[volName] = []string{depCtrID}
+ } else {
+ // Have a mapping for the volume
+ arr = append(arr, depCtrID)
+ s.volumeDepends[volName] = arr
+ }
+ }
+}
+
+// Remove a container from the dependency mappings for the volume
+func (s *InMemoryState) removeCtrFromVolDependsMap(depCtrID, volName string) {
+ if volName != "" {
+ arr, ok := s.volumeDepends[volName]
+ if !ok {
+ // Internal state seems inconsistent
+ // But the dependency is definitely gone
+ // So just return
+ return
+ }
+
+ newArr := make([]string, 0, len(arr))
+
+ for _, id := range arr {
+ if id != depCtrID {
+ newArr = append(newArr, id)
+ }
+ }
+
+ s.volumeDepends[volName] = newArr
+ }
+}
+
// Check if we can access a pod or container, or if that is blocked by
// namespaces.
func (s *InMemoryState) checkNSMatch(id, ns string) error {
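
The volumeDepends bookkeeping added in this file is essentially an index from volume name to the IDs of containers that mount it; a compact sketch of the add/remove behaviour with a stand-in state type:

    package main

    import "fmt"

    // state is a stand-in for InMemoryState, keeping only the volume index:
    // volume name -> IDs of the containers that mount it.
    type state struct {
        volumeDepends map[string][]string
    }

    func (s *state) addDep(ctrID, vol string) {
        s.volumeDepends[vol] = append(s.volumeDepends[vol], ctrID)
    }

    func (s *state) removeDep(ctrID, vol string) {
        deps := s.volumeDepends[vol]
        kept := make([]string, 0, len(deps))
        for _, id := range deps {
            if id != ctrID {
                kept = append(kept, id)
            }
        }
        s.volumeDepends[vol] = kept
    }

    func main() {
        s := &state{volumeDepends: map[string][]string{}}
        s.addDep("ctr1", "data")
        s.addDep("ctr2", "data")
        s.removeDep("ctr1", "data")
        fmt.Println(s.volumeDepends["data"]) // [ctr2]
    }
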
diff --git a/libpod/info.go b/libpod/info.go
index 4cbf3f734..5d8d160c8 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -12,6 +12,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/utils"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
@@ -30,6 +31,7 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) {
info["os"] = runtime.GOOS
info["arch"] = runtime.GOARCH
info["cpus"] = runtime.NumCPU()
+ info["rootless"] = rootless.IsRootless()
mi, err := system.ReadMemInfo()
if err != nil {
return nil, errors.Wrapf(err, "error reading memory info")
diff --git a/libpod/kube.go b/libpod/kube.go
new file mode 100644
index 000000000..c164ca0c5
--- /dev/null
+++ b/libpod/kube.go
@@ -0,0 +1,440 @@
+package libpod
+
+import (
+ "fmt"
+ "math/rand"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/libpod/pkg/lookup"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// GenerateForKube takes a libpod container and generates
+// one v1.Pod description that includes just that single container.
+func (c *Container) GenerateForKube() (*v1.Pod, error) {
+ // Generate the v1.Pod yaml description
+ return simplePodWithV1Container(c)
+}
+
+// GenerateForKube takes a slice of libpod containers and generates
+// one v1.Pod description
+func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
+ // Generate the v1.Pod yaml description
+ var servicePorts []v1.ServicePort
+
+ allContainers, err := p.allContainers()
+ if err != nil {
+ return nil, servicePorts, err
+ }
+ // If the pod has no containers, there is no sense in generating YAML
+ if len(allContainers) == 0 {
+ return nil, servicePorts, errors.Errorf("pod %s has no containers", p.ID())
+ }
+ // If only an infra container is present, it makes no sense to generate YAML
+ if len(allContainers) == 1 && p.HasInfraContainer() {
+ return nil, servicePorts, errors.Errorf("pod %s only has an infra container", p.ID())
+ }
+
+ if p.HasInfraContainer() {
+ infraContainer, err := p.getInfraContainer()
+ if err != nil {
+ return nil, servicePorts, err
+ }
+
+ ports, err := ocicniPortMappingToContainerPort(infraContainer.config.PortMappings)
+ if err != nil {
+ return nil, servicePorts, err
+ }
+ servicePorts = containerPortsToServicePorts(ports)
+ }
+ pod, err := p.podWithContainers(allContainers)
+ return pod, servicePorts, err
+}
+
+func (p *Pod) getInfraContainer() (*Container, error) {
+ infraID, err := p.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+ return p.runtime.LookupContainer(infraID)
+}
+
+// GenerateKubeServiceFromV1Pod creates a v1 service object from a v1 pod object
+func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) v1.Service {
+ service := v1.Service{}
+ selector := make(map[string]string)
+ selector["app"] = pod.Labels["app"]
+ ports := servicePorts
+ if len(ports) == 0 {
+ ports = containersToServicePorts(pod.Spec.Containers)
+ }
+ serviceSpec := v1.ServiceSpec{
+ Ports: ports,
+ Selector: selector,
+ Type: v1.ServiceTypeNodePort,
+ }
+ service.Spec = serviceSpec
+ service.ObjectMeta = pod.ObjectMeta
+ tm := v12.TypeMeta{
+ Kind: "Service",
+ APIVersion: pod.TypeMeta.APIVersion,
+ }
+ service.TypeMeta = tm
+ return service
+}
+
+// containerPortsToServicePorts takes a slice of containerports and generates a
+// slice of service ports
+func containerPortsToServicePorts(containerPorts []v1.ContainerPort) []v1.ServicePort {
+ var sps []v1.ServicePort
+ for _, cp := range containerPorts {
+ nodePort := 30000 + rand.Intn(32767-30000+1)
+ servicePort := v1.ServicePort{
+ Protocol: cp.Protocol,
+ Port: cp.ContainerPort,
+ NodePort: int32(nodePort),
+ Name: strconv.Itoa(int(cp.ContainerPort)),
+ }
+ sps = append(sps, servicePort)
+ }
+ return sps
+}
+
+// containersToServicePorts takes a slice of v1.Containers and generates an
+// inclusive list of serviceports to expose
+func containersToServicePorts(containers []v1.Container) []v1.ServicePort {
+ var sps []v1.ServicePort
+ // Without the call to rand.Seed, a program will produce the same sequence of pseudo-random numbers
+ // for each execution. Legal nodeport range is 30000-32767
+ rand.Seed(time.Now().UnixNano())
+
+ for _, ctr := range containers {
+ sps = append(sps, containerPortsToServicePorts(ctr.Ports)...)
+ }
+ return sps
+}
+
+func (p *Pod) podWithContainers(containers []*Container) (*v1.Pod, error) {
+ var podContainers []v1.Container
+ for _, ctr := range containers {
+ result, err := containerToV1Container(ctr)
+ if err != nil {
+ return nil, err
+ }
+ if !ctr.IsInfra() {
+ podContainers = append(podContainers, result)
+ }
+ }
+
+ return addContainersToPodObject(podContainers, p.Name()), nil
+}
+
+func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod {
+ tm := v12.TypeMeta{
+ Kind: "Pod",
+ APIVersion: "v1",
+ }
+
+ // Add a label called "app" with the container's name as a value
+ labels := make(map[string]string)
+ labels["app"] = removeUnderscores(podName)
+ om := v12.ObjectMeta{
+ // The name of the pod is container_name-libpod
+ Name: fmt.Sprintf("%s-libpod", removeUnderscores(podName)),
+ Labels: labels,
+ // CreationTimestamp seems to be required, so adding it; in doing so, the timestamp
+ // will reflect the time this is run (not container create time) because the conversion
+ // of the container create time to v1 Time is probably not warranted nor worthwhile.
+ CreationTimestamp: v12.Now(),
+ }
+ ps := v1.PodSpec{
+ Containers: containers,
+ }
+ p := v1.Pod{
+ TypeMeta: tm,
+ ObjectMeta: om,
+ Spec: ps,
+ }
+ return &p
+}
+
+// simplePodWithV1Container is a function used by inspect when kube yaml needs to be generated
+// for a single container. We "insert" that container description in a pod.
+func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
+ var containers []v1.Container
+ result, err := containerToV1Container(ctr)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, result)
+ return addContainersToPodObject(containers, ctr.Name()), nil
+
+}
+
+// containerToV1Container converts information we know about a libpod container
+// to a V1.Container specification.
+func containerToV1Container(c *Container) (v1.Container, error) {
+ kubeContainer := v1.Container{}
+ kubeSec, err := generateKubeSecurityContext(c)
+ if err != nil {
+ return kubeContainer, err
+ }
+
+ if len(c.config.Spec.Linux.Devices) > 0 {
+ // TODO Enable when we can support devices and their names
+ devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
+ if err != nil {
+ return kubeContainer, err
+ }
+ kubeContainer.VolumeDevices = devices
+ return kubeContainer, errors.Wrapf(ErrNotImplemented, "linux devices")
+ }
+
+ if len(c.config.UserVolumes) > 0 {
+ // TODO Until we can resolve what the volume name should be, this is disabled
+ // Volume names need to be coordinated "globally" in the kube files.
+ volumes, err := libpodMountsToKubeVolumeMounts(c)
+ if err != nil {
+ return kubeContainer, err
+ }
+ kubeContainer.VolumeMounts = volumes
+ return kubeContainer, errors.Wrapf(ErrNotImplemented, "volume names")
+ }
+
+ envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env)
+ if err != nil {
+ return kubeContainer, nil
+ }
+
+ ports, err := ocicniPortMappingToContainerPort(c.PortMappings())
+ if err != nil {
+ return kubeContainer, nil
+ }
+
+ containerCommands := c.Command()
+ kubeContainer.Name = removeUnderscores(c.Name())
+
+ _, image := c.Image()
+ kubeContainer.Image = image
+ kubeContainer.Stdin = c.Stdin()
+ kubeContainer.Command = containerCommands
+ // TODO need to figure out how we handle command vs entry point. Kube appears to prefer entrypoint.
+ // right now we just take the container's command
+ //container.Args = args
+ kubeContainer.WorkingDir = c.WorkingDir()
+ kubeContainer.Ports = ports
+ // This should not be applicable
+ //container.EnvFromSource =
+ kubeContainer.Env = envVariables
+ // TODO enable resources when we can support naming conventions
+ //container.Resources
+ kubeContainer.SecurityContext = kubeSec
+ kubeContainer.StdinOnce = false
+ kubeContainer.TTY = c.config.Spec.Process.Terminal
+
+ return kubeContainer, nil
+}
+
+// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
+// it to a v1.ContainerPort format for kube output
+func ocicniPortMappingToContainerPort(portMappings []ocicni.PortMapping) ([]v1.ContainerPort, error) {
+ var containerPorts []v1.ContainerPort
+ for _, p := range portMappings {
+ var protocol v1.Protocol
+ switch strings.ToUpper(p.Protocol) {
+ case "TCP":
+ protocol = v1.ProtocolTCP
+ case "UDP":
+ protocol = v1.ProtocolUDP
+ default:
+ return containerPorts, errors.Errorf("unknown network protocol %s", p.Protocol)
+ }
+ cp := v1.ContainerPort{
+ // Name will not be supported
+ HostPort: p.HostPort,
+ HostIP: p.HostIP,
+ ContainerPort: p.ContainerPort,
+ Protocol: protocol,
+ }
+ containerPorts = append(containerPorts, cp)
+ }
+ return containerPorts, nil
+}
+
+// libpodEnvVarsToKubeEnvVars converts a key=value string slice to []v1.EnvVar
+func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
+ var envVars []v1.EnvVar
+ for _, e := range envs {
+ splitE := strings.SplitN(e, "=", 2)
+ if len(splitE) != 2 {
+ return envVars, errors.Errorf("environment variable %s is malformed; should be key=value", e)
+ }
+ ev := v1.EnvVar{
+ Name: splitE[0],
+ Value: splitE[1],
+ }
+ envVars = append(envVars, ev)
+ }
+ return envVars, nil
+}
+
+// Is this worth it?
+func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList) { //nolint
+ // It does not appear we can properly calculate CPU resources from the information
+ // we know in libpod. Libpod knows CPUs by time, shares, etc.
+
+ // We also only know about a memory limit; no memory minimum
+ maxResources := make(map[v1.ResourceName]resource.Quantity)
+ minResources := make(map[v1.ResourceName]resource.Quantity)
+ config := c.Config()
+ maxMem := config.Spec.Linux.Resources.Memory.Limit
+
+ _ = maxMem
+
+ return maxResources, minResources
+}
+
+func generateKubeVolumeMount(hostSourcePath string, mounts []specs.Mount) (v1.VolumeMount, error) {
+ vm := v1.VolumeMount{}
+ for _, m := range mounts {
+ if m.Source == hostSourcePath {
+ // TODO Name is not provided and is required by Kube; therefore, this is disabled earlier
+ //vm.Name =
+ vm.MountPath = m.Source
+ vm.SubPath = m.Destination
+ if util.StringInSlice("ro", m.Options) {
+ vm.ReadOnly = true
+ }
+ return vm, nil
+ }
+ }
+ return vm, errors.New("unable to find mount source")
+}
+
+// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
+func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) {
+ // At this point, I don't think we can distinguish between the default
+ // volume mounts and user added ones. For now, we pass them all.
+ var vms []v1.VolumeMount
+ for _, hostSourcePath := range c.config.UserVolumes {
+ vm, err := generateKubeVolumeMount(hostSourcePath, c.config.Spec.Mounts)
+ if err != nil {
+ return vms, err
+ }
+ vms = append(vms, vm)
+ }
+ return vms, nil
+}
+
+func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
+ var (
+ drop []v1.Capability
+ add []v1.Capability
+ )
+ // Find caps in the defaultCaps but not in the container's
+ // those indicate a dropped cap
+ for _, capability := range defaultCaps {
+ if !util.StringInSlice(capability, containerCaps) {
+ cap := v1.Capability(capability)
+ drop = append(drop, cap)
+ }
+ }
+ // Find caps in the container but not in the defaults; those indicate
+ // an added cap
+ for _, capability := range containerCaps {
+ if !util.StringInSlice(capability, defaultCaps) {
+ cap := v1.Capability(capability)
+ add = append(add, cap)
+ }
+ }
+
+ return &v1.Capabilities{
+ Add: add,
+ Drop: drop,
+ }
+}
+
+func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) {
+ g, err := generate.New("linux")
+ if err != nil {
+ return nil, err
+ }
+ // Combine all the default capabilities into a slice
+ defaultCaps := append(g.Config.Process.Capabilities.Ambient, g.Config.Process.Capabilities.Bounding...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Effective...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Inheritable...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Permitted...)
+
+ // Combine all the container's capabilities into a slice
+ containerCaps := append(caps.Ambient, caps.Bounding...)
+ containerCaps = append(containerCaps, caps.Effective...)
+ containerCaps = append(containerCaps, caps.Inheritable...)
+ containerCaps = append(containerCaps, caps.Permitted...)
+
+ calculatedCaps := determineCapAddDropFromCapabilities(defaultCaps, containerCaps)
+ return calculatedCaps, nil
+}
+
+// generateKubeSecurityContext generates a securityContext based on the existing container
+func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
+ priv := c.Privileged()
+ ro := c.IsReadOnly()
+ allowPrivEscalation := !c.Spec().Process.NoNewPrivileges
+
+ newCaps, err := capAddDrop(c.config.Spec.Process.Capabilities)
+ if err != nil {
+ return nil, err
+ }
+
+ sc := v1.SecurityContext{
+ Capabilities: newCaps,
+ Privileged: &priv,
+ // TODO How do we know if SELinux options were passed into podman
+ //SELinuxOptions:
+ // RunAsNonRoot is an optional parameter; our first implementations should be root only; however
+ // I'm leaving this as a bread-crumb for later
+ //RunAsNonRoot: &nonRoot,
+ ReadOnlyRootFilesystem: &ro,
+ AllowPrivilegeEscalation: &allowPrivEscalation,
+ }
+
+ if c.User() != "" {
+ // It is *possible* that
+ logrus.Debugf("Looking in container for user: %s", c.User())
+ u, err := lookup.GetUser(c.state.Mountpoint, c.User())
+ if err != nil {
+ return nil, err
+ }
+ user := int64(u.Uid)
+ sc.RunAsUser = &user
+ }
+ return &sc, nil
+}
+
+// generateKubeVolumeDeviceFromLinuxDevice takes a list of devices and makes a VolumeDevice struct for kube
+func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) ([]v1.VolumeDevice, error) {
+ var volumeDevices []v1.VolumeDevice
+ for _, d := range devices {
+ vd := v1.VolumeDevice{
+ // TBD How are we going to sync up these names
+ //Name:
+ DevicePath: d.Path,
+ }
+ volumeDevices = append(volumeDevices, vd)
+ }
+ return volumeDevices, nil
+}
+
+func removeUnderscores(s string) string {
+ return strings.Replace(s, "_", "", -1)
+}
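
The capAddDrop/determineCapAddDropFromCapabilities pair above computes a two-way set difference between the default and container capabilities; a small self-contained sketch of the same idea using plain string slices instead of the OCI and Kube types (the capability names are just examples):

    package main

    import "fmt"

    func contains(list []string, s string) bool {
        for _, v := range list {
            if v == s {
                return true
            }
        }
        return false
    }

    // capDiff returns what the container adds beyond the defaults and what it
    // drops relative to them, mirroring the logic above.
    func capDiff(defaults, actual []string) (add, drop []string) {
        for _, c := range defaults {
            if !contains(actual, c) {
                drop = append(drop, c)
            }
        }
        for _, c := range actual {
            if !contains(defaults, c) {
                add = append(add, c)
            }
        }
        return add, drop
    }

    func main() {
        defaults := []string{"CAP_CHOWN", "CAP_KILL", "CAP_NET_BIND_SERVICE"}
        actual := []string{"CAP_CHOWN", "CAP_SYS_ADMIN"}
        add, drop := capDiff(defaults, actual)
        fmt.Println("add:", add, "drop:", drop)
    }
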
diff --git a/libpod/mounts_linux.go b/libpod/mounts_linux.go
new file mode 100644
index 000000000..e6aa09eac
--- /dev/null
+++ b/libpod/mounts_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+
+package libpod
+
+const (
+ // MountPrivate represents the private mount option.
+ MountPrivate = "private"
+ // MountRPrivate represents the rprivate mount option.
+ MountRPrivate = "rprivate"
+ // MountShared represents the shared mount option.
+ MountShared = "shared"
+ // MountRShared represents the rshared mount option.
+ MountRShared = "rshared"
+ // MountSlave represents the slave mount option.
+ MountSlave = "slave"
+ // MountRSlave represents the rslave mount option.
+ MountRSlave = "rslave"
+)
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 863a764e2..43d0a61a4 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -64,20 +64,20 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
}
}()
- networkStatus := make([]*cnitypes.Result, 1)
+ networkStatus := make([]*cnitypes.Result, 0)
for idx, r := range results {
logrus.Debugf("[%d] CNI result: %v", idx, r.String())
resultCurrent, err := cnitypes.GetResult(r)
if err != nil {
return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.String(), err)
}
- networkStatus = append(ctr.state.NetworkStatus, resultCurrent)
+ networkStatus = append(networkStatus, resultCurrent)
}
// Add firewall rules to ensure the container has network access.
// Will not be necessary once CNI firewall plugin merges upstream.
// https://github.com/containernetworking/plugins/pull/75
- for _, netStatus := range ctr.state.NetworkStatus {
+ for _, netStatus := range networkStatus {
firewallConf := &firewall.FirewallNetConf{
PrevResult: netStatus,
}
@@ -90,13 +90,16 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
}
// Create and configure a new network namespace for a container
-func (r *Runtime) createNetNS(ctr *Container) (ns.NetNS, []*cnitypes.Result, error) {
+func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q []*cnitypes.Result, err error) {
ctrNS, err := netns.NewNS()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating network namespace for container %s", ctr.ID())
}
defer func() {
if err != nil {
+ if err2 := netns.UnmountNS(ctrNS); err2 != nil {
+ logrus.Errorf("Error unmounting partially created network namespace for container %s: %v", ctr.ID(), err2)
+ }
if err2 := ctrNS.Close(); err2 != nil {
logrus.Errorf("Error closing partially created network namespace for container %s: %v", ctr.ID(), err2)
}
diff --git a/libpod/oci.go b/libpod/oci.go
index 233bacfbb..093bfdd35 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -227,7 +227,7 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
return files, nil
}
-func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreContainer bool) (err error) {
+func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
var stderrBuf bytes.Buffer
runtimeDir, err := util.GetRootlessRuntimeDir()
@@ -289,8 +289,11 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
args = append(args, "--syslog")
}
- if restoreContainer {
+ if restoreOptions != nil {
args = append(args, "--restore", ctr.CheckpointPath())
+ if restoreOptions.TCPEstablished {
+ args = append(args, "--restore-arg", "--tcp-established")
+ }
}
logrus.WithFields(logrus.Fields{
@@ -316,6 +319,10 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3))
cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4))
cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBPOD_USERNS_CONFIGURED=%s", os.Getenv("_LIBPOD_USERNS_CONFIGURED")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%s", os.Getenv("_LIBPOD_ROOTLESS_UID")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
if r.reservePorts {
ports, err := bindPorts(ctr.config.PortMappings)
@@ -329,7 +336,7 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
cmd.ExtraFiles = append(cmd.ExtraFiles, ports...)
}
- if rootless.IsRootless() {
+ if ctr.config.NetMode.IsSlirp4netns() {
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
if err != nil {
return errors.Wrapf(err, "failed to create rootless network sync pipe")
@@ -350,7 +357,8 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
// Set the label of the conmon process to be level :s0
// This will allow the container processes to talk to fifo-files
// passed into the container by conmon
- plabel, err := selinux.CurrentLabel()
+ var plabel string
+ plabel, err = selinux.CurrentLabel()
if err != nil {
childPipe.Close()
return errors.Wrapf(err, "Failed to get current SELinux label")
@@ -360,7 +368,7 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
runtime.LockOSThread()
if c["level"] != "s0" && c["level"] != "" {
c["level"] = "s0"
- if err := label.SetProcessLabel(c.Get()); err != nil {
+ if err = label.SetProcessLabel(c.Get()); err != nil {
runtime.UnlockOSThread()
return err
}
@@ -583,6 +591,9 @@ func (r *OCIRuntime) startContainer(ctr *Container) error {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
+ if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok {
+ env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify))
+ }
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "start", ctr.ID()); err != nil {
return err
}
@@ -685,8 +696,12 @@ func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error {
// deleteContainer deletes a container from the OCI runtime
func (r *OCIRuntime) deleteContainer(ctr *Container) error {
- _, err := utils.ExecCmd(r.path, "delete", "--force", ctr.ID())
- return err
+ runtimeDir, err := util.GetRootlessRuntimeDir()
+ if err != nil {
+ return err
+ }
+ env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
+ return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "delete", "--force", ctr.ID())
}
// pauseContainer pauses the given container
@@ -740,6 +755,8 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
if tty {
args = append(args, "--tty")
+ } else {
+ args = append(args, "--tty=false")
}
if user != "" {
@@ -843,13 +860,26 @@ func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error {
}
// checkpointContainer checkpoints the given container
-func (r *OCIRuntime) checkpointContainer(ctr *Container) error {
+func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
+ label.SetSocketLabel(ctr.ProcessLabel())
// imagePath is used by CRIU to store the actual checkpoint files
imagePath := ctr.CheckpointPath()
// workPath will be used to store dump.log and stats-dump
workPath := ctr.bundlePath()
logrus.Debugf("Writing checkpoint to %s", imagePath)
logrus.Debugf("Writing checkpoint logs to %s", workPath)
- return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, "checkpoint",
- "--image-path", imagePath, "--work-path", workPath, ctr.ID())
+ args := []string{}
+ args = append(args, "checkpoint")
+ args = append(args, "--image-path")
+ args = append(args, imagePath)
+ args = append(args, "--work-path")
+ args = append(args, workPath)
+ if options.KeepRunning {
+ args = append(args, "--leave-running")
+ }
+ if options.TCPEstablished {
+ args = append(args, "--tcp-established")
+ }
+ args = append(args, ctr.ID())
+ return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, args...)
}
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index 0447670b3..2737a641e 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -19,6 +19,8 @@ import (
"golang.org/x/sys/unix"
)
+const unknownPackage = "Unknown"
+
func (r *OCIRuntime) moveConmonToCgroup(ctr *Container, cgroupParent string, cmd *exec.Cmd) error {
if os.Geteuid() == 0 {
if r.cgroupManager == SystemdCgroupsManager {
@@ -63,10 +65,10 @@ func newPipe() (parent *os.File, child *os.File, err error) {
// CreateContainer creates a container in the OCI runtime
// TODO terminal support for container
// Presently just ignoring conmon opts related to it
-func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreContainer bool) (err error) {
+func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
if ctr.state.UserNSRoot == "" {
// no need of an intermediate mount ns
- return r.createOCIContainer(ctr, cgroupParent, restoreContainer)
+ return r.createOCIContainer(ctr, cgroupParent, restoreOptions)
}
var wg sync.WaitGroup
wg.Add(1)
@@ -74,7 +76,8 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
defer wg.Done()
runtime.LockOSThread()
- fd, err := os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid()))
+ var fd *os.File
+ fd, err = os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid()))
if err != nil {
return
}
@@ -103,7 +106,7 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
if err != nil {
return
}
- err = r.createOCIContainer(ctr, cgroupParent, restoreContainer)
+ err = r.createOCIContainer(ctr, cgroupParent, restoreOptions)
}()
wg.Wait()
@@ -111,7 +114,7 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
}
func rpmVersion(path string) string {
- output := "Unknown"
+ output := unknownPackage
cmd := exec.Command("/usr/bin/rpm", "-q", "-f", path)
if outp, err := cmd.Output(); err == nil {
output = string(outp)
@@ -120,7 +123,7 @@ func rpmVersion(path string) string {
}
func dpkgVersion(path string) string {
- output := "Unknown"
+ output := unknownPackage
cmd := exec.Command("/usr/bin/dpkg", "-S", path)
if outp, err := cmd.Output(); err == nil {
output = string(outp)
@@ -129,14 +132,14 @@ func dpkgVersion(path string) string {
}
func (r *OCIRuntime) pathPackage() string {
- if out := rpmVersion(r.path); out != "Unknown" {
+ if out := rpmVersion(r.path); out != unknownPackage {
return out
}
return dpkgVersion(r.path)
}
func (r *OCIRuntime) conmonPackage() string {
- if out := rpmVersion(r.conmonPath); out != "Unknown" {
+ if out := rpmVersion(r.conmonPath); out != unknownPackage {
return out
}
return dpkgVersion(r.conmonPath)
diff --git a/libpod/oci_unsupported.go b/libpod/oci_unsupported.go
index b133eb402..8c084d1e2 100644
--- a/libpod/oci_unsupported.go
+++ b/libpod/oci_unsupported.go
@@ -15,7 +15,7 @@ func newPipe() (parent *os.File, child *os.File, err error) {
return nil, nil, ErrNotImplemented
}
-func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreContainer bool) (err error) {
+func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
return ErrNotImplemented
}
diff --git a/libpod/options.go b/libpod/options.go
index 8d044313b..9aa657ddd 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -7,6 +7,7 @@ import (
"regexp"
"syscall"
+ "github.com/containers/libpod/pkg/namespaces"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -28,19 +29,59 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
return ErrRuntimeFinalized
}
- rt.config.StorageConfig.RunRoot = config.RunRoot
- rt.config.StorageConfig.GraphRoot = config.GraphRoot
- rt.config.StorageConfig.GraphDriverName = config.GraphDriverName
- rt.config.StaticDir = filepath.Join(config.GraphRoot, "libpod")
+ setField := false
- rt.config.StorageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
- copy(rt.config.StorageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ if config.RunRoot != "" {
+ rt.config.StorageConfig.RunRoot = config.RunRoot
+ rt.configuredFrom.storageRunRootSet = true
+ setField = true
+ }
+
+ if config.GraphRoot != "" {
+ rt.config.StorageConfig.GraphRoot = config.GraphRoot
+ rt.configuredFrom.storageGraphRootSet = true
+
+ // Also set libpod static dir, so we are a subdirectory
+ // of the c/storage store by default
+ rt.config.StaticDir = filepath.Join(config.GraphRoot, "libpod")
+ rt.configuredFrom.libpodStaticDirSet = true
+
+ setField = true
+ }
+
+ if config.GraphDriverName != "" {
+ rt.config.StorageConfig.GraphDriverName = config.GraphDriverName
+ rt.configuredFrom.storageGraphDriverSet = true
+ setField = true
+ }
+
+ if config.GraphDriverOptions != nil {
+ rt.config.StorageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
+ copy(rt.config.StorageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ setField = true
+ }
- rt.config.StorageConfig.UIDMap = make([]idtools.IDMap, len(config.UIDMap))
- copy(rt.config.StorageConfig.UIDMap, config.UIDMap)
+ if config.UIDMap != nil {
+ rt.config.StorageConfig.UIDMap = make([]idtools.IDMap, len(config.UIDMap))
+ copy(rt.config.StorageConfig.UIDMap, config.UIDMap)
+ }
- rt.config.StorageConfig.GIDMap = make([]idtools.IDMap, len(config.GIDMap))
- copy(rt.config.StorageConfig.GIDMap, config.GIDMap)
+ if config.GIDMap != nil {
+ rt.config.StorageConfig.GIDMap = make([]idtools.IDMap, len(config.GIDMap))
+ copy(rt.config.StorageConfig.GIDMap, config.GIDMap)
+ }
+
+ // If any one of runroot, graphroot, graphdrivername,
+ // or graphdriveroptions is set, then GraphRoot and RunRoot
+ // must be set
+ if setField {
+ if rt.config.StorageConfig.GraphRoot == "" {
+ rt.config.StorageConfig.GraphRoot = storage.DefaultStoreOptions.GraphRoot
+ }
+ if rt.config.StorageConfig.RunRoot == "" {
+ rt.config.StorageConfig.RunRoot = storage.DefaultStoreOptions.RunRoot
+ }
+ }
return nil
}
@@ -173,26 +214,26 @@ func WithStaticDir(dir string) RuntimeOption {
}
rt.config.StaticDir = dir
+ rt.configuredFrom.libpodStaticDirSet = true
return nil
}
}
-// WithHooksDir sets the directory to look for OCI runtime hooks config.
-// Note we are not saving this in database, since this is really just for used
-// for testing.
-func WithHooksDir(hooksDir string) RuntimeOption {
+// WithHooksDir sets the directories to look for OCI runtime hook configuration.
+func WithHooksDir(hooksDirs ...string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
return ErrRuntimeFinalized
}
- if hooksDir == "" {
- return errors.Wrap(ErrInvalidArg, "empty-string hook directories are not supported")
+ for _, hooksDir := range hooksDirs {
+ if hooksDir == "" {
+ return errors.Wrap(ErrInvalidArg, "empty-string hook directories are not supported")
+ }
}
- rt.config.HooksDir = []string{hooksDir}
- rt.config.HooksDirNotExistFatal = true
+ rt.config.HooksDir = hooksDirs
return nil
}
}
@@ -225,6 +266,7 @@ func WithTmpDir(dir string) RuntimeOption {
}
rt.config.TmpDir = dir
+ rt.configuredFrom.libpodTmpDirSet = true
return nil
}
@@ -304,6 +346,22 @@ func WithNamespace(ns string) RuntimeOption {
}
}
+// WithVolumePath sets the path under which all named volumes
+// should be created.
+// The path changes based on whether the user is running as root
+// or not.
+func WithVolumePath(volPath string) RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return ErrRuntimeFinalized
+ }
+
+ rt.config.VolumePath = volPath
+
+ return nil
+ }
+}
+
// WithDefaultInfraImage sets the infra image for libpod.
// An infra image is used for inter-container kernel
// namespace sharing within a pod. Typically, an infra
@@ -817,7 +875,7 @@ func WithDependencyCtrs(ctrs []*Container) CtrCreateOption {
// namespace with a minimal configuration.
// An optional array of port mappings can be provided.
// Conflicts with WithNetNSFrom().
-func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, networks []string) CtrCreateOption {
+func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmode string, networks []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return ErrCtrFinalized
@@ -831,6 +889,7 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netwo
ctr.config.CreateNetNS = true
ctr.config.PortMappings = portMappings
ctr.config.Networks = networks
+ ctr.config.NetMode = namespaces.NetworkMode(netmode)
return nil
}
@@ -1101,6 +1160,70 @@ func withIsInfra() CtrCreateOption {
}
}
+// Volume Creation Options
+
+// WithVolumeName sets the name of the volume.
+func WithVolumeName(name string) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return ErrVolumeFinalized
+ }
+
+ // Check the name against a regex
+ if !nameRegex.MatchString(name) {
+ return errors.Wrapf(ErrInvalidArg, "name must match regex [a-zA-Z0-9_-]+")
+ }
+ volume.config.Name = name
+
+ return nil
+ }
+}
+
+// WithVolumeLabels sets the labels of the volume.
+func WithVolumeLabels(labels map[string]string) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return ErrVolumeFinalized
+ }
+
+ volume.config.Labels = make(map[string]string)
+ for key, value := range labels {
+ volume.config.Labels[key] = value
+ }
+
+ return nil
+ }
+}
+
+// WithVolumeDriver sets the driver of the volume.
+func WithVolumeDriver(driver string) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return ErrVolumeFinalized
+ }
+
+ volume.config.Driver = driver
+
+ return nil
+ }
+}
+
+// WithVolumeOptions sets the options of the volume.
+func WithVolumeOptions(options map[string]string) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return ErrVolumeFinalized
+ }
+
+ volume.config.Options = make(map[string]string)
+ for key, value := range options {
+ volume.config.Options[key] = value
+ }
+
+ return nil
+ }
+}
+
// Pod Creation Options
// WithPodName sets the name of the pod.
@@ -1295,3 +1418,14 @@ func WithInfraContainer() PodCreateOption {
return nil
}
}
+
+// WithInfraContainerPorts tells the pod to add port bindings to the pause container
+func WithInfraContainerPorts(bindings []ocicni.PortMapping) PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return ErrPodFinalized
+ }
+ pod.config.InfraContainer.PortBindings = bindings
+ return nil
+ }
+}
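
The volume options above follow the same functional-option pattern as the rest of this file; a minimal generic sketch of the pattern, where volumeConfig, withName, withLabels, and newVolume are stand-ins for the real libpod types and constructor:

    package main

    import "fmt"

    // volumeConfig, volumeCreateOption, withName, withLabels, and newVolume are
    // stand-ins for the real libpod types and constructor.
    type volumeConfig struct {
        Name   string
        Labels map[string]string
    }

    type volumeCreateOption func(*volumeConfig) error

    func withName(name string) volumeCreateOption {
        return func(c *volumeConfig) error {
            c.Name = name
            return nil
        }
    }

    func withLabels(labels map[string]string) volumeCreateOption {
        return func(c *volumeConfig) error {
            c.Labels = labels
            return nil
        }
    }

    // newVolume applies each option in turn, as the runtime does for volumes.
    func newVolume(opts ...volumeCreateOption) (*volumeConfig, error) {
        v := &volumeConfig{}
        for _, opt := range opts {
            if err := opt(v); err != nil {
                return nil, err
            }
        }
        return v, nil
    }

    func main() {
        v, _ := newVolume(withName("data"), withLabels(map[string]string{"app": "db"}))
        fmt.Printf("%+v\n", v)
    }
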
diff --git a/libpod/pod.go b/libpod/pod.go
index 8ac976f6a..07f41f5c6 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -4,6 +4,7 @@ import (
"time"
"github.com/containers/storage"
+ "github.com/cri-o/ocicni/pkg/ocicni"
"github.com/pkg/errors"
)
@@ -96,7 +97,8 @@ type PodContainerInfo struct {
// InfraContainerConfig is the configuration for the pod's infra container
type InfraContainerConfig struct {
- HasInfraContainer bool `json:"makeInfraContainer"`
+ HasInfraContainer bool `json:"makeInfraContainer"`
+ PortBindings []ocicni.PortMapping `json:"infraPortBindings"`
}
// ID retrieves the pod's ID
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 3d5512e8c..cbac2420f 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -62,7 +62,13 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
return nil, nil
}
-// Stop stops all containers within a pod that are not already stopped
+// Stop stops all containers within a pod using each container's configured
+// stop timeout (it calls StopWithTimeout with a timeout of -1).
+func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error) {
+ return p.StopWithTimeout(ctx, cleanup, -1)
+}
+
+// StopWithTimeout stops all containers within a pod that are not already stopped
// Each container will use its own stop timeout
// Only running containers will be stopped. Paused, stopped, or created
// containers will be ignored.
@@ -77,7 +83,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrCtrExists
// If both error and the map are nil, all containers were stopped without error
-func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error) {
+func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -110,8 +116,11 @@ func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error)
ctr.lock.Unlock()
continue
}
-
- if err := ctr.stop(ctr.config.StopTimeout); err != nil {
+ stopTimeout := ctr.config.StopTimeout
+ if timeout > -1 {
+ stopTimeout = uint(timeout)
+ }
+ if err := ctr.stop(stopTimeout); err != nil {
ctr.lock.Unlock()
ctrErrors[ctr.ID()] = err
continue
diff --git a/libpod/pod_easyjson.go b/libpod/pod_easyjson.go
index 6c1c939f3..8ea9a5e72 100644
--- a/libpod/pod_easyjson.go
+++ b/libpod/pod_easyjson.go
@@ -6,6 +6,7 @@ package libpod
import (
json "encoding/json"
+ ocicni "github.com/cri-o/ocicni/pkg/ocicni"
easyjson "github.com/mailru/easyjson"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -721,6 +722,29 @@ func easyjsonBe091417DecodeGithubComContainersLibpodLibpod5(in *jlexer.Lexer, ou
switch key {
case "makeInfraContainer":
out.HasInfraContainer = bool(in.Bool())
+ case "infraPortBindings":
+ if in.IsNull() {
+ in.Skip()
+ out.PortBindings = nil
+ } else {
+ in.Delim('[')
+ if out.PortBindings == nil {
+ if !in.IsDelim(']') {
+ out.PortBindings = make([]ocicni.PortMapping, 0, 1)
+ } else {
+ out.PortBindings = []ocicni.PortMapping{}
+ }
+ } else {
+ out.PortBindings = (out.PortBindings)[:0]
+ }
+ for !in.IsDelim(']') {
+ var v6 ocicni.PortMapping
+ easyjsonBe091417DecodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(in, &v6)
+ out.PortBindings = append(out.PortBindings, v6)
+ in.WantComma()
+ }
+ in.Delim(']')
+ }
default:
in.SkipRecursive()
}
@@ -745,5 +769,109 @@ func easyjsonBe091417EncodeGithubComContainersLibpodLibpod5(out *jwriter.Writer,
}
out.Bool(bool(in.HasInfraContainer))
}
+ {
+ const prefix string = ",\"infraPortBindings\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ if in.PortBindings == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 {
+ out.RawString("null")
+ } else {
+ out.RawByte('[')
+ for v7, v8 := range in.PortBindings {
+ if v7 > 0 {
+ out.RawByte(',')
+ }
+ easyjsonBe091417EncodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(out, v8)
+ }
+ out.RawByte(']')
+ }
+ }
+ out.RawByte('}')
+}
+func easyjsonBe091417DecodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(in *jlexer.Lexer, out *ocicni.PortMapping) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "hostPort":
+ out.HostPort = int32(in.Int32())
+ case "containerPort":
+ out.ContainerPort = int32(in.Int32())
+ case "protocol":
+ out.Protocol = string(in.String())
+ case "hostIP":
+ out.HostIP = string(in.String())
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjsonBe091417EncodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(out *jwriter.Writer, in ocicni.PortMapping) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ {
+ const prefix string = ",\"hostPort\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int32(int32(in.HostPort))
+ }
+ {
+ const prefix string = ",\"containerPort\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int32(int32(in.ContainerPort))
+ }
+ {
+ const prefix string = ",\"protocol\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Protocol))
+ }
+ {
+ const prefix string = ",\"hostIP\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.HostIP))
+ }
out.RawByte('}')
}
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 46162c7ef..39a25c004 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -48,7 +48,7 @@ func (p *Pod) updatePod() error {
// Save pod state to database
func (p *Pod) save() error {
if err := p.runtime.state.SavePod(p); err != nil {
- return errors.Wrapf(err, "error saving pod %s state")
+ return errors.Wrapf(err, "error saving pod %s state", p.ID())
}
return nil
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 318cd0369..2dfebf565 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -12,7 +12,6 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/firewall"
- "github.com/containers/libpod/pkg/hooks"
sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
@@ -84,6 +83,7 @@ type Runtime struct {
lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
+ configuredFrom *runtimeConfiguredFrom
}
// RuntimeConfig contains configuration options used to set up the runtime
@@ -92,6 +92,7 @@ type RuntimeConfig struct {
// Not included in on-disk config, use the dedicated containers/storage
// configuration file instead
StorageConfig storage.StoreOptions `toml:"-"`
+ VolumePath string `toml:"volume_path"`
// ImageDefaultTransport is the default transport method used to fetch
// images
ImageDefaultTransport string `toml:"image_default_transport"`
@@ -141,11 +142,11 @@ type RuntimeConfig struct {
// CNIDefaultNetwork is the network name of the default CNI network
// to attach pods to
CNIDefaultNetwork string `toml:"cni_default_network,omitempty"`
- // HooksDir Path to the directory containing hooks configuration files
+ // HooksDir holds paths to the directories containing hooks
+ // configuration files. When the same filename is present in
+ // multiple directories, the file in the directory listed last in
+ // this slice takes precedence.
HooksDir []string `toml:"hooks_dir"`
- // HooksDirNotExistFatal switches between fatal errors and non-fatal
- // warnings if the configured HooksDir does not exist.
- HooksDirNotExistFatal bool `toml:"hooks_dir_not_exist_fatal"`
// DefaultMountsFile is the path to the default mounts file for testing
// purposes only
DefaultMountsFile string `toml:"-"`
@@ -175,6 +176,20 @@ type RuntimeConfig struct {
EnableLabeling bool `toml:"label"`
}
+// runtimeConfiguredFrom is a struct used during early runtime init to help
+// assemble the full RuntimeConfig struct from defaults.
+// It indicates whether several fields in the runtime configuration were set
+// explicitly.
+// If they were not, we may override them with information from the database,
+// if it exists and differs from what is present in the system already.
+type runtimeConfiguredFrom struct {
+ storageGraphDriverSet bool
+ storageGraphRootSet bool
+ storageRunRootSet bool
+ libpodStaticDirSet bool
+ libpodTmpDirSet bool
+}
+
var (
defaultRuntimeConfig = RuntimeConfig{
// Leave this empty so containers/storage will use its defaults
@@ -203,7 +218,6 @@ var (
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
},
CgroupManager: SystemdCgroupsManager,
- HooksDir: []string{hooks.DefaultDir, hooks.OverrideDir},
StaticDir: filepath.Join(storage.DefaultStoreOptions.GraphRoot, "libpod"),
TmpDir: "",
MaxLogSize: -1,
@@ -253,6 +267,7 @@ func SetXdgRuntimeDir(val string) error {
func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
runtime = new(Runtime)
runtime.config = new(RuntimeConfig)
+ runtime.configuredFrom = new(runtimeConfiguredFrom)
// Copy the default configuration
tmpDir, err := getDefaultTmpDir()
@@ -262,8 +277,20 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
deepcopier.Copy(defaultRuntimeConfig).To(runtime.config)
runtime.config.TmpDir = tmpDir
+ if rootless.IsRootless() {
+ // If we're rootless, override the default storage config
+ storageConf, volumePath, err := util.GetDefaultStoreOptions()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving rootless storage config")
+ }
+ runtime.config.StorageConfig = storageConf
+ runtime.config.StaticDir = filepath.Join(storageConf.GraphRoot, "libpod")
+ runtime.config.VolumePath = volumePath
+ }
+
configPath := ConfigPath
foundConfig := true
+ rootlessConfigPath := ""
if rootless.IsRootless() {
home := os.Getenv("HOME")
if runtime.config.SignaturePolicyPath == "" {
@@ -272,7 +299,10 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
runtime.config.SignaturePolicyPath = newPath
}
}
- configPath = filepath.Join(home, ".config/containers/libpod.conf")
+
+ rootlessConfigPath = filepath.Join(home, ".config/containers/libpod.conf")
+
+ configPath = rootlessConfigPath
if _, err := os.Stat(configPath); err != nil {
foundConfig = false
}
@@ -303,6 +333,25 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
if err != nil {
return nil, errors.Wrapf(err, "error reading configuration file %s", configPath)
}
+
+ // This is ugly, but we need to decode twice.
+ // Once to check if libpod static and tmp dirs were explicitly
+ // set (checking against the default value is not enough, since they
+ // might have been explicitly configured to the default).
+ // A second time to actually get a usable config.
+ tmpConfig := new(RuntimeConfig)
+ if _, err := toml.Decode(string(contents), tmpConfig); err != nil {
+ return nil, errors.Wrapf(err, "error decoding configuration file %s",
+ configPath)
+ }
+
+ if tmpConfig.StaticDir != "" {
+ runtime.configuredFrom.libpodStaticDirSet = true
+ }
+ if tmpConfig.TmpDir != "" {
+ runtime.configuredFrom.libpodTmpDirSet = true
+ }
+
if _, err := toml.Decode(string(contents), runtime.config); err != nil {
return nil, errors.Wrapf(err, "error decoding configuration file %s", configPath)
}
@@ -317,6 +366,22 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
if err := makeRuntime(runtime); err != nil {
return nil, err
}
+
+ if !foundConfig && rootlessConfigPath != "" {
+ os.MkdirAll(filepath.Dir(rootlessConfigPath), 0755)
+ file, err := os.OpenFile(rootlessConfigPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil && !os.IsExist(err) {
+ return nil, errors.Wrapf(err, "cannot open file %s", rootlessConfigPath)
+ }
+ if err == nil {
+ defer file.Close()
+ enc := toml.NewEncoder(file)
+ if err := enc.Encode(runtime.config); err != nil {
+ os.Remove(rootlessConfigPath)
+ }
+ }
+ }
+
return runtime, nil
}
@@ -328,6 +393,7 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
func NewRuntimeFromConfig(configPath string, options ...RuntimeOption) (runtime *Runtime, err error) {
runtime = new(Runtime)
runtime.config = new(RuntimeConfig)
+ runtime.configuredFrom = new(runtimeConfiguredFrom)
// Set two fields not in the TOML config
runtime.config.StateType = defaultRuntimeConfig.StateType
@@ -406,6 +472,77 @@ func makeRuntime(runtime *Runtime) (err error) {
runtime.config.ConmonPath)
}
+ // Make the static files directory if it does not exist
+ if err := os.MkdirAll(runtime.config.StaticDir, 0700); err != nil {
+ // The directory is allowed to exist
+ if !os.IsExist(err) {
+ return errors.Wrapf(err, "error creating runtime static files directory %s",
+ runtime.config.StaticDir)
+ }
+ }
+
+ // Set up the state
+ switch runtime.config.StateType {
+ case InMemoryStateStore:
+ state, err := NewInMemoryState()
+ if err != nil {
+ return err
+ }
+ runtime.state = state
+ case SQLiteStateStore:
+ return errors.Wrapf(ErrInvalidArg, "SQLite state is currently disabled")
+ case BoltDBStateStore:
+ dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db")
+
+ state, err := NewBoltState(dbPath, runtime)
+ if err != nil {
+ return err
+ }
+ runtime.state = state
+ default:
+ return errors.Wrapf(ErrInvalidArg, "unrecognized state type passed")
+ }
+
+ // Grab config from the database so we can reset some defaults
+ dbConfig, err := runtime.state.GetDBConfig()
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving runtime configuration from database")
+ }
+
+ // Reset defaults if they were not explicitly set
+ if !runtime.configuredFrom.storageGraphDriverSet && dbConfig.GraphDriver != "" {
+ runtime.config.StorageConfig.GraphDriverName = dbConfig.GraphDriver
+ }
+ if !runtime.configuredFrom.storageGraphRootSet && dbConfig.StorageRoot != "" {
+ runtime.config.StorageConfig.GraphRoot = dbConfig.StorageRoot
+ }
+ if !runtime.configuredFrom.storageRunRootSet && dbConfig.StorageTmp != "" {
+ runtime.config.StorageConfig.RunRoot = dbConfig.StorageTmp
+ }
+ if !runtime.configuredFrom.libpodStaticDirSet && dbConfig.LibpodRoot != "" {
+ runtime.config.StaticDir = dbConfig.LibpodRoot
+ }
+ if !runtime.configuredFrom.libpodTmpDirSet && dbConfig.LibpodTmp != "" {
+ runtime.config.TmpDir = dbConfig.LibpodTmp
+ }
+
+ logrus.Debugf("Using graph driver %s", runtime.config.StorageConfig.GraphDriverName)
+ logrus.Debugf("Using graph root %s", runtime.config.StorageConfig.GraphRoot)
+ logrus.Debugf("Using run root %s", runtime.config.StorageConfig.RunRoot)
+ logrus.Debugf("Using static dir %s", runtime.config.StaticDir)
+ logrus.Debugf("Using tmp dir %s", runtime.config.TmpDir)
+
+ // Validate our config against the database, now that we've set our
+ // final storage configuration
+ if err := runtime.state.ValidateDBConfig(runtime); err != nil {
+ return err
+ }
+
+ if err := runtime.state.SetNamespace(runtime.config.Namespace); err != nil {
+ return errors.Wrapf(err, "error setting libpod namespace in state")
+ }
+ logrus.Debugf("Set libpod namespace to %q", runtime.config.Namespace)
+
// Set up containers/storage
var store storage.Store
if rootless.SkipStorageSetup() {
@@ -473,15 +610,6 @@ func makeRuntime(runtime *Runtime) (err error) {
}
runtime.ociRuntime = ociRuntime
- // Make the static files directory if it does not exist
- if err := os.MkdirAll(runtime.config.StaticDir, 0755); err != nil {
- // The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating runtime static files directory %s",
- runtime.config.StaticDir)
- }
- }
-
// Make a directory to hold container lockfiles
lockDir := filepath.Join(runtime.config.TmpDir, "lock")
if err := os.MkdirAll(lockDir, 0755); err != nil {
@@ -503,11 +631,13 @@ func makeRuntime(runtime *Runtime) (err error) {
}
// Set up the CNI net plugin
- netPlugin, err := ocicni.InitCNI(runtime.config.CNIDefaultNetwork, runtime.config.CNIConfigDir, runtime.config.CNIPluginDir...)
- if err != nil {
- return errors.Wrapf(err, "error configuring CNI network plugin")
+ if !rootless.IsRootless() {
+ netPlugin, err := ocicni.InitCNI(runtime.config.CNIDefaultNetwork, runtime.config.CNIConfigDir, runtime.config.CNIPluginDir...)
+ if err != nil {
+ return errors.Wrapf(err, "error configuring CNI network plugin")
+ }
+ runtime.netPlugin = netPlugin
}
- runtime.netPlugin = netPlugin
// Set up a firewall backend
backendType := ""
@@ -520,33 +650,6 @@ func makeRuntime(runtime *Runtime) (err error) {
}
runtime.firewallBackend = fwBackend
- // Set up the state
- switch runtime.config.StateType {
- case InMemoryStateStore:
- state, err := NewInMemoryState()
- if err != nil {
- return err
- }
- runtime.state = state
- case SQLiteStateStore:
- return errors.Wrapf(ErrInvalidArg, "SQLite state is currently disabled")
- case BoltDBStateStore:
- dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db")
-
- state, err := NewBoltState(dbPath, runtime.lockDir, runtime)
- if err != nil {
- return err
- }
- runtime.state = state
- default:
- return errors.Wrapf(ErrInvalidArg, "unrecognized state type passed")
- }
-
- if err := runtime.state.SetNamespace(runtime.config.Namespace); err != nil {
- return errors.Wrapf(err, "error setting libpod namespace in state")
- }
- logrus.Debugf("Set libpod namespace to %q", runtime.config.Namespace)
-
// We now need to see if the system has restarted
// We check for the presence of a file in our tmp directory to verify this
// This check must be locked to prevent races
@@ -774,3 +877,8 @@ func (r *Runtime) generateName() (string, error) {
func (r *Runtime) ImageRuntime() *image.Runtime {
return r.imageRuntime
}
+
+// SystemContext returns the imagecontext
+func (r *Runtime) SystemContext() *types.SystemContext {
+ return r.imageContext
+}
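
The double-decode trick above (detect whether a field was explicitly set before letting database values override it) can be shown in isolation. This is a standalone sketch with hypothetical field names, not the libpod config itself.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	StaticDir string `toml:"static_dir"`
	TmpDir    string `toml:"tmp_dir"`
}

func main() {
	contents := `static_dir = "/var/lib/example"`

	// First decode: only to learn which keys the user actually set.
	probe := new(config)
	if _, err := toml.Decode(contents, probe); err != nil {
		panic(err)
	}
	staticDirSet := probe.StaticDir != ""

	// Second decode: onto a struct pre-filled with defaults, producing the usable config.
	cfg := &config{StaticDir: "/default/static", TmpDir: "/default/tmp"}
	if _, err := toml.Decode(contents, cfg); err != nil {
		panic(err)
	}

	// Database-provided values only override fields the user did not set explicitly.
	dbStaticDir := "/from/database"
	if !staticDirSet && dbStaticDir != "" {
		cfg.StaticDir = dbStaticDir
	}
	fmt.Println(cfg.StaticDir, cfg.TmpDir)
}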
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 09dc7c48b..ba8eaacbe 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -154,6 +154,24 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
}()
+ // Go through the volume mounts and check for named volumes
+ // If the named volume already exists, continue; otherwise create
+ // the storage for the named volume.
+ for i, vol := range ctr.config.Spec.Mounts {
+ if vol.Source[0] != '/' && isNamedVolume(vol.Source) {
+ volInfo, err := r.state.Volume(vol.Source)
+ if err != nil {
+ newVol, err := r.newVolume(ctx, WithVolumeName(vol.Source))
+ if err != nil {
+ logrus.Errorf("error creating named volume %q: %v", vol.Source, err)
+ }
+ ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
+ continue
+ }
+ ctr.config.Spec.Mounts[i].Source = volInfo.MountPoint()
+ }
+ }
+
if ctr.config.LogPath == "" {
ctr.config.LogPath = filepath.Join(ctr.config.StaticDir, "ctr.log")
}
@@ -170,6 +188,7 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
}
+
// Add the container to the state
// TODO: May be worth looking into recovering from name/ID collisions here
if ctr.config.Pod != "" {
@@ -246,7 +265,19 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
}
if c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is paused, cannot remove until unpaused", c.ID())
+ if !force {
+ return errors.Wrapf(ErrCtrStateInvalid, "container %s is paused, cannot remove until unpaused", c.ID())
+ }
+ if err := c.runtime.ociRuntime.killContainer(c, 9); err != nil {
+ return err
+ }
+ if err := c.unpause(); err != nil {
+ return err
+ }
+ // Need to update container state to make sure we know it's stopped
+ if err := c.waitForExitFileAndSync(); err != nil {
+ return err
+ }
}
// Check that the container's in a good state to be removed
@@ -462,3 +493,11 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
}
return ctrs[lastCreatedIndex], nil
}
+
+// Check if volName is a named volume and not one of the default mounts we add to containers
+func isNamedVolume(volName string) bool {
+ if volName != "proc" && volName != "tmpfs" && volName != "devpts" && volName != "shm" && volName != "mqueue" && volName != "sysfs" && volName != "cgroup" {
+ return true
+ }
+ return false
+}
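
A standalone sketch of the named-volume detection above: a mount source that is not an absolute path and not one of the default pseudo-filesystem sources is treated as a named volume. The helper mirrors isNamedVolume; the surrounding test harness is illustrative.

package main

import "fmt"

// isNamedVolume mirrors the check added to newContainer: the default
// pseudo-filesystem mounts are never treated as named volumes.
func isNamedVolume(volName string) bool {
	switch volName {
	case "proc", "tmpfs", "devpts", "shm", "mqueue", "sysfs", "cgroup":
		return false
	}
	return true
}

func main() {
	for _, src := range []string{"mydata", "proc", "/host/path", "shm"} {
		named := src[0] != '/' && isNamedVolume(src)
		fmt.Printf("%-12s named volume: %v\n", src, named)
	}
}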
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index be8711734..66844bb31 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -3,50 +3,15 @@ package libpod
import (
"context"
"fmt"
- "io"
"github.com/containers/buildah/imagebuildah"
- "github.com/containers/libpod/libpod/common"
"github.com/containers/libpod/libpod/image"
"github.com/containers/storage"
- "github.com/containers/storage/pkg/archive"
- ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Runtime API
-// CopyOptions contains the options given when pushing or pulling images
-type CopyOptions struct {
- // Compression specifies the type of compression which is applied to
- // layer blobs. The default is to not use compression, but
- // archive.Gzip is recommended.
- Compression archive.Compression
- // DockerRegistryOptions encapsulates settings that affect how we
- // connect or authenticate to a remote registry to which we want to
- // push the image.
- common.DockerRegistryOptions
- // SigningOptions encapsulates settings that control whether or not we
- // strip or add signatures to the image when pushing (uploading) the
- // image to a registry.
- common.SigningOptions
-
- // SigningPolicyPath this points to a alternative signature policy file, used mainly for testing
- SignaturePolicyPath string
- // AuthFile is the path of the cached credentials file defined by the user
- AuthFile string
- // Writer is the reportWriter for the output
- Writer io.Writer
- // Reference is the name for the image created when a tar archive is imported
- Reference string
- // ImageConfig is the Image spec for the image created when a tar archive is imported
- ImageConfig ociv1.Image
- // ManifestMIMEType is the manifest type of the image when saving to a directory
- ManifestMIMEType string
- // ForceCompress compresses the image layers when saving to a directory using the dir transport if true
- ForceCompress bool
-}
-
// RemoveImage deletes an image from local storage
// Images being used by running containers can only be removed if force=true
func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool) (string, error) {
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index fea79e994..5e1051150 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -7,7 +7,6 @@ import (
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
- "github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
)
@@ -50,9 +49,12 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID
options = append(options, withIsInfra())
// Since user namespace sharing is not implemented, we only need to check if it's rootless
- portMappings := make([]ocicni.PortMapping, 0)
networks := make([]string, 0)
- options = append(options, WithNetNS(portMappings, isRootless, networks))
+ netmode := "bridge"
+ if isRootless {
+ netmode = "slirp4netns"
+ }
+ options = append(options, WithNetNS(p.config.InfraContainer.PortBindings, isRootless, netmode, networks))
return r.newContainer(ctx, g.Config, options...)
}
@@ -65,7 +67,7 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container,
return nil, ErrRuntimeStopped
}
- newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, false, false)
+ newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, false)
if err != nil {
return nil, err
}
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
new file mode 100644
index 000000000..3921758ee
--- /dev/null
+++ b/libpod/runtime_volume.go
@@ -0,0 +1,107 @@
+package libpod
+
+import (
+ "context"
+)
+
+// Contains the public Runtime API for volumes
+
+// A VolumeCreateOption is a functional option which alters the Volume created by
+// NewVolume
+type VolumeCreateOption func(*Volume) error
+
+// VolumeFilter is a function to determine whether a volume is included in command
+// output. Volumes to be outputted are tested using the function. a true return will
+// include the volume, a false return will exclude it.
+type VolumeFilter func(*Volume) bool
+
+// RemoveVolume removes a volume
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return ErrRuntimeStopped
+ }
+
+ if !v.valid {
+ if ok, _ := r.state.HasVolume(v.Name()); !ok {
+ // Volume probably already removed
+ // Or was never in the runtime to begin with
+ return nil
+ }
+ }
+
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
+ return r.removeVolume(ctx, v, force, prune)
+}
+
+// GetVolume retrieves a volume by its name
+func (r *Runtime) GetVolume(name string) (*Volume, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ return r.state.Volume(name)
+}
+
+// HasVolume checks to see if a volume with the given name exists
+func (r *Runtime) HasVolume(name string) (bool, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return false, ErrRuntimeStopped
+ }
+
+ return r.state.HasVolume(name)
+}
+
+// Volumes retrieves all volumes
+// Filters can be provided which will determine which volumes are included in the
+// output. Multiple filters are handled by ANDing their output, so only volumes
+// matching all filters are returned
+func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ vols, err := r.state.AllVolumes()
+ if err != nil {
+ return nil, err
+ }
+
+ volsFiltered := make([]*Volume, 0, len(vols))
+ for _, vol := range vols {
+ include := true
+ for _, filter := range filters {
+ include = include && filter(vol)
+ }
+
+ if include {
+ volsFiltered = append(volsFiltered, vol)
+ }
+ }
+
+ return volsFiltered, nil
+}
+
+// GetAllVolumes retrieves all the volumes
+func (r *Runtime) GetAllVolumes() ([]*Volume, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ return r.state.AllVolumes()
+}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
new file mode 100644
index 000000000..5cc0938f0
--- /dev/null
+++ b/libpod/runtime_volume_linux.go
@@ -0,0 +1,132 @@
+// +build linux
+
+package libpod
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// NewVolume creates a new empty volume
+func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+ return r.newVolume(ctx, options...)
+}
+
+// newVolume creates a new empty volume
+func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+ volume, err := newVolume(r)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating volume")
+ }
+
+ for _, option := range options {
+ if err := option(volume); err != nil {
+ return nil, errors.Wrapf(err, "error running volume create option")
+ }
+ }
+
+ if volume.config.Name == "" {
+ volume.config.Name = stringid.GenerateNonCryptoID()
+ }
+ // TODO: support for other volume drivers
+ if volume.config.Driver == "" {
+ volume.config.Driver = "local"
+ }
+ // TODO: determine when the scope is global and set it to that
+ if volume.config.Scope == "" {
+ volume.config.Scope = "local"
+ }
+
+ // Create the mountpoint of this volume
+ fullVolPath := filepath.Join(r.config.VolumePath, volume.config.Name, "_data")
+ if err := os.MkdirAll(fullVolPath, 0755); err != nil {
+ return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath)
+ }
+ _, mountLabel, err := label.InitLabels([]string{})
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting default mountlabels")
+ }
+ if err := label.ReleaseLabel(mountLabel); err != nil {
+ return nil, errors.Wrapf(err, "error releasing label %q", mountLabel)
+ }
+ if err := label.Relabel(fullVolPath, mountLabel, true); err != nil {
+ return nil, errors.Wrapf(err, "error setting selinux label to %q", fullVolPath)
+ }
+ volume.config.MountPoint = fullVolPath
+
+ // Path our lock file will reside at
+ lockPath := filepath.Join(r.lockDir, volume.config.Name)
+ // Grab a lockfile at the given path
+ lock, err := storage.GetLockfile(lockPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating lockfile for new volume")
+ }
+ volume.lock = lock
+
+ volume.valid = true
+
+ // Add the volume to state
+ if err := r.state.AddVolume(volume); err != nil {
+ return nil, errors.Wrapf(err, "error adding volume to state")
+ }
+
+ return volume, nil
+}
+
+// removeVolume removes the specified volume from state, as well as tearing down its mountpoint and storage
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+ if !v.valid {
+ return ErrNoSuchVolume
+ }
+
+ deps, err := r.state.VolumeInUse(v)
+ if err != nil {
+ return err
+ }
+ if len(deps) != 0 {
+ if prune {
+ return ErrVolumeBeingUsed
+ }
+ depsStr := strings.Join(deps, ", ")
+ if !force {
+ return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+ }
+ // If using force, log the warning that the volume is being used by at least one container
+ logrus.Warnf("volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+ // Remove the container dependencies so we can go ahead and delete the volume
+ for _, dep := range deps {
+ if err := r.state.RemoveVolCtrDep(v, dep); err != nil {
+ return errors.Wrapf(err, "unable to remove container dependency %q from volume %q while trying to delete volume by force", dep, v.Name())
+ }
+ }
+ }
+
+ // Delete the mountpoint path of the volume, that is, remove the volume's directory from /var/lib/containers/storage/volumes
+ if err := v.teardownStorage(); err != nil {
+ return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ }
+
+ // Remove the volume from the state
+ if err := r.state.RemoveVolume(v); err != nil {
+ return errors.Wrapf(err, "error removing volume %s", v.Name())
+ }
+
+ // Set volume as invalid so it can no longer be used
+ v.valid = false
+
+ return nil
+}
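
A hedged sketch of creating a named volume through the new public API; it assumes an initialized *libpod.Runtime and uses the WithVolumeName option referenced elsewhere in this patch.

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/libpod"
)

// createDataVolume is illustrative only; rt is assumed to be a runtime
// created via libpod.NewRuntime elsewhere.
func createDataVolume(ctx context.Context, rt *libpod.Runtime) error {
	vol, err := rt.NewVolume(ctx, libpod.WithVolumeName("mydata"))
	if err != nil {
		return err
	}
	// The mountpoint lives under the runtime's VolumePath,
	// e.g. <volume_path>/mydata/_data on the host.
	fmt.Println(vol.Name(), vol.MountPoint())
	return nil
}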
diff --git a/libpod/state.go b/libpod/state.go
index 273e81318..88d89f673 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -1,5 +1,15 @@
package libpod
+// DBConfig is a set of Libpod runtime configuration settings that are saved
+// in a State when it is first created, and can subsequently be retrieved.
+type DBConfig struct {
+ LibpodRoot string
+ LibpodTmp string
+ StorageRoot string
+ StorageTmp string
+ GraphDriver string
+}
+
// State is a storage backend for libpod's current state.
// A State is only initialized once per instance of libpod.
// As such, initialization methods for State implementations may safely assume
@@ -21,6 +31,22 @@ type State interface {
// Refresh clears container and pod states after a reboot
Refresh() error
+ // GetDBConfig retrieves several paths configured within the database
+ // when it was created - namely, Libpod root and tmp dirs, c/storage
+ // root and tmp dirs, and c/storage graph driver.
+ // This is not implemented by the in-memory state, as it has no need to
+ // validate runtime configuration.
+ GetDBConfig() (*DBConfig, error)
+
+ // ValidateDBConfig validates the config in the given Runtime struct
+ // against paths stored in the configured database.
+ // Libpod root and tmp dirs and c/storage root and tmp dirs and graph
+ // driver are validated.
+ // This is not implemented by the in-memory state, as it has no need to
+ // validate runtime configuration that may change over multiple runs of
+ // the program.
+ ValidateDBConfig(runtime *Runtime) error
+
// SetNamespace() sets the namespace for the store, and will determine
// what containers are retrieved with container and pod retrieval calls.
// A namespace of "", the empty string, acts as no namespace, and
@@ -127,4 +153,27 @@ type State interface {
// If a namespace has been set, only pods in that namespace will be
// returned.
AllPods() ([]*Pod, error)
+
+ // Volume accepts the full name of a volume
+ // If the volume doesn't exist, an error will be returned
+ Volume(volName string) (*Volume, error)
+ // HasVolume returns true if volName exists in the state,
+ // otherwise it returns false
+ HasVolume(volName string) (bool, error)
+ // VolumeInUse goes through the container dependencies of a volume
+ // and checks if the volume is being used by any container. If it is,
+ // a slice of the IDs of the containers using the volume is returned
+ VolumeInUse(volume *Volume) ([]string, error)
+ // AddVolume adds the specified volume to state. The volume's name
+ // must be unique within the list of existing volumes
+ AddVolume(volume *Volume) error
+ // RemoveVolCtrDep updates the list of container dependencies that the
+ // volume has. It deletes the dependent container ID from
+ // the volume's sub-bucket in the state
+ RemoveVolCtrDep(volume *Volume, ctrID string) error
+ // RemoveVolume removes the specified volume.
+ // Only volumes that have no container dependencies can be removed
+ RemoveVolume(volume *Volume) error
+ // AllVolumes returns all the volumes available in the state
+ AllVolumes() ([]*Volume, error)
}
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 04572fb29..d93a371f3 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -45,11 +45,16 @@ func getEmptyBoltState() (s State, p string, p2 string, err error) {
dbPath := filepath.Join(tmpDir, "db.sql")
lockDir := filepath.Join(tmpDir, "locks")
+ if err := os.Mkdir(lockDir, 0755); err != nil {
+ return nil, "", "", err
+ }
+
runtime := new(Runtime)
runtime.config = new(RuntimeConfig)
runtime.config.StorageConfig = storage.StoreOptions{}
+ runtime.lockDir = lockDir
- state, err := NewBoltState(dbPath, lockDir, runtime)
+ state, err := NewBoltState(dbPath, runtime)
if err != nil {
return nil, "", "", err
}
diff --git a/libpod/testdata/config.toml b/libpod/testdata/config.toml
index e19d36017..1d78f2083 100644
--- a/libpod/testdata/config.toml
+++ b/libpod/testdata/config.toml
@@ -14,7 +14,7 @@
seccomp_profile = "/etc/crio/seccomp.json"
apparmor_profile = "crio-default"
cgroup_manager = "cgroupfs"
- hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+ hooks_dir = ["/usr/share/containers/oci/hooks.d"]
pids_limit = 2048
container_exits_dir = "/var/run/podman/exits"
[crio.image]
diff --git a/libpod/util.go b/libpod/util.go
index 7007b29cd..b7578135a 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -9,10 +9,9 @@ import (
"strings"
"time"
- "github.com/containerd/cgroups"
"github.com/containers/image/signature"
"github.com/containers/image/types"
- "github.com/containers/libpod/pkg/util"
+ "github.com/fsnotify/fsnotify"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@@ -90,31 +89,64 @@ func MountExists(specMounts []spec.Mount, dest string) bool {
}
// WaitForFile waits until a file has been created or the given timeout has occurred
-func WaitForFile(path string, timeout time.Duration) error {
+func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, error) {
done := make(chan struct{})
chControl := make(chan struct{})
+
+ var inotifyEvents chan fsnotify.Event
+ var timer chan struct{}
+ watcher, err := fsnotify.NewWatcher()
+ if err == nil {
+ if err := watcher.Add(filepath.Dir(path)); err == nil {
+ inotifyEvents = watcher.Events
+ }
+ defer watcher.Close()
+ }
+ if inotifyEvents == nil {
+ // If for any reason we fail to create the inotify
+ // watcher, fall back to polling the file
+ timer = make(chan struct{})
+ go func() {
+ // Poll repeatedly until cancelled; a single send would stall
+ // the fallback path after the first 25ms tick.
+ for {
+ select {
+ case <-chControl:
+ close(timer)
+ return
+ default:
+ time.Sleep(25 * time.Millisecond)
+ timer <- struct{}{}
+ }
+ }
+ }()
+ }
+
go func() {
for {
select {
case <-chControl:
return
- default:
+ case <-timer:
+ _, err := os.Stat(path)
+ if err == nil {
+ close(done)
+ return
+ }
+ case <-inotifyEvents:
_, err := os.Stat(path)
if err == nil {
close(done)
return
}
- time.Sleep(25 * time.Millisecond)
}
}
}()
select {
+ case e := <-chWait:
+ return true, e
case <-done:
- return nil
+ return false, nil
case <-time.After(timeout):
close(chControl)
- return errors.Wrapf(ErrInternal, "timed out waiting for file %s", path)
+ return false, errors.Wrapf(ErrInternal, "timed out waiting for file %s", path)
}
}
@@ -155,26 +187,3 @@ func validPodNSOption(p *Pod, ctrPod string) error {
}
return nil
}
-
-// GetV1CGroups gets the V1 cgroup subsystems and then "filters"
-// out any subsystems that are provided by the caller. Passing nil
-// for excludes will return the subsystems unfiltered.
-//func GetV1CGroups(excludes []string) ([]cgroups.Subsystem, error) {
-func GetV1CGroups(excludes []string) cgroups.Hierarchy {
- return func() ([]cgroups.Subsystem, error) {
- var filtered []cgroups.Subsystem
-
- subSystem, err := cgroups.V1()
- if err != nil {
- return nil, err
- }
- for _, s := range subSystem {
- // If the name of the subsystem is not in the list of excludes, then
- // add it as a keeper.
- if !util.StringInSlice(string(s.Name()), excludes) {
- filtered = append(filtered, s)
- }
- }
- return filtered, nil
- }
-}
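
The inotify-with-polling-fallback structure of the reworked WaitForFile can be shown in a reduced, standalone form; this sketch waits for a single path and omits the chWait channel and error plumbing of the real function.

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/fsnotify/fsnotify"
)

// waitForPath blocks until path exists or the timeout elapses, preferring
// inotify events and falling back to 25ms polling when no watcher can be set up.
func waitForPath(path string, timeout time.Duration) error {
	var events chan fsnotify.Event
	if watcher, err := fsnotify.NewWatcher(); err == nil {
		defer watcher.Close()
		if err := watcher.Add(filepath.Dir(path)); err == nil {
			events = watcher.Events
		}
	}

	deadline := time.After(timeout)
	for {
		if _, err := os.Stat(path); err == nil {
			return nil
		}
		if events != nil {
			select {
			case <-events: // re-check the path on every directory event
			case <-deadline:
				return errors.New("timed out waiting for " + path)
			}
		} else {
			select {
			case <-time.After(25 * time.Millisecond): // polling fallback
			case <-deadline:
				return errors.New("timed out waiting for " + path)
			}
		}
	}
}

func main() {
	fmt.Println(waitForPath("/tmp/example-ready", 2*time.Second))
}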
diff --git a/libpod/util_linux.go b/libpod/util_linux.go
index 0cd486379..30e2538c3 100644
--- a/libpod/util_linux.go
+++ b/libpod/util_linux.go
@@ -7,6 +7,7 @@ import (
"strings"
"github.com/containerd/cgroups"
+ "github.com/containers/libpod/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -67,3 +68,26 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
return final, nil
}
+
+// GetV1CGroups gets the V1 cgroup subsystems and then "filters"
+// out any subsystems that are provided by the caller. Passing nil
+// for excludes will return the subsystems unfiltered.
+//func GetV1CGroups(excludes []string) ([]cgroups.Subsystem, error) {
+func GetV1CGroups(excludes []string) cgroups.Hierarchy {
+ return func() ([]cgroups.Subsystem, error) {
+ var filtered []cgroups.Subsystem
+
+ subSystem, err := cgroups.V1()
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range subSystem {
+ // If the name of the subsystem is not in the list of excludes, then
+ // add it as a keeper.
+ if !util.StringInSlice(string(s.Name()), excludes) {
+ filtered = append(filtered, s)
+ }
+ }
+ return filtered, nil
+ }
+}
diff --git a/libpod/volume.go b/libpod/volume.go
new file mode 100644
index 000000000..b732e8aa7
--- /dev/null
+++ b/libpod/volume.go
@@ -0,0 +1,63 @@
+package libpod
+
+import "github.com/containers/storage"
+
+// Volume is the type used to create named volumes
+// TODO: all volumes should be created using this and the Volume API
+type Volume struct {
+ config *VolumeConfig
+
+ valid bool
+ runtime *Runtime
+ lock storage.Locker
+}
+
+// VolumeConfig holds the volume's config information
+//easyjson:json
+type VolumeConfig struct {
+ Name string `json:"name"`
+ Labels map[string]string `json:"labels"`
+ MountPoint string `json:"mountPoint"`
+ Driver string `json:"driver"`
+ Options map[string]string `json:"options"`
+ Scope string `json:"scope"`
+}
+
+// Name retrieves the volume's name
+func (v *Volume) Name() string {
+ return v.config.Name
+}
+
+// Labels returns the volume's labels
+func (v *Volume) Labels() map[string]string {
+ labels := make(map[string]string)
+ for key, value := range v.config.Labels {
+ labels[key] = value
+ }
+ return labels
+}
+
+// MountPoint returns the volume's mountpoint on the host
+func (v *Volume) MountPoint() string {
+ return v.config.MountPoint
+}
+
+// Driver returns the volume's driver
+func (v *Volume) Driver() string {
+ return v.config.Driver
+}
+
+// Options return the volume's options
+func (v *Volume) Options() map[string]string {
+ options := make(map[string]string)
+ for key, value := range v.config.Options {
+ options[key] = value
+ }
+
+ return options
+}
+
+// Scope returns the scope of the volume
+func (v *Volume) Scope() string {
+ return v.config.Scope
+}
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
new file mode 100644
index 000000000..800e6d106
--- /dev/null
+++ b/libpod/volume_internal.go
@@ -0,0 +1,29 @@
+package libpod
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// VolumePath is the path under which all volumes created using the
+// local driver are stored
+// const VolumePath = "/var/lib/containers/storage/volumes"
+
+// Creates a new volume
+func newVolume(runtime *Runtime) (*Volume, error) {
+ volume := new(Volume)
+ volume.config = new(VolumeConfig)
+ volume.runtime = runtime
+ volume.config.Labels = make(map[string]string)
+ volume.config.Options = make(map[string]string)
+
+ return volume, nil
+}
+
+// teardownStorage deletes the volume from volumePath
+func (v *Volume) teardownStorage() error {
+ if !v.valid {
+ return ErrNoSuchVolume
+ }
+ return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
+}
diff --git a/pkg/inspect/inspect.go b/pkg/inspect/inspect.go
index 62ba53147..5bdcf677f 100644
--- a/pkg/inspect/inspect.go
+++ b/pkg/inspect/inspect.go
@@ -126,6 +126,7 @@ type ImageData struct {
Annotations map[string]string `json:"Annotations"`
ManifestType string `json:"ManifestType"`
User string `json:"User"`
+ History []v1.History `json:"History"`
}
// RootFS holds the root fs information of an image
diff --git a/pkg/lookup/lookup.go b/pkg/lookup/lookup.go
index a9d975b4b..70b97144f 100644
--- a/pkg/lookup/lookup.go
+++ b/pkg/lookup/lookup.go
@@ -99,9 +99,11 @@ func GetContainerGroups(groups []string, containerMount string, override *Overri
return uintgids, nil
}
-// GetUser takes a containermount path and user name or id and returns
+// GetUser takes a containermount path and user name or ID and returns
// a matching User structure from /etc/passwd. If it cannot locate a user
// with the provided information, an ErrNoPasswdEntries is returned.
+// When the provided user name was an ID, a User structure with Uid
+// set is returned along with ErrNoPasswdEntries.
func GetUser(containerMount, userIDorName string) (*user.User, error) {
var inputIsName bool
uid, err := strconv.Atoi(userIDorName)
@@ -124,12 +126,17 @@ func GetUser(containerMount, userIDorName string) (*user.User, error) {
if len(users) > 0 {
return &users[0], nil
}
+ if !inputIsName {
+ return &user.User{Uid: uid}, user.ErrNoPasswdEntries
+ }
return nil, user.ErrNoPasswdEntries
}
-// GetGroup takes ac ontainermount path and a group name or id and returns
-// a match Group struct from /etc/group. if it cannot locate a group,
-// an ErrNoGroupEntries error is returned.
+// GetGroup takes a containermount path and a group name or ID and returns
+// a matching Group struct from /etc/group. If it cannot locate a group,
+// an ErrNoGroupEntries error is returned. When the provided group name
+// was an ID, a Group structure with Gid set is returned along with
+// ErrNoGroupEntries.
func GetGroup(containerMount, groupIDorName string) (*user.Group, error) {
var inputIsName bool
gid, err := strconv.Atoi(groupIDorName)
@@ -154,5 +161,8 @@ func GetGroup(containerMount, groupIDorName string) (*user.Group, error) {
if len(groups) > 0 {
return &groups[0], nil
}
+ if !inputIsName {
+ return &user.Group{Gid: gid}, user.ErrNoGroupEntries
+ }
return nil, user.ErrNoGroupEntries
}
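
A short sketch of how a caller can take advantage of the new fallback: when the input was numeric and no passwd entry exists, GetUser still returns a usable Uid alongside ErrNoPasswdEntries. The mount path here is hypothetical, and the runc user package import path is an assumption based on the surrounding code.

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/lookup"
	"github.com/opencontainers/runc/libcontainer/user"
)

// resolveUID is illustrative only: with a numeric spec and no passwd entry,
// GetUser now returns a partially filled User alongside ErrNoPasswdEntries.
func resolveUID(containerMount, spec string) (int, error) {
	u, err := lookup.GetUser(containerMount, spec)
	if err != nil && err != user.ErrNoPasswdEntries {
		return 0, err
	}
	if u == nil {
		return 0, err
	}
	return u.Uid, nil
}

func main() {
	uid, err := resolveUID("/path/to/container/mount", "4242")
	fmt.Println(uid, err)
}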
diff --git a/pkg/namespaces/namespaces.go b/pkg/namespaces/namespaces.go
index bee833fa9..832efd554 100644
--- a/pkg/namespaces/namespaces.go
+++ b/pkg/namespaces/namespaces.go
@@ -223,7 +223,12 @@ func (n NetworkMode) IsBridge() bool {
return n == "bridge"
}
+// IsSlirp4netns indicates if we are running a rootless network stack
+func (n NetworkMode) IsSlirp4netns() bool {
+ return n == "slirp4netns"
+}
+
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
- return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+ return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() && !n.IsSlirp4netns()
}
diff --git a/pkg/registries/registries.go b/pkg/registries/registries.go
index 73aa93d68..cbb8b730c 100644
--- a/pkg/registries/registries.go
+++ b/pkg/registries/registries.go
@@ -13,21 +13,28 @@ import (
// userRegistriesFile is the path to the per user registry configuration file.
var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
-// GetRegistries obtains the list of registries defined in the global registries file.
-func GetRegistries() ([]string, error) {
- registryConfigPath := ""
+// SystemRegistriesConfPath returns an appropriate value for types.SystemContext.SystemRegistriesConfPath
+// (possibly "", which is not an error), taking into account rootless mode and environment variable overrides.
+//
+// FIXME: This should be centralized in a global SystemContext initializer inherited throughout the code,
+// not called haphazardly throughout the code as it is now.
+func SystemRegistriesConfPath() string {
+ if envOverride := os.Getenv("REGISTRIES_CONFIG_PATH"); len(envOverride) > 0 {
+ return envOverride
+ }
if rootless.IsRootless() {
if _, err := os.Stat(userRegistriesFile); err == nil {
- registryConfigPath = userRegistriesFile
+ return userRegistriesFile
}
}
- envOverride := os.Getenv("REGISTRIES_CONFIG_PATH")
- if len(envOverride) > 0 {
- registryConfigPath = envOverride
- }
- searchRegistries, err := sysregistries.GetRegistries(&types.SystemContext{SystemRegistriesConfPath: registryConfigPath})
+ return ""
+}
+
+// GetRegistries obtains the list of registries defined in the global registries file.
+func GetRegistries() ([]string, error) {
+ searchRegistries, err := sysregistries.GetRegistries(&types.SystemContext{SystemRegistriesConfPath: SystemRegistriesConfPath()})
if err != nil {
return nil, errors.Wrapf(err, "unable to parse the registries.conf file")
}
@@ -36,17 +43,7 @@ func GetRegistries() ([]string, error) {
// GetInsecureRegistries obtains the list of insecure registries from the global registration file.
func GetInsecureRegistries() ([]string, error) {
- registryConfigPath := ""
-
- if _, err := os.Stat(userRegistriesFile); err == nil {
- registryConfigPath = userRegistriesFile
- }
-
- envOverride := os.Getenv("REGISTRIES_CONFIG_PATH")
- if len(envOverride) > 0 {
- registryConfigPath = envOverride
- }
- registries, err := sysregistries.GetInsecureRegistries(&types.SystemContext{SystemRegistriesConfPath: registryConfigPath})
+ registries, err := sysregistries.GetInsecureRegistries(&types.SystemContext{SystemRegistriesConfPath: SystemRegistriesConfPath()})
if err != nil {
return nil, errors.Wrapf(err, "unable to parse the registries.conf file")
}
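
The precedence implemented by SystemRegistriesConfPath (environment override first, then the per-user file in rootless mode, otherwise an empty string so containers/image uses its built-in default) is summarized in this standalone sketch with stand-in helpers.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// registriesConfPath mirrors the lookup order of SystemRegistriesConfPath:
// explicit env override wins, then a per-user file when rootless, otherwise "".
func registriesConfPath(rootless bool) string {
	if env := os.Getenv("REGISTRIES_CONFIG_PATH"); env != "" {
		return env
	}
	if rootless {
		userFile := filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
		if _, err := os.Stat(userFile); err == nil {
			return userFile
		}
	}
	return ""
}

func main() {
	fmt.Printf("resolved path: %q\n", registriesConfPath(os.Geteuid() != 0))
}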
diff --git a/pkg/resolvconf/resolvconf.go b/pkg/resolvconf/resolvconf.go
index fccd60093..e85bcb377 100644
--- a/pkg/resolvconf/resolvconf.go
+++ b/pkg/resolvconf/resolvconf.go
@@ -103,13 +103,21 @@ func GetLastModified() *File {
}
// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs:
-// 1. It looks for localhost (127.*|::1) entries in the provided
+// 1. If a netns is enabled, it looks for localhost (127.*|::1) entries in the provided
// resolv.conf, removing local nameserver entries, and, if the resulting
// cleaned config has no defined nameservers left, adds default DNS entries
// 2. Given the caller provides the enable/disable state of IPv6, the filter
// code will remove all IPv6 nameservers if it is not enabled for containers
//
-func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) {
+func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) (*File, error) {
+ // If we're using the host netns, we have nothing to do besides hash the file.
+ if !netnsEnabled {
+ hash, err := ioutils.HashData(bytes.NewReader(resolvConf))
+ if err != nil {
+ return nil, err
+ }
+ return &File{Content: resolvConf, Hash: hash}, nil
+ }
cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})
// if IPv6 is not enabled, also clean out any IPv6 address nameserver
if !ipv6Enabled {
diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go
index 5c45f2694..07002da3f 100644
--- a/pkg/rootless/rootless_linux.go
+++ b/pkg/rootless/rootless_linux.go
@@ -12,6 +12,7 @@ import (
"runtime"
"strconv"
"strings"
+ "sync"
"syscall"
"unsafe"
@@ -33,9 +34,17 @@ func runInUser() error {
return nil
}
+var (
+ isRootlessOnce sync.Once
+ isRootless bool
+)
+
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
- return os.Geteuid() != 0 || os.Getenv("_LIBPOD_USERNS_CONFIGURED") != ""
+ isRootlessOnce.Do(func() {
+ isRootless = os.Geteuid() != 0 || os.Getenv("_LIBPOD_USERNS_CONFIGURED") != ""
+ })
+ return isRootless
}
var (
@@ -65,7 +74,7 @@ func GetRootlessUID() int {
func tryMappingTool(tool string, pid int, hostID int, mappings []idtools.IDMap) error {
path, err := exec.LookPath(tool)
if err != nil {
- return err
+ return errors.Wrapf(err, "cannot find %s", tool)
}
appendTriplet := func(l []string, a, b, c int) []string {
@@ -83,7 +92,11 @@ func tryMappingTool(tool string, pid int, hostID int, mappings []idtools.IDMap)
Path: path,
Args: args,
}
- return cmd.Run()
+
+ if err := cmd.Run(); err != nil {
+ return errors.Wrapf(err, "cannot setup namespace using %s", tool)
+ }
+ return nil
}
// JoinNS re-exec podman in a new userNS and join the user namespace of the specified
@@ -182,11 +195,16 @@ func BecomeRootInUserNS() (bool, int, error) {
return false, -1, errors.Errorf("cannot re-exec process")
}
+ allowSingleIDMapping := os.Getenv("PODMAN_ALLOW_SINGLE_ID_MAPPING_IN_USERNS") != ""
+
var uids, gids []idtools.IDMap
username := os.Getenv("USER")
if username == "" {
user, err := user.LookupId(fmt.Sprintf("%d", os.Getuid()))
- if err != nil && os.Getenv("PODMAN_ALLOW_SINGLE_ID_MAPPING_IN_USERNS") == "" {
+ if err != nil && !allowSingleIDMapping {
+ if os.IsNotExist(err) {
+ return false, 0, errors.Wrapf(err, "/etc/subuid or /etc/subgid does not exist, see subuid/subgid man pages for information on these files")
+ }
return false, 0, errors.Wrapf(err, "could not find user by UID nor USER env was set")
}
if err == nil {
@@ -194,7 +212,7 @@ func BecomeRootInUserNS() (bool, int, error) {
}
}
mappings, err := idtools.NewIDMappings(username, username)
- if os.Getenv("PODMAN_ALLOW_SINGLE_ID_MAPPING_IN_USERNS") == "" {
+ if !allowSingleIDMapping {
if err != nil {
return false, -1, err
}
@@ -224,7 +242,11 @@ func BecomeRootInUserNS() (bool, int, error) {
uidsMapped := false
if mappings != nil && uids != nil {
- uidsMapped = tryMappingTool("newuidmap", pid, os.Getuid(), uids) == nil
+ err := tryMappingTool("newuidmap", pid, os.Getuid(), uids)
+ if !allowSingleIDMapping && err != nil {
+ return false, 0, err
+ }
+ uidsMapped = err == nil
}
if !uidsMapped {
setgroups := fmt.Sprintf("/proc/%d/setgroups", pid)
@@ -242,7 +264,11 @@ func BecomeRootInUserNS() (bool, int, error) {
gidsMapped := false
if mappings != nil && gids != nil {
- gidsMapped = tryMappingTool("newgidmap", pid, os.Getgid(), gids) == nil
+ err := tryMappingTool("newgidmap", pid, os.Getgid(), gids)
+ if !allowSingleIDMapping && err != nil {
+ return false, 0, err
+ }
+ gidsMapped = err == nil
}
if !gidsMapped {
gidMap := fmt.Sprintf("/proc/%d/gid_map", pid)
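
The memoization added to IsRootless is a standard sync.Once pattern; the sketch below shows it in isolation. One design consequence worth noting: the answer is computed once per process, so later changes to the effective UID or the environment are deliberately not picked up.

package main

import (
	"fmt"
	"os"
	"sync"
)

var (
	isRootlessOnce sync.Once
	isRootless     bool
)

// IsRootless computes its answer on first use and returns the cached value
// on every subsequent call, avoiding repeated Geteuid/Getenv lookups.
func IsRootless() bool {
	isRootlessOnce.Do(func() {
		isRootless = os.Geteuid() != 0 || os.Getenv("_LIBPOD_USERNS_CONFIGURED") != ""
	})
	return isRootless
}

func main() {
	fmt.Println(IsRootless(), IsRootless())
}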
diff --git a/pkg/secrets/secrets.go b/pkg/secrets/secrets.go
index 7208f53b7..242953609 100644
--- a/pkg/secrets/secrets.go
+++ b/pkg/secrets/secrets.go
@@ -149,6 +149,15 @@ func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPre
mountFiles = append(mountFiles, []string{OverrideMountsFile, DefaultMountsFile}...)
if rootless.IsRootless() {
mountFiles = append([]string{UserOverrideMountsFile}, mountFiles...)
+ _, err := os.Stat(UserOverrideMountsFile)
+ if err != nil && os.IsNotExist(err) {
+ os.MkdirAll(filepath.Dir(UserOverrideMountsFile), 0755)
+ if f, err := os.Create(UserOverrideMountsFile); err != nil {
+ logrus.Warnf("could not create file %s: %v", UserOverrideMountsFile, err)
+ } else {
+ f.Close()
+ }
+ }
}
} else {
mountFiles = append(mountFiles, mountFile)
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 6ac9d82da..25f8cd7a1 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -335,7 +335,6 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime) ([]lib
}
options = append(options, runtime.WithPod(pod))
}
-
if len(c.PortBindings) > 0 {
portBindings, err = c.CreatePortBindings()
if err != nil {
@@ -392,11 +391,11 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime) ([]lib
options = append(options, libpod.WithNetNSFrom(connectedCtr))
} else if !c.NetMode.IsHost() && !c.NetMode.IsNone() {
isRootless := rootless.IsRootless()
- postConfigureNetNS := isRootless || (len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0) && !c.UsernsMode.IsHost()
+ postConfigureNetNS := c.NetMode.IsSlirp4netns() || (len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0) && !c.UsernsMode.IsHost()
if isRootless && len(portBindings) > 0 {
return nil, errors.New("port bindings are not yet supported by rootless containers")
}
- options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, networks))
+ options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, string(c.NetMode), networks))
}
if c.PidMode.IsContainer() {
@@ -497,8 +496,13 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime) ([]lib
// CreatePortBindings iterates ports mappings and exposed ports into a format CNI understands
func (c *CreateConfig) CreatePortBindings() ([]ocicni.PortMapping, error) {
+ return NatToOCIPortBindings(c.PortBindings)
+}
+
+// NatToOCIPortBindings iterates a nat.portmap slice and creates []ocicni portmapping slice
+func NatToOCIPortBindings(ports nat.PortMap) ([]ocicni.PortMapping, error) {
var portBindings []ocicni.PortMapping
- for containerPb, hostPb := range c.PortBindings {
+ for containerPb, hostPb := range ports {
var pm ocicni.PortMapping
pm.ContainerPort = int32(containerPb.Int())
for _, i := range hostPb {
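
NatToOCIPortBindings lets callers outside CreateConfig reuse the nat.PortMap-to-ocicni conversion. A hedged usage sketch follows; the createconfig package name for pkg/spec is an assumption, and the printed protocol field may be empty depending on how the conversion fills it.

package main

import (
	"fmt"

	createconfig "github.com/containers/libpod/pkg/spec"
	"github.com/docker/go-connections/nat"
)

func main() {
	// One container port, published on two different host ports.
	ports := nat.PortMap{
		"80/tcp": []nat.PortBinding{
			{HostIP: "127.0.0.1", HostPort: "8080"},
			{HostPort: "8081"},
		},
	}
	bindings, err := createconfig.NatToOCIPortBindings(ports)
	if err != nil {
		panic(err)
	}
	for _, b := range bindings {
		fmt.Printf("%s:%d -> %d/%s\n", b.HostIP, b.HostPort, b.ContainerPort, b.Protocol)
	}
}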
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index b1cca2c9e..05be00864 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -453,6 +453,9 @@ func addNetNS(config *CreateConfig, g *generate.Generator) error {
} else if IsPod(string(netMode)) {
logrus.Debug("Using pod netmode, unless pod is not sharing")
return nil
+ } else if netMode.IsSlirp4netns() {
+ logrus.Debug("Using slirp4netns netmode")
+ return nil
} else if netMode.IsUserDefined() {
logrus.Debug("Using user defined netmode")
return nil
diff --git a/pkg/trust/trust.go b/pkg/trust/trust.go
new file mode 100644
index 000000000..efc760364
--- /dev/null
+++ b/pkg/trust/trust.go
@@ -0,0 +1,250 @@
+package trust
+
+import (
+ "bufio"
+ "encoding/base64"
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "unsafe"
+
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ yaml "gopkg.in/yaml.v2"
+)
+
+// PolicyContent struct for policy.json file
+type PolicyContent struct {
+ Default []RepoContent `json:"default"`
+ Transports TransportsContent `json:"transports"`
+}
+
+// RepoContent struct used under each repo
+type RepoContent struct {
+ Type string `json:"type"`
+ KeyType string `json:"keyType,omitempty"`
+ KeyPath string `json:"keyPath,omitempty"`
+ KeyData string `json:"keyData,omitempty"`
+ SignedIdentity json.RawMessage `json:"signedIdentity,omitempty"`
+}
+
+// RepoMap maps a repo name to the policy content for that repo
+type RepoMap map[string][]RepoContent
+
+// TransportsContent struct for content under "transports"
+type TransportsContent map[string]RepoMap
+
+// RegistryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
+// NOTE: Keep this in sync with docs/registries.d.md!
+type RegistryConfiguration struct {
+ DefaultDocker *RegistryNamespace `json:"default-docker"`
+ // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
+ Docker map[string]RegistryNamespace `json:"docker"`
+}
+
+// RegistryNamespace defines lookaside locations for a single namespace.
+type RegistryNamespace struct {
+ SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing.
+ SigStoreStaging string `json:"sigstore-staging"` // For writing only.
+}
+
+// DefaultPolicyPath returns a path to the default policy of the system.
+func DefaultPolicyPath(sys *types.SystemContext) string {
+ systemDefaultPolicyPath := "/etc/containers/policy.json"
+ if sys != nil {
+ if sys.SignaturePolicyPath != "" {
+ return sys.SignaturePolicyPath
+ }
+ if sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+ }
+ }
+ return systemDefaultPolicyPath
+}
+
+// RegistriesDirPath returns a path to registries.d
+func RegistriesDirPath(sys *types.SystemContext) string {
+ systemRegistriesDirPath := "/etc/containers/registries.d"
+ if sys != nil {
+ if sys.RegistriesDirPath != "" {
+ return sys.RegistriesDirPath
+ }
+ if sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
+ }
+ }
+ return systemRegistriesDirPath
+}
+
+// LoadAndMergeConfig loads configuration files in dirPath
+func LoadAndMergeConfig(dirPath string) (*RegistryConfiguration, error) {
+ mergedConfig := RegistryConfiguration{Docker: map[string]RegistryNamespace{}}
+ dockerDefaultMergedFrom := ""
+ nsMergedFrom := map[string]string{}
+
+ dir, err := os.Open(dirPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &mergedConfig, nil
+ }
+ return nil, err
+ }
+ configNames, err := dir.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+ for _, configName := range configNames {
+ if !strings.HasSuffix(configName, ".yaml") {
+ continue
+ }
+ configPath := filepath.Join(dirPath, configName)
+ configBytes, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ return nil, err
+ }
+ var config RegistryConfiguration
+ err = yaml.Unmarshal(configBytes, &config)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error parsing %s", configPath)
+ }
+ if config.DefaultDocker != nil {
+ if mergedConfig.DefaultDocker != nil {
+ return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+ dockerDefaultMergedFrom, configPath)
+ }
+ mergedConfig.DefaultDocker = config.DefaultDocker
+ dockerDefaultMergedFrom = configPath
+ }
+ for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
+ if _, ok := mergedConfig.Docker[nsName]; ok {
+ return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+ nsName, nsMergedFrom[nsName], configPath)
+ }
+ mergedConfig.Docker[nsName] = nsConfig
+ nsMergedFrom[nsName] = configPath
+ }
+ }
+ return &mergedConfig, nil
+}
+
+// HaveMatchRegistry checks if trust settings for the registry have been configured in a registries.d yaml file
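+// The lookup prefers the most specific match and then walks up the namespace by
+// stripping the last path component, e.g. "docker.io/library/busybox" is tried
+// before "docker.io/library" and finally "docker.io" (example names are illustrative).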
+func HaveMatchRegistry(key string, registryConfigs *RegistryConfiguration) *RegistryNamespace {
+ searchKey := key
+ if !strings.Contains(searchKey, "/") {
+ val, exists := registryConfigs.Docker[searchKey]
+ if exists {
+ return &val
+ }
+ }
+ for range strings.Split(key, "/") {
+ val, exists := registryConfigs.Docker[searchKey]
+ if exists {
+ return &val
+ }
+ if strings.Contains(searchKey, "/") {
+ searchKey = searchKey[:strings.LastIndex(searchKey, "/")]
+ }
+ }
+ return nil
+}
+
+// CreateTmpFile creates a temp file under dir and writes the content into it
+func CreateTmpFile(dir, pattern string, content []byte) (string, error) {
+ tmpfile, err := ioutil.TempFile(dir, pattern)
+ if err != nil {
+ return "", err
+ }
+ defer tmpfile.Close()
+
+ if _, err := tmpfile.Write(content); err != nil {
+ return "", err
+ }
+ return tmpfile.Name(), nil
+}
+
+// GetGPGId returns the GPG identity, either the bracketed <email> or the ID string,
+// comma separated if more than one key is given
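+// The identities are parsed from `gpg2 --with-colons` output: for "pub:" and
+// "uid:" records the user ID is taken from the tenth colon-separated field,
+// and a bracketed <email>, when present, is extracted from it.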
+func GetGPGId(keys []string) string {
+ for _, k := range keys {
+ if _, err := os.Stat(k); err != nil {
+ decodeKey, err := base64.StdEncoding.DecodeString(k)
+ if err != nil {
+ logrus.Warnf("error decoding key data")
+ continue
+ }
+ tmpfileName, err := CreateTmpFile("/run/", "", decodeKey)
+ if err != nil {
+ logrus.Warnf("error creating key date temp file %s", err)
+ }
+ defer os.Remove(tmpfileName)
+ k = tmpfileName
+ }
+ cmd := exec.Command("gpg2", "--with-colons", k)
+ results, err := cmd.Output()
+ if err != nil {
+ logrus.Warnf("error get key identity: %s", err)
+ continue
+ }
+ resultsStr := *(*string)(unsafe.Pointer(&results))
+ scanner := bufio.NewScanner(strings.NewReader(resultsStr))
+ var parseduids []string
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "uid:") || strings.HasPrefix(line, "pub:") {
+ uid := strings.Split(line, ":")[9]
+ if uid == "" {
+ continue
+ }
+ parseduid := uid
+ if strings.Contains(uid, "<") && strings.Contains(uid, ">") {
+ parseduid = strings.SplitN(strings.SplitAfterN(uid, "<", 2)[1], ">", 2)[0]
+ }
+ parseduids = append(parseduids, parseduid)
+ }
+ }
+ return strings.Join(parseduids, ",")
+ }
+ return ""
+}
+
+// GetPolicyJSON returns the struct used to show policy.json in JSON format
+func GetPolicyJSON(policyContentStruct PolicyContent, systemRegistriesDirPath string) (map[string]map[string]interface{}, error) {
+ registryConfigs, err := LoadAndMergeConfig(systemRegistriesDirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ policyJSON := make(map[string]map[string]interface{})
+ if len(policyContentStruct.Default) > 0 {
+ policyJSON["* (default)"] = make(map[string]interface{})
+ policyJSON["* (default)"]["type"] = policyContentStruct.Default[0].Type
+ }
+ for transname, transval := range policyContentStruct.Transports {
+ for repo, repoval := range transval {
+ policyJSON[repo] = make(map[string]interface{})
+ policyJSON[repo]["type"] = repoval[0].Type
+ policyJSON[repo]["transport"] = transname
+ for _, repoele := range repoval {
+ keyarr := []string{}
+ if len(repoele.KeyPath) > 0 {
+ keyarr = append(keyarr, repoele.KeyPath)
+ }
+ if len(repoele.KeyData) > 0 {
+ keyarr = append(keyarr, string(repoele.KeyData))
+ }
+ policyJSON[repo]["keys"] = keyarr
+ }
+ policyJSON[repo]["sigstore"] = ""
+ registryNamespace := HaveMatchRegistry(repo, registryConfigs)
+ if registryNamespace != nil {
+ policyJSON[repo]["sigstore"] = registryNamespace.SigStore
+ }
+ }
+ }
+ return policyJSON, nil
+}
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 69f49e72a..f567f2675 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -3,11 +3,13 @@ package util
import (
"fmt"
"os"
+ "os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
+ "github.com/BurntSushi/toml"
"github.com/containers/image/types"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
@@ -248,49 +250,122 @@ func GetRootlessRuntimeDir() (string, error) {
return runtimeDir, nil
}
-// GetRootlessStorageOpts returns the storage ops for containers running as non root
-func GetRootlessStorageOpts() (storage.StoreOptions, error) {
- var opts storage.StoreOptions
-
+// GetRootlessDirInfo returns the parent directory that will hold container storage
+// and volumes in rootless mode
+func GetRootlessDirInfo() (string, string, error) {
rootlessRuntime, err := GetRootlessRuntimeDir()
if err != nil {
- return opts, err
+ return "", "", err
}
- opts.RunRoot = rootlessRuntime
dataDir := os.Getenv("XDG_DATA_HOME")
if dataDir == "" {
home := os.Getenv("HOME")
if home == "" {
- return opts, fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+ return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
}
// runc doesn't like symlinks in the rootfs path, and at least
// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
- return opts, errors.Wrapf(err, "cannot resolve %s", home)
+ return "", "", errors.Wrapf(err, "cannot resolve %s", home)
}
dataDir = filepath.Join(resolvedHome, ".local", "share")
}
+ return dataDir, rootlessRuntime, nil
+}
+
+// GetRootlessStorageOpts returns the storage opts for containers running as non root
+func GetRootlessStorageOpts() (storage.StoreOptions, error) {
+ var opts storage.StoreOptions
+
+ dataDir, rootlessRuntime, err := GetRootlessDirInfo()
+ if err != nil {
+ return opts, err
+ }
+ opts.RunRoot = rootlessRuntime
opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
- opts.GraphDriverName = "vfs"
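+ // Prefer the overlay driver via fuse-overlayfs when the helper binary is
+ // available; otherwise fall back to vfs.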
+ if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
+ opts.GraphDriverName = "overlay"
+ opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
+ } else {
+ opts.GraphDriverName = "vfs"
+ }
return opts, nil
}
-// GetDefaultStoreOptions returns the storage ops for containers
-func GetDefaultStoreOptions() (storage.StoreOptions, error) {
+// GetRootlessVolumeInfo returns where all the named volumes will be created in rootless mode
+func GetRootlessVolumeInfo() (string, error) {
+ dataDir, _, err := GetRootlessDirInfo()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(dataDir, "containers", "storage", "volumes"), nil
+}
+
+type tomlOptionsConfig struct {
+ MountProgram string `toml:"mount_program"`
+}
+
+type tomlConfig struct {
+ Storage struct {
+ Driver string `toml:"driver"`
+ RunRoot string `toml:"runroot"`
+ GraphRoot string `toml:"graphroot"`
+ Options struct{ tomlOptionsConfig } `toml:"options"`
+ } `toml:"storage"`
+}
+
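+// getTomlStorage maps the storage options into the layout written to
+// storage.conf. For a rootless user the generated file typically looks like
+// this (paths and values are illustrative only):
+//
+//   [storage]
+//     driver = "overlay"
+//     runroot = "/run/user/1000"
+//     graphroot = "/home/user/.local/share/containers/storage"
+//     [storage.options]
+//       mount_program = "/usr/bin/fuse-overlayfs"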
+func getTomlStorage(storeOptions *storage.StoreOptions) *tomlConfig {
+ config := new(tomlConfig)
+
+ config.Storage.Driver = storeOptions.GraphDriverName
+ config.Storage.RunRoot = storeOptions.RunRoot
+ config.Storage.GraphRoot = storeOptions.GraphRoot
+ for _, i := range storeOptions.GraphDriverOptions {
+ s := strings.Split(i, "=")
+ if s[0] == "overlay.mount_program" {
+ config.Storage.Options.MountProgram = s[1]
+ }
+ }
+
+ return config
+}
+
+// GetDefaultStoreOptions returns the storage opts for containers and the path
+// where all named volumes will be created using the volume API
+func GetDefaultStoreOptions() (storage.StoreOptions, string, error) {
storageOpts := storage.DefaultStoreOptions
+ volumePath := "/var/lib/containers/storage"
if rootless.IsRootless() {
var err error
storageOpts, err = GetRootlessStorageOpts()
if err != nil {
- return storageOpts, err
+ return storageOpts, volumePath, err
+ }
+ volumePath, err = GetRootlessVolumeInfo()
+ if err != nil {
+ return storageOpts, volumePath, err
}
storageConf := filepath.Join(os.Getenv("HOME"), ".config/containers/storage.conf")
if _, err := os.Stat(storageConf); err == nil {
storage.ReloadConfigurationFile(storageConf, &storageOpts)
+ } else if os.IsNotExist(err) {
+ os.MkdirAll(filepath.Dir(storageConf), 0755)
+ file, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return storageOpts, volumePath, errors.Wrapf(err, "cannot open %s", storageConf)
+ }
+
+ tomlConfiguration := getTomlStorage(&storageOpts)
+ defer file.Close()
+ enc := toml.NewEncoder(file)
+ if err := enc.Encode(tomlConfiguration); err != nil {
+ os.Remove(storageConf)
+ }
}
}
- return storageOpts, nil
+ return storageOpts, volumePath, nil
}
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index f517e9b6e..07d981786 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -278,6 +278,18 @@ func (i *LibpodAPI) RestartContainer(call iopodman.VarlinkCall, name string, tim
return call.ReplyRestartContainer(ctr.ID())
}
+// ContainerExists looks in local storage for the existence of a container
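+// It replies with 0 when the container exists and with 1 when it does not
+// (mirroring exit-code conventions).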
+func (i *LibpodAPI) ContainerExists(call iopodman.VarlinkCall, name string) error {
+ _, err := i.Runtime.LookupContainer(name)
+ if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ return call.ReplyContainerExists(1)
+ }
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyContainerExists(0)
+}
+
// KillContainer kills a running container. If you want to use the default SIGTERM signal, just send a -1
// for the signal arg.
func (i *LibpodAPI) KillContainer(call iopodman.VarlinkCall, name string, signal int64) error {
@@ -413,3 +425,40 @@ func (i *LibpodAPI) GetAttachSockets(call iopodman.VarlinkCall, name string) err
}
return call.ReplyGetAttachSockets(s)
}
+
+// ContainerCheckpoint ...
+func (i *LibpodAPI) ContainerCheckpoint(call iopodman.VarlinkCall, name string, keep, leaveRunning, tcpEstablished bool) error {
+ ctx := getContext()
+ ctr, err := i.Runtime.LookupContainer(name)
+ if err != nil {
+ return call.ReplyContainerNotFound(name)
+ }
+
+ options := libpod.ContainerCheckpointOptions{
+ Keep: keep,
+ TCPEstablished: tcpEstablished,
+ KeepRunning: leaveRunning,
+ }
+ if err := ctr.Checkpoint(ctx, options); err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyContainerCheckpoint(ctr.ID())
+}
+
+// ContainerRestore ...
+func (i *LibpodAPI) ContainerRestore(call iopodman.VarlinkCall, name string, keep, tcpEstablished bool) error {
+ ctx := getContext()
+ ctr, err := i.Runtime.LookupContainer(name)
+ if err != nil {
+ return call.ReplyContainerNotFound(name)
+ }
+
+ options := libpod.ContainerCheckpointOptions{
+ Keep: keep,
+ TCPEstablished: tcpEstablished,
+ }
+ if err := ctr.Restore(ctx, options); err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyContainerRestore(ctr.ID())
+}
diff --git a/pkg/varlinkapi/containers_create.go b/pkg/varlinkapi/containers_create.go
index ca1a57048..bb6273fd1 100644
--- a/pkg/varlinkapi/containers_create.go
+++ b/pkg/varlinkapi/containers_create.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/inspect"
"github.com/containers/libpod/pkg/namespaces"
+ "github.com/containers/libpod/pkg/rootless"
cc "github.com/containers/libpod/pkg/spec"
"github.com/containers/libpod/pkg/util"
"github.com/docker/docker/pkg/signal"
@@ -24,7 +25,7 @@ func (i *LibpodAPI) CreateContainer(call iopodman.VarlinkCall, config iopodman.C
rtc := i.Runtime.GetConfig()
ctx := getContext()
- newImage, err := i.Runtime.ImageRuntime().New(ctx, config.Image, rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{}, false, false)
+ newImage, err := i.Runtime.ImageRuntime().New(ctx, config.Image, rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{}, false)
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
@@ -126,7 +127,11 @@ func varlinkCreateToCreateConfig(ctx context.Context, create iopodman.Create, ru
// NETWORK MODE
networkMode := create.Net_mode
if networkMode == "" {
- networkMode = "bridge"
+ if rootless.IsRootless() {
+ networkMode = "slirp4netns"
+ } else {
+ networkMode = "bridge"
+ }
}
// WORKING DIR
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index d14c61c39..5e4cb4ccb 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "os"
"path/filepath"
"strings"
"time"
@@ -12,13 +13,17 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/imagebuildah"
"github.com/containers/image/docker"
+ dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/manifest"
+ "github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/util"
+ "github.com/containers/libpod/utils"
"github.com/docker/go-units"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -127,7 +132,7 @@ func (i *LibpodAPI) BuildImage(call iopodman.VarlinkCall, config iopodman.BuildI
if config.Pull_always {
pullPolicy = imagebuildah.PullAlways
}
- manifestType := "oci"
+ manifestType := "oci" //nolint
if config.Image_format != "" {
manifestType = config.Image_format
}
@@ -271,6 +276,9 @@ func (i *LibpodAPI) InspectImage(call iopodman.VarlinkCall, name string) error {
return call.ReplyImageNotFound(name)
}
inspectInfo, err := newImage.Inspect(getContext())
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
b, err := json.Marshal(inspectInfo)
if err != nil {
return call.ReplyErrorOccurred(fmt.Sprintf("unable to serialize"))
@@ -305,8 +313,12 @@ func (i *LibpodAPI) HistoryImage(call iopodman.VarlinkCall, name string) error {
}
// PushImage pushes an local image to registry
-// TODO We need to add options for signing, credentials, tls, and multi-tag
-func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, tlsVerify bool) error {
+func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, tlsVerify bool, signaturePolicy, creds, certDir string, compress bool, format string, removeSignatures bool, signBy string) error {
+ var (
+ registryCreds *types.DockerAuthConfig
+ manifestType string
+ )
+
newImage, err := i.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
return call.ReplyImageNotFound(err.Error())
@@ -315,14 +327,38 @@ func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, tlsVe
if tag != "" {
destname = tag
}
-
+ if creds != "" {
+ creds, err := util.ParseRegistryCreds(creds)
+ if err != nil {
+ return err
+ }
+ registryCreds = creds
+ }
dockerRegistryOptions := image.DockerRegistryOptions{
- DockerInsecureSkipTLSVerify: !tlsVerify,
+ DockerRegistryCreds: registryCreds,
+ DockerCertPath: certDir,
+ }
+ if !tlsVerify {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+ }
+ if format != "" {
+ switch format {
+ case "oci": //nolint
+ manifestType = v1.MediaTypeImageManifest
+ case "v2s1":
+ manifestType = manifest.DockerV2Schema1SignedMediaType
+ case "v2s2", "docker":
+ manifestType = manifest.DockerV2Schema2MediaType
+ default:
+ return call.ReplyErrorOccurred(fmt.Sprintf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", format))
+ }
+ }
+ so := image.SigningOptions{
+ RemoveSignatures: removeSignatures,
+ SignBy: signBy,
}
- so := image.SigningOptions{}
-
- if err := newImage.PushImageToHeuristicDestination(getContext(), destname, "", "", "", nil, false, so, &dockerRegistryOptions, false, nil); err != nil {
+ if err := newImage.PushImageToHeuristicDestination(getContext(), destname, manifestType, "", signaturePolicy, nil, compress, so, &dockerRegistryOptions, nil); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
return call.ReplyPushImage(newImage.ID())
@@ -421,7 +457,7 @@ func (i *LibpodAPI) Commit(call iopodman.VarlinkCall, name, imageName string, ch
sc := image.GetSystemContext(i.Runtime.GetConfig().SignaturePolicyPath, "", false)
var mimeType string
switch manifestType {
- case "oci", "":
+ case "oci", "": //nolint
mimeType = buildah.OCIv1ImageManifest
case "docker":
mimeType = manifest.DockerV2Schema2MediaType
@@ -482,18 +518,96 @@ func (i *LibpodAPI) ExportImage(call iopodman.VarlinkCall, name, destination str
return err
}
- if err := newImage.PushImageToHeuristicDestination(getContext(), destination, "", "", "", nil, compress, image.SigningOptions{}, &image.DockerRegistryOptions{}, false, additionalTags); err != nil {
+ if err := newImage.PushImageToHeuristicDestination(getContext(), destination, "", "", "", nil, compress, image.SigningOptions{}, &image.DockerRegistryOptions{}, additionalTags); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
return call.ReplyExportImage(newImage.ID())
}
// PullImage pulls an image from a registry to the image store.
-// TODO This implementation is incomplete
-func (i *LibpodAPI) PullImage(call iopodman.VarlinkCall, name string) error {
- newImage, err := i.Runtime.ImageRuntime().New(getContext(), name, "", "", nil, &image.DockerRegistryOptions{}, image.SigningOptions{}, true, false)
+func (i *LibpodAPI) PullImage(call iopodman.VarlinkCall, name string, certDir, creds, signaturePolicy string, tlsVerify bool) error {
+ var (
+ registryCreds *types.DockerAuthConfig
+ imageID string
+ )
+ if creds != "" {
+ creds, err := util.ParseRegistryCreds(creds)
+ if err != nil {
+ return err
+ }
+ registryCreds = creds
+ }
+
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerRegistryCreds: registryCreds,
+ DockerCertPath: certDir,
+ }
+ if tlsVerify {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
+ }
+
+ so := image.SigningOptions{}
+
+ if strings.HasPrefix(name, dockerarchive.Transport.Name()+":") {
+ srcRef, err := alltransports.ParseImageName(name)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing %q", name)
+ }
+ newImage, err := i.Runtime.ImageRuntime().LoadFromArchiveReference(getContext(), srcRef, signaturePolicy, nil)
+ if err != nil {
+ return errors.Wrapf(err, "error pulling image from %q", name)
+ }
+ imageID = newImage[0].ID()
+ } else {
+ newImage, err := i.Runtime.ImageRuntime().New(getContext(), name, signaturePolicy, "", nil, &dockerRegistryOptions, so, false)
+ if err != nil {
+ return call.ReplyErrorOccurred(fmt.Sprintf("unable to pull %s: %s", name, err.Error()))
+ }
+ imageID = newImage.ID()
+ }
+ return call.ReplyPullImage(imageID)
+}
+
+// ImageExists reports whether the input image exists in local storage
+func (i *LibpodAPI) ImageExists(call iopodman.VarlinkCall, name string) error {
+ _, err := i.Runtime.ImageRuntime().NewFromLocal(name)
+ if errors.Cause(err) == image.ErrNoSuchImage {
+ return call.ReplyImageExists(1)
+ }
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyImageExists(0)
+}
+
+// ContainerRunlabel ...
+func (i *LibpodAPI) ContainerRunlabel(call iopodman.VarlinkCall, input iopodman.Runlabel) error {
+ ctx := getContext()
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerCertPath: input.CertDir,
+ }
+ if !input.TlsVerify {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+ }
+
+ stdErr := os.Stderr
+ stdOut := os.Stdout
+ stdIn := os.Stdin
+
+ runLabel, imageName, err := shared.GetRunlabel(input.Label, input.Image, ctx, i.Runtime, input.Pull, input.Creds, dockerRegistryOptions, input.Authfile, input.SignaturePolicyPath, nil)
+ if err != nil {
+ return err
+ }
+ if runLabel == "" {
+ return nil
+ }
+
+ cmd, env, err := shared.GenerateRunlabelCommand(runLabel, imageName, input.Name, input.Opts, input.ExtraArgs)
if err != nil {
- return call.ReplyErrorOccurred(fmt.Sprintf("unable to pull %s: %s", name, err.Error()))
+ return err
+ }
+ if err := utils.ExecCmdWithStdStreams(stdIn, stdOut, stdErr, env, cmd[0], cmd[1:]...); err != nil {
+ return call.ReplyErrorOccurred(err.Error())
}
- return call.ReplyPullImage(newImage.ID())
+ return call.ReplyContainerRunlabel()
}
diff --git a/pkg/varlinkapi/mount.go b/pkg/varlinkapi/mount.go
new file mode 100644
index 000000000..84e6b2709
--- /dev/null
+++ b/pkg/varlinkapi/mount.go
@@ -0,0 +1,49 @@
+package varlinkapi
+
+import (
+ "github.com/containers/libpod/cmd/podman/varlink"
+)
+
+// ListContainerMounts ...
+func (i *LibpodAPI) ListContainerMounts(call iopodman.VarlinkCall) error {
+ var mounts []string
+ allContainers, err := i.Runtime.GetAllContainers()
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ for _, container := range allContainers {
+ mounted, mountPoint, err := container.Mounted()
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if mounted {
+ mounts = append(mounts, mountPoint)
+ }
+ }
+ return call.ReplyListContainerMounts(mounts)
+}
+
+// MountContainer ...
+func (i *LibpodAPI) MountContainer(call iopodman.VarlinkCall, name string) error {
+ container, err := i.Runtime.LookupContainer(name)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ path, err := container.Mount()
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyMountContainer(path)
+}
+
+// UnmountContainer ...
+func (i *LibpodAPI) UnmountContainer(call iopodman.VarlinkCall, name string, force bool) error {
+ container, err := i.Runtime.LookupContainer(name)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if err := container.Unmount(force); err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyUnmountContainer()
+}
diff --git a/pkg/varlinkapi/pods.go b/pkg/varlinkapi/pods.go
index 7930a956f..6e758786a 100644
--- a/pkg/varlinkapi/pods.go
+++ b/pkg/varlinkapi/pods.go
@@ -2,6 +2,7 @@ package varlinkapi
import (
"encoding/json"
+ "github.com/containers/libpod/pkg/rootless"
"syscall"
"github.com/containers/libpod/cmd/podman/shared"
@@ -12,6 +13,10 @@ import (
// CreatePod ...
func (i *LibpodAPI) CreatePod(call iopodman.VarlinkCall, create iopodman.PodCreate) error {
var options []libpod.PodCreateOption
+
+ if create.InfraCommand != "" || create.InfraImage != "" {
+ return call.ReplyErrorOccurred("the infra-command and infra-image options are not supported yet")
+ }
if create.CgroupParent != "" {
options = append(options, libpod.WithPodCgroupParent(create.CgroupParent))
}
@@ -27,6 +32,21 @@ func (i *LibpodAPI) CreatePod(call iopodman.VarlinkCall, create iopodman.PodCrea
if len(create.Share) == 0 && create.Infra {
return call.ReplyErrorOccurred("You must share kernel namespaces to run an infra container")
}
+
+ if len(create.Publish) > 0 {
+ if !create.Infra {
+ return call.ReplyErrorOccurred("you must have an infra container to publish port bindings to the host")
+ }
+ if rootless.IsRootless() {
+ return call.ReplyErrorOccurred("rootless networking does not allow port binding to the host")
+ }
+ portBindings, err := shared.CreatePortBindings(create.Publish)
+ if err != nil {
+ return err
+ }
+ options = append(options, libpod.WithInfraContainerPorts(portBindings))
+
+ }
if create.Infra {
options = append(options, libpod.WithInfraContainer())
nsOptions, err := shared.GetNamespaceOptions(create.Share)
@@ -120,12 +140,12 @@ func (i *LibpodAPI) StartPod(call iopodman.VarlinkCall, name string) error {
}
// StopPod ...
-func (i *LibpodAPI) StopPod(call iopodman.VarlinkCall, name string) error {
+func (i *LibpodAPI) StopPod(call iopodman.VarlinkCall, name string, timeout int64) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
return call.ReplyPodNotFound(name)
}
- ctrErrs, err := pod.Stop(getContext(), true)
+ ctrErrs, err := pod.StopWithTimeout(getContext(), true, int(timeout))
callErr := handlePodCall(call, pod, ctrErrs, err)
if callErr != nil {
return err
diff --git a/test/README.md b/test/README.md
index a068bb4f5..fd72ecd00 100644
--- a/test/README.md
+++ b/test/README.md
@@ -1,8 +1,33 @@
![PODMAN logo](../logo/podman-logo-source.svg)
-# Integration Tests
+# Test utils
+The test utils provide common functions and structs for testing. They include two structs:
+* `PodmanTest`: Handles the *podman* command and other global resources such as the temporary
+directory. It provides basic methods, like checking podman image and pod status. Test
+suites should create their own test *struct* as a composite of `PodmanTest`, along with their
+own PodmanMakeOptions() (see the sketch below).
+
+* `PodmanSession`: Stores execution session data and related *methods*, such as getting
+command output. It can be used directly in a test suite; embed it in your own
+session struct only if you need to extend it.
+
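+A minimal sketch of how a suite might compose `PodmanTest` and provide its own
+PodmanMakeOptions() (the method signature and flags below are illustrative, not the
+actual implementation):
+
+```
+type PodmanTestIntegration struct {
+    PodmanTest
+}
+
+// PodmanMakeOptions builds the argument list used for every podman invocation.
+// The exact global options are illustrative.
+func (p *PodmanTestIntegration) PodmanMakeOptions(args []string) []string {
+    return append([]string{"--storage-driver", "vfs"}, args...)
+}
+```
+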
+## Unittest for test/utils
+To ensure that neither *tests* nor *utils* break, there are unit tests for the *functions* and
+*structs* in `test/utils`. When you add functions or structs to this *package*, please
+update both the unit tests and this documentation.
+
+### Run unit tests for test/utils
+Run the unit tests for test/utils with:
+
+```
+make localunit
+```
+
+## Structure of the test utils and test suites
+The test *utils* package lives at the same level as the test suites. Each test suite also has its
+own common functions and structs stored in `libpod_suite_test.go`.
-Our primary means of performing integration testing for libpod is with the
-[Ginkgo](https://github.com/onsi/ginkgo) BDD testing framework. This allows
+# Ginkgo test framework
+[Ginkgo](https://github.com/onsi/ginkgo) is a BDD testing framework. This allows
us to use native Golang to perform our tests and there is a strong affiliation
between Ginkgo and the Go test framework.
@@ -32,8 +57,16 @@ The gomega sources can be simply installed with the command:
GOPATH=~/go go get github.com/onsi/gomega/...
```
-### Running the integration tests
+# Integration Tests
+The integration test suite exercises the *podman* command line. It has its own structs:
+* `PodmanTestIntegration`: Integration test *struct* as a composite of `PodmanTest`. It
+sets up the global options for the *podman* command so that the results are not influenced
+by the environment of the test system.
+
+* `PodmanSessionIntegration`: This *struct* has its own *methods* for checking command
+output in JSON format, using the *structs* defined in the inspect package.
+## Running the integration tests
You can run the entire suite of integration tests with the following command:
```
@@ -46,7 +79,7 @@ switch is optional.
You can run a single file of integration tests using the go test command:
```
-GOPATH=~/go go test -v test/e2e/libpod_suite_test.go test/e2e/your_test.go
+GOPATH=~/go go test -v test/e2e/libpod_suite_test.go test/e2e/config.go test/e2e/config_amd64.go test/e2e/your_test.go
```
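+
+To run only the specs whose description matches a pattern, the Ginkgo focus flag
+can be passed through `go test` as well (the flag name and pattern below assume the
+vendored Ginkgo v1 behavior and are illustrative):
+
+```
+GOPATH=~/go go test -v test/e2e/libpod_suite_test.go test/e2e/config.go test/e2e/config_amd64.go test/e2e/your_test.go -ginkgo.focus="podman run"
+```
+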
#### Run all tests like PAPR
@@ -67,3 +100,21 @@ make shell
```
This will run a container and give you a shell and you can follow the instructions above.
+
+# System test
+System tests are used for testing the *podman* CLI in the context of a complete system. They
+require that *podman*, all of its dependencies, and the configuration are in place. The intention of
+system testing is to match real-world user/developer use-cases and environments as closely
+as possible. The orchestration of the environments and tests is left to external
+tooling.
+
+* `PodmanTestSystem`: System test *struct* as a composite of `PodmanTest`. It does not add any
+options to the command by default. When you run the system tests, you can set GLOBALOPTIONS,
+PODMAN_SUBCMD_OPTIONS or PODMAN_BINARY in the environment to run the test suite for different test matrices (see the example below).
+
+## Run system test
+You can run the tests with the following command:
+
+```
+make localsystem
+```
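+
+For example, to exercise an alternate podman binary with extra global options
+(the path and option below are illustrative):
+
+```
+PODMAN_BINARY=/usr/local/bin/podman GLOBALOPTIONS="--log-level=debug" make localsystem
+```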
diff --git a/test/bin2img/bin2img.go b/test/bin2img/bin2img.go
index 644832c77..ed493dcc1 100644
--- a/test/bin2img/bin2img.go
+++ b/test/bin2img/bin2img.go
@@ -9,6 +9,7 @@ import (
"os"
"runtime"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/storage"
"github.com/containers/image/types"
sstorage "github.com/containers/storage"
@@ -156,7 +157,7 @@ func main() {
os.Exit(1)
}
defer img.Close()
- layer, err := img.PutBlob(ctx, layerBuffer, layerInfo, false)
+ layer, err := img.PutBlob(ctx, layerBuffer, layerInfo, blobinfocache.NewMemoryCache(), false)
if err != nil {
logrus.Errorf("error preparing to write image: %v", err)
os.Exit(1)
@@ -184,7 +185,7 @@ func main() {
Digest: digest.Canonical.FromBytes(cbytes),
Size: int64(len(cbytes)),
}
- configInfo, err = img.PutBlob(ctx, bytes.NewBuffer(cbytes), configInfo, false)
+ configInfo, err = img.PutBlob(ctx, bytes.NewBuffer(cbytes), configInfo, blobinfocache.NewMemoryCache(), false)
if err != nil {
logrus.Errorf("error saving configuration: %v", err)
os.Exit(1)
diff --git a/test/e2e/attach_test.go b/test/e2e/attach_test.go
index 245ccf649..6bc576461 100644
--- a/test/e2e/attach_test.go
+++ b/test/e2e/attach_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman attach", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman attach", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index 928a76324..57322643e 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -2,9 +2,12 @@ package integration
import (
"fmt"
+ "net"
"os"
+ "strconv"
"github.com/containers/libpod/pkg/criu"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +16,7 @@ var _ = Describe("Podman checkpoint", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,11 +24,16 @@ var _ = Describe("Podman checkpoint", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
if !criu.CheckForCriu() {
Skip("CRIU is missing or too old.")
}
+ hostInfo := podmanTest.Host
+ hostDistVer, _ := strconv.Atoi(hostInfo.Version)
+ if hostInfo.Distribution == "fedora" && hostDistVer <= 29 {
+ Skip("Checkpoint tests appear to fail on older (<30) Fedora versions .")
+ }
})
AfterEach(func() {
@@ -125,4 +133,164 @@ var _ = Describe("Podman checkpoint", func() {
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
})
+
+ It("podman checkpoint latest running container", func() {
+ session1 := podmanTest.Podman([]string{"run", "-it", "--security-opt", "seccomp=unconfined", "--name", "first", "-d", ALPINE, "top"})
+ session1.WaitWithDefaultTimeout()
+ Expect(session1.ExitCode()).To(Equal(0))
+
+ session2 := podmanTest.Podman([]string{"run", "-it", "--security-opt", "seccomp=unconfined", "--name", "second", "-d", ALPINE, "top"})
+ session2.WaitWithDefaultTimeout()
+ Expect(session2.ExitCode()).To(Equal(0))
+
+ result := podmanTest.Podman([]string{"container", "checkpoint", "-l"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+
+ ps := podmanTest.Podman([]string{"ps", "-q", "--no-trunc"})
+ ps.WaitWithDefaultTimeout()
+ Expect(ps.ExitCode()).To(Equal(0))
+ Expect(ps.LineInOutputContains(session1.OutputToString())).To(BeTrue())
+ Expect(ps.LineInOutputContains(session2.OutputToString())).To(BeFalse())
+
+ result = podmanTest.Podman([]string{"container", "restore", "-l"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+ Expect(podmanTest.GetContainerStatus()).To(Not(ContainSubstring("Exited")))
+
+ result = podmanTest.Podman([]string{"rm", "-fa"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
+
+ It("podman checkpoint all running container", func() {
+ session1 := podmanTest.Podman([]string{"run", "-it", "--security-opt", "seccomp=unconfined", "--name", "first", "-d", ALPINE, "top"})
+ session1.WaitWithDefaultTimeout()
+ Expect(session1.ExitCode()).To(Equal(0))
+
+ session2 := podmanTest.Podman([]string{"run", "-it", "--security-opt", "seccomp=unconfined", "--name", "second", "-d", ALPINE, "top"})
+ session2.WaitWithDefaultTimeout()
+ Expect(session2.ExitCode()).To(Equal(0))
+
+ result := podmanTest.Podman([]string{"container", "checkpoint", "-a"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+
+ ps := podmanTest.Podman([]string{"ps", "-q", "--no-trunc"})
+ ps.WaitWithDefaultTimeout()
+ Expect(ps.ExitCode()).To(Equal(0))
+ Expect(ps.LineInOutputContains(session1.OutputToString())).To(BeFalse())
+ Expect(ps.LineInOutputContains(session2.OutputToString())).To(BeFalse())
+
+ result = podmanTest.Podman([]string{"container", "restore", "-a"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+ Expect(podmanTest.GetContainerStatus()).To(Not(ContainSubstring("Exited")))
+
+ result = podmanTest.Podman([]string{"rm", "-fa"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
+
+ It("podman checkpoint container with established tcp connections", func() {
+ Skip("Seems to not work (yet) in CI")
+ podmanTest.RestoreArtifact(redis)
+ session := podmanTest.Podman([]string{"run", "-it", "--security-opt", "seccomp=unconfined", "--network", "host", "-d", redis})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Open a network connection to the redis server
+ conn, err := net.Dial("tcp", "127.0.0.1:6379")
+ if err != nil {
+ os.Exit(1)
+ }
+ // This should fail as the container has established TCP connections
+ result := podmanTest.Podman([]string{"container", "checkpoint", "-l"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(125))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ // Now it should work thanks to "--tcp-established"
+ result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "--tcp-established"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Exited"))
+
+ // Restore should fail as the checkpoint image contains established TCP connections
+ result = podmanTest.Podman([]string{"container", "restore", "-l"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(125))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Exited"))
+
+ // Now it should work thanks to "--tcp-established"
+ result = podmanTest.Podman([]string{"container", "restore", "-l", "--tcp-established"})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ result = podmanTest.Podman([]string{"rm", "-fa"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+
+ conn.Close()
+ })
+
+ It("podman checkpoint with --leave-running", func() {
+ session := podmanTest.Podman([]string{"run", "-it", "--security-opt", "seccomp=unconfined", "-d", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToString()
+
+ // Checkpoint container, but leave it running
+ result := podmanTest.Podman([]string{"container", "checkpoint", "--leave-running", cid})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ // Make sure it is still running
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ // Stop the container
+ result = podmanTest.Podman([]string{"container", "stop", cid})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Exited"))
+
+ // Restore the stopped container from the previous checkpoint
+ result = podmanTest.Podman([]string{"container", "restore", cid})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ result = podmanTest.Podman([]string{"rm", "-fa"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
+
})
diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go
index c0e050da4..4ee5061f0 100644
--- a/test/e2e/commit_test.go
+++ b/test/e2e/commit_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman commit", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman commit", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/create_staticip_test.go b/test/e2e/create_staticip_test.go
new file mode 100644
index 000000000..17ac5cb40
--- /dev/null
+++ b/test/e2e/create_staticip_test.go
@@ -0,0 +1,88 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman create with --ip flag", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ // Cleanup the CNI networks used by the tests
+ os.RemoveAll("/var/lib/cni/networks/podman")
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("Podman create --ip with garbage address", func() {
+ result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", "114232346", ALPINE, "ls"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).ToNot(Equal(0))
+ })
+
+ It("Podman create --ip with v6 address", func() {
+ result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", "2001:db8:bad:beef::1", ALPINE, "ls"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).ToNot(Equal(0))
+ })
+
+ It("Podman create --ip with non-allocatable IP", func() {
+ result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", "203.0.113.124", ALPINE, "ls"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+
+ result = podmanTest.Podman([]string{"start", "test"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).ToNot(Equal(0))
+ })
+
+ It("Podman create with specified static IP has correct IP", func() {
+ result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", "10.88.64.128", ALPINE, "ip", "addr"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+
+ result = podmanTest.Podman([]string{"start", "test"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+
+ result = podmanTest.Podman([]string{"logs", "test"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(result.OutputToString()).To(ContainSubstring("10.88.64.128/16"))
+ })
+
+ It("Podman create two containers with the same IP", func() {
+ result := podmanTest.Podman([]string{"create", "--name", "test1", "--ip", "10.88.64.128", ALPINE, "sleep", "999"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ result = podmanTest.Podman([]string{"create", "--name", "test2", "--ip", "10.88.64.128", ALPINE, "ip", "addr"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ result = podmanTest.Podman([]string{"start", "test1"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ result = podmanTest.Podman([]string{"start", "test2"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).ToNot(Equal(0))
+ })
+})
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index c36a8e31f..684a7cd88 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -3,7 +3,9 @@ package integration
import (
"fmt"
"os"
+ "path/filepath"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +14,7 @@ var _ = Describe("Podman create", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +22,7 @@ var _ = Describe("Podman create", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -109,4 +111,93 @@ var _ = Describe("Podman create", func() {
Expect(result.ExitCode()).To(Equal(0))
Expect(result.OutputToString()).To(Equal("/bin/foo -c"))
})
+
+ It("podman create --mount flag with multiple mounts", func() {
+ vol1 := filepath.Join(podmanTest.TempDir, "vol-test1")
+ err := os.MkdirAll(vol1, 0755)
+ Expect(err).To(BeNil())
+ vol2 := filepath.Join(podmanTest.TempDir, "vol-test2")
+ err = os.MkdirAll(vol2, 0755)
+ Expect(err).To(BeNil())
+
+ session := podmanTest.Podman([]string{"create", "--name", "test", "--mount", "type=bind,src=" + vol1 + ",target=/myvol1,z", "--mount", "type=bind,src=" + vol2 + ",target=/myvol2,z", ALPINE, "touch", "/myvol2/foo.txt"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"logs", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).ToNot(ContainSubstring("cannot touch"))
+ })
+
+ It("podman create with --mount flag", func() {
+ if podmanTest.Host.Arch == "ppc64le" {
+ Skip("skip failing test on ppc64le")
+ }
+ mountPath := filepath.Join(podmanTest.TempDir, "secrets")
+ os.Mkdir(mountPath, 0755)
+ session := podmanTest.Podman([]string{"create", "--name", "test", "--rm", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"logs", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("/create/test rw"))
+
+ session = podmanTest.Podman([]string{"create", "--name", "test_ro", "--rm", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,ro", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"start", "test_ro"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"logs", "test_ro"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("/create/test ro"))
+
+ session = podmanTest.Podman([]string{"create", "--name", "test_shared", "--rm", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,shared", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"start", "test_shared"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"logs", "test_shared"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ found, matches := session.GrepString("/create/test")
+ Expect(found).Should(BeTrue())
+ Expect(matches[0]).To(ContainSubstring("rw"))
+ Expect(matches[0]).To(ContainSubstring("shared"))
+
+ mountPath = filepath.Join(podmanTest.TempDir, "scratchpad")
+ os.Mkdir(mountPath, 0755)
+ session = podmanTest.Podman([]string{"create", "--name", "test_tmpfs", "--rm", "--mount", "type=tmpfs,target=/create/test", ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"start", "test_tmpfs"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"logs", "test_tmpfs"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("/create/test rw,nosuid,nodev,noexec,relatime - tmpfs"))
+ })
+
+ It("podman create --pod automatically", func() {
+ session := podmanTest.Podman([]string{"create", "--pod", "new:foobar", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ check := podmanTest.Podman([]string{"pod", "ps", "--no-trunc"})
+ check.WaitWithDefaultTimeout()
+ match, _ := check.GrepString("foobar")
+ Expect(match).To(BeTrue())
+ })
})
diff --git a/test/e2e/diff_test.go b/test/e2e/diff_test.go
index a83bb14da..2c0060dd5 100644
--- a/test/e2e/diff_test.go
+++ b/test/e2e/diff_test.go
@@ -5,6 +5,7 @@ import (
"os"
"sort"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman diff", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman diff", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go
index 250e08704..fec80717f 100644
--- a/test/e2e/exec_test.go
+++ b/test/e2e/exec_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman exec", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman exec", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/exists_test.go b/test/e2e/exists_test.go
new file mode 100644
index 000000000..d9652de4b
--- /dev/null
+++ b/test/e2e/exists_test.go
@@ -0,0 +1,117 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman image|container exists", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+
+ })
+ It("podman image exists in local storage by fq name", func() {
+ session := podmanTest.Podman([]string{"image", "exists", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+ It("podman image exists in local storage by short name", func() {
+ session := podmanTest.Podman([]string{"image", "exists", "alpine"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+ It("podman image does not exist in local storage", func() {
+ session := podmanTest.Podman([]string{"image", "exists", "alpine9999"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(1))
+ })
+ It("podman container exists in local storage by name", func() {
+ setup := podmanTest.RunTopContainer("foobar")
+ setup.WaitWithDefaultTimeout()
+ Expect(setup.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"container", "exists", "foobar"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+ It("podman container exists in local storage by container ID", func() {
+ setup := podmanTest.RunTopContainer("")
+ setup.WaitWithDefaultTimeout()
+ Expect(setup.ExitCode()).To(Equal(0))
+ cid := setup.OutputToString()
+
+ session := podmanTest.Podman([]string{"container", "exists", cid})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+ It("podman container exists in local storage by short container ID", func() {
+ setup := podmanTest.RunTopContainer("")
+ setup.WaitWithDefaultTimeout()
+ Expect(setup.ExitCode()).To(Equal(0))
+ cid := setup.OutputToString()[0:12]
+
+ session := podmanTest.Podman([]string{"container", "exists", cid})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+ It("podman container does not exist in local storage", func() {
+ session := podmanTest.Podman([]string{"container", "exists", "foobar"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(1))
+ })
+
+ It("podman pod exists in local storage by name", func() {
+ setup, rc, _ := podmanTest.CreatePod("foobar")
+ setup.WaitWithDefaultTimeout()
+ Expect(rc).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"pod", "exists", "foobar"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+ It("podman pod exists in local storage by container ID", func() {
+ setup, rc, podID := podmanTest.CreatePod("")
+ setup.WaitWithDefaultTimeout()
+ Expect(rc).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"pod", "exists", podID})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+ It("podman pod exists in local storage by short container ID", func() {
+ setup, rc, podID := podmanTest.CreatePod("")
+ setup.WaitWithDefaultTimeout()
+ Expect(rc).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"pod", "exists", podID[0:12]})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+ It("podman pod does not exist in local storage", func() {
+ session := podmanTest.Podman([]string{"pod", "exists", "foobar"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(1))
+ })
+})
diff --git a/test/e2e/export_test.go b/test/e2e/export_test.go
index c11fd777b..42ea45041 100644
--- a/test/e2e/export_test.go
+++ b/test/e2e/export_test.go
@@ -5,6 +5,7 @@ import (
"os"
"path/filepath"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman export", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman export", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
new file mode 100644
index 000000000..0ee078455
--- /dev/null
+++ b/test/e2e/generate_kube_test.go
@@ -0,0 +1,106 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ "github.com/ghodss/yaml"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman generate kube", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+
+ })
+
+ It("podman generate pod kube on bogus object", func() {
+ session := podmanTest.Podman([]string{"generate", "kube", "foobar"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
+
+ It("podman generate service kube on bogus object", func() {
+ session := podmanTest.Podman([]string{"generate", "kube", "-s", "foobar"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
+
+ It("podman generate kube on container", func() {
+ session := podmanTest.RunTopContainer("top")
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", "top"})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ _, err := yaml.Marshal(kube.OutputToString())
+ Expect(err).To(BeNil())
+ })
+
+ It("podman generate service kube on container", func() {
+ session := podmanTest.RunTopContainer("top")
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", "-s", "top"})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ _, err := yaml.Marshal(kube.OutputToString())
+ Expect(err).To(BeNil())
+ })
+
+ It("podman generate kube on pod", func() {
+ _, rc, _ := podmanTest.CreatePod("toppod")
+ Expect(rc).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("topcontainer", "toppod")
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", "toppod"})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ _, err := yaml.Marshal(kube.OutputToString())
+ Expect(err).To(BeNil())
+ })
+
+ It("podman generate service kube on pod", func() {
+ _, rc, _ := podmanTest.CreatePod("toppod")
+ Expect(rc).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("topcontainer", "toppod")
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", "-s", "toppod"})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ _, err := yaml.Marshal(kube.OutputToString())
+ Expect(err).To(BeNil())
+ })
+})
diff --git a/test/e2e/history_test.go b/test/e2e/history_test.go
index d4b5ad5c0..9bec9ad13 100644
--- a/test/e2e/history_test.go
+++ b/test/e2e/history_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman history", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman history", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/images_test.go b/test/e2e/images_test.go
index a8854d08d..af32c032b 100644
--- a/test/e2e/images_test.go
+++ b/test/e2e/images_test.go
@@ -5,6 +5,7 @@ import (
"os"
"sort"
+ . "github.com/containers/libpod/test/utils"
"github.com/docker/go-units"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -14,7 +15,7 @@ var _ = Describe("Podman images", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -22,7 +23,7 @@ var _ = Describe("Podman images", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -62,6 +63,10 @@ var _ = Describe("Podman images", func() {
session.LineInOutputContainsTag("foo", "c")
session.LineInOutputContainsTag("bar", "a")
session.LineInOutputContainsTag("bar", "b")
+ session = podmanTest.Podman([]string{"images", "-qn"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(BeNumerically("==", 2))
})
It("podman images with digests", func() {
diff --git a/test/e2e/import_test.go b/test/e2e/import_test.go
index 80773cf8b..9ed4593c6 100644
--- a/test/e2e/import_test.go
+++ b/test/e2e/import_test.go
@@ -5,6 +5,7 @@ import (
"os"
"path/filepath"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman import", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman import", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/info_test.go b/test/e2e/info_test.go
index dd8645223..e972c86c8 100644
--- a/test/e2e/info_test.go
+++ b/test/e2e/info_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman Info", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman Info", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
})
AfterEach(func() {
diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go
index bff56189e..87c4db935 100644
--- a/test/e2e/inspect_test.go
+++ b/test/e2e/inspect_test.go
@@ -5,6 +5,7 @@ import (
"os"
"strings"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman inspect", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman inspect", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/kill_test.go b/test/e2e/kill_test.go
index fdf42f2b6..913a843cb 100644
--- a/test/e2e/kill_test.go
+++ b/test/e2e/kill_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman kill", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman kill", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index ec274cc34..d312124ab 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -1,22 +1,18 @@
package integration
import (
- "bufio"
- "context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
- "runtime"
"strings"
"testing"
- "time"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/inspect"
- "github.com/containers/storage/pkg/parsers/kernel"
+ . "github.com/containers/libpod/test/utils"
"github.com/containers/storage/pkg/reexec"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -35,14 +31,9 @@ var (
defaultWaitTimeout = 90
)
-// PodmanSession wrapps the gexec.session so we can extend it
-type PodmanSession struct {
- *gexec.Session
-}
-
-// PodmanTest struct for command line options
-type PodmanTest struct {
- PodmanBinary string
+// PodmanTestIntegration struct for command line options
+type PodmanTestIntegration struct {
+ PodmanTest
ConmonBinary string
CrioRoot string
CNIConfigDir string
@@ -50,17 +41,13 @@ type PodmanTest struct {
RunRoot string
StorageOptions string
SignaturePolicyPath string
- ArtifactPath string
- TempDir string
CgroupManager string
Host HostOS
}
-// HostOS is a simple struct for the test os
-type HostOS struct {
- Distribution string
- Version string
- Arch string
+// PodmanSessionIntegration struct for command line session
+type PodmanSessionIntegration struct {
+ *PodmanSession
}
// TestLibpod ginkgo master function
@@ -80,7 +67,7 @@ var _ = BeforeSuite(func() {
//Cache images
cwd, _ := os.Getwd()
INTEGRATION_ROOT = filepath.Join(cwd, "../../")
- podman := PodmanCreate("/tmp")
+ podman := PodmanTestCreate("/tmp")
podman.ArtifactPath = ARTIFACT_DIR
if _, err := os.Stat(ARTIFACT_DIR); os.IsNotExist(err) {
if err = os.Mkdir(ARTIFACT_DIR, 0777); err != nil {
@@ -110,13 +97,8 @@ var _ = BeforeSuite(func() {
}
})
-// CreateTempDirin
-func CreateTempDirInTempDir() (string, error) {
- return ioutil.TempDir("", "podman_test")
-}
-
-// PodmanCreate creates a PodmanTest instance for the tests
-func PodmanCreate(tempDir string) PodmanTest {
+// PodmanTestCreate creates a PodmanTestIntegration instance for the tests
+func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
host := GetHostDistributionInfo()
cwd, _ := os.Getwd()
@@ -127,7 +109,7 @@ func PodmanCreate(tempDir string) PodmanTest {
}
conmonBinary := filepath.Join("/usr/libexec/podman/conmon")
altConmonBinary := "/usr/libexec/crio/conmon"
- if _, err := os.Stat(altConmonBinary); err == nil {
+ if _, err := os.Stat(conmonBinary); os.IsNotExist(err) {
conmonBinary = altConmonBinary
}
if os.Getenv("CONMON_BINARY") != "" {
@@ -157,8 +139,12 @@ func PodmanCreate(tempDir string) PodmanTest {
CNIConfigDir := "/etc/cni/net.d"
- p := PodmanTest{
- PodmanBinary: podmanBinary,
+ p := &PodmanTestIntegration{
+ PodmanTest: PodmanTest{
+ PodmanBinary: podmanBinary,
+ ArtifactPath: ARTIFACT_DIR,
+ TempDir: tempDir,
+ },
ConmonBinary: conmonBinary,
CrioRoot: filepath.Join(tempDir, "crio"),
CNIConfigDir: CNIConfigDir,
@@ -166,73 +152,56 @@ func PodmanCreate(tempDir string) PodmanTest {
RunRoot: filepath.Join(tempDir, "crio-run"),
StorageOptions: storageOptions,
SignaturePolicyPath: filepath.Join(INTEGRATION_ROOT, "test/policy.json"),
- ArtifactPath: ARTIFACT_DIR,
- TempDir: tempDir,
CgroupManager: cgroupManager,
Host: host,
}
// Setup registries.conf ENV variable
p.setDefaultRegistriesConfigEnv()
+ // Override the PodmanMakeOptions function with the integration-specific one
+ p.PodmanMakeOptions = p.makeOptions
return p
}
//MakeOptions assembles all the podman main options
-func (p *PodmanTest) MakeOptions() []string {
- return strings.Split(fmt.Sprintf("--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s",
+func (p *PodmanTestIntegration) makeOptions(args []string) []string {
+ podmanOptions := strings.Split(fmt.Sprintf("--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s",
p.CrioRoot, p.RunRoot, p.RunCBinary, p.ConmonBinary, p.CNIConfigDir, p.CgroupManager), " ")
-}
-
-// Podman is the exec call to podman on the filesystem, uid and gid the credentials to use
-func (p *PodmanTest) PodmanAsUser(args []string, uid, gid uint32, env []string) *PodmanSession {
- podmanOptions := p.MakeOptions()
if os.Getenv("HOOK_OPTION") != "" {
podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION"))
}
podmanOptions = append(podmanOptions, strings.Split(p.StorageOptions, " ")...)
podmanOptions = append(podmanOptions, args...)
- if env == nil {
- fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
- } else {
- fmt.Printf("Running: (env: %v) %s %s\n", env, p.PodmanBinary, strings.Join(podmanOptions, " "))
- }
- var command *exec.Cmd
-
- if uid != 0 || gid != 0 {
- nsEnterOpts := append([]string{"--userspec", fmt.Sprintf("%d:%d", uid, gid), "/", p.PodmanBinary}, podmanOptions...)
- command = exec.Command("chroot", nsEnterOpts...)
- } else {
- command = exec.Command(p.PodmanBinary, podmanOptions...)
- }
- if env != nil {
- command.Env = env
- }
-
- session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
- if err != nil {
- Fail(fmt.Sprintf("unable to run podman command: %s\n%v", strings.Join(podmanOptions, " "), err))
- }
- return &PodmanSession{session}
+ return podmanOptions
}
// Podman is the exec call to podman on the filesystem
-func (p *PodmanTest) Podman(args []string) *PodmanSession {
- return p.PodmanAsUser(args, 0, 0, nil)
+func (p *PodmanTestIntegration) Podman(args []string) *PodmanSessionIntegration {
+ podmanSession := p.PodmanBase(args)
+ return &PodmanSessionIntegration{podmanSession}
}
-//WaitForContainer waits on a started container
-func WaitForContainer(p *PodmanTest) bool {
- for i := 0; i < 10; i++ {
- if p.NumberOfRunningContainers() == 1 {
- return true
- }
- time.Sleep(1 * time.Second)
+// PodmanAsUser is the exec call to podman on the filesystem with the specified uid/gid and environment
+func (p *PodmanTestIntegration) PodmanAsUser(args []string, uid, gid uint32, env []string) *PodmanSessionIntegration {
+ podmanSession := p.PodmanAsUserBase(args, uid, gid, env)
+ return &PodmanSessionIntegration{podmanSession}
+}
+
+// PodmanPID execs podman and returns its PID
+func (p *PodmanTestIntegration) PodmanPID(args []string) (*PodmanSessionIntegration, int) {
+ podmanOptions := p.MakeOptions(args)
+ fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
+ command := exec.Command(p.PodmanBinary, podmanOptions...)
+ session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
+ if err != nil {
+ Fail(fmt.Sprintf("unable to run podman command: %s", strings.Join(podmanOptions, " ")))
}
- return false
+ podmanSession := &PodmanSession{session}
+ return &PodmanSessionIntegration{podmanSession}, command.Process.Pid
}
// Cleanup cleans up the temporary store
-func (p *PodmanTest) Cleanup() {
+func (p *PodmanTestIntegration) Cleanup() {
// Remove all containers
stopall := p.Podman([]string{"stop", "-a", "--timeout", "0"})
stopall.WaitWithDefaultTimeout()
@@ -248,7 +217,7 @@ func (p *PodmanTest) Cleanup() {
}
// CleanupPod cleans up the temporary store
-func (p *PodmanTest) CleanupPod() {
+func (p *PodmanTestIntegration) CleanupPod() {
// Remove all containers
session := p.Podman([]string{"pod", "rm", "-fa"})
session.Wait(90)
@@ -258,103 +227,37 @@ func (p *PodmanTest) CleanupPod() {
}
}
-// GrepString takes session output and behaves like grep. it returns a bool
-// if successful and an array of strings on positive matches
-func (s *PodmanSession) GrepString(term string) (bool, []string) {
- var (
- greps []string
- matches bool
- )
-
- for _, line := range strings.Split(s.OutputToString(), "\n") {
- if strings.Contains(line, term) {
- matches = true
- greps = append(greps, line)
- }
+// CleanupVolume cleans up the temporary store
+func (p *PodmanTestIntegration) CleanupVolume() {
+ // Remove all volumes
+ session := p.Podman([]string{"volume", "rm", "-fa"})
+ session.Wait(90)
+ // Nuke tempdir
+ if err := os.RemoveAll(p.TempDir); err != nil {
+ fmt.Printf("%q\n", err)
}
- return matches, greps
}
-// Pull Images pulls multiple images
-func (p *PodmanTest) PullImages(images []string) error {
+// PullImages pulls multiple images
+func (p *PodmanTestIntegration) PullImages(images []string) error {
for _, i := range images {
p.PullImage(i)
}
return nil
}
-// Pull Image a single image
+// PullImage pulls a single image
// TODO should the timeout be configurable?
-func (p *PodmanTest) PullImage(image string) error {
+func (p *PodmanTestIntegration) PullImage(image string) error {
session := p.Podman([]string{"pull", image})
session.Wait(60)
Expect(session.ExitCode()).To(Equal(0))
return nil
}
-// OutputToString formats session output to string
-func (s *PodmanSession) OutputToString() string {
- fields := strings.Fields(fmt.Sprintf("%s", s.Out.Contents()))
- return strings.Join(fields, " ")
-}
-
-// OutputToStringArray returns the output as a []string
-// where each array item is a line split by newline
-func (s *PodmanSession) OutputToStringArray() []string {
- var results []string
- output := fmt.Sprintf("%s", s.Out.Contents())
- for _, line := range strings.Split(output, "\n") {
- if line != "" {
- results = append(results, line)
- }
- }
- return results
-}
-
-// ErrorGrepString takes session stderr output and behaves like grep. it returns a bool
-// if successful and an array of strings on positive matches
-func (s *PodmanSession) ErrorGrepString(term string) (bool, []string) {
- var (
- greps []string
- matches bool
- )
-
- for _, line := range strings.Split(s.ErrorToString(), "\n") {
- if strings.Contains(line, term) {
- matches = true
- greps = append(greps, line)
- }
- }
- return matches, greps
-}
-
-// ErrorToString formats session stderr to string
-func (s *PodmanSession) ErrorToString() string {
- fields := strings.Fields(fmt.Sprintf("%s", s.Err.Contents()))
- return strings.Join(fields, " ")
-}
-
-// ErrorToStringArray returns the stderr output as a []string
-// where each array item is a line split by newline
-func (s *PodmanSession) ErrorToStringArray() []string {
- output := fmt.Sprintf("%s", s.Err.Contents())
- return strings.Split(output, "\n")
-}
-
-// IsJSONOutputValid attempts to unmarshal the session buffer
-// and if successful, returns true, else false
-func (s *PodmanSession) IsJSONOutputValid() bool {
- var i interface{}
- if err := json.Unmarshal(s.Out.Contents(), &i); err != nil {
- fmt.Println(err)
- return false
- }
- return true
-}
-
// InspectContainerToJSON takes the session output of an inspect
// container and returns json
-func (s *PodmanSession) InspectContainerToJSON() []inspect.ContainerData {
+func (s *PodmanSessionIntegration) InspectContainerToJSON() []inspect.ContainerData {
var i []inspect.ContainerData
err := json.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
@@ -362,7 +265,7 @@ func (s *PodmanSession) InspectContainerToJSON() []inspect.ContainerData {
}
// InspectPodToJSON takes the sessions output from a pod inspect and returns json
-func (s *PodmanSession) InspectPodToJSON() libpod.PodInspect {
+func (s *PodmanSessionIntegration) InspectPodToJSON() libpod.PodInspect {
var i libpod.PodInspect
err := json.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
@@ -371,30 +274,15 @@ func (s *PodmanSession) InspectPodToJSON() libpod.PodInspect {
// InspectImageJSON takes the session output of an inspect
// image and returns json
-func (s *PodmanSession) InspectImageJSON() []inspect.ImageData {
+func (s *PodmanSessionIntegration) InspectImageJSON() []inspect.ImageData {
var i []inspect.ImageData
err := json.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
return i
}
-func (s *PodmanSession) WaitWithDefaultTimeout() {
- s.Wait(defaultWaitTimeout)
- fmt.Println("output:", s.OutputToString())
-}
-
-// SystemExec is used to exec a system command to check its exit code or output
-func (p *PodmanTest) SystemExec(command string, args []string) *PodmanSession {
- c := exec.Command(command, args...)
- session, err := gexec.Start(c, GinkgoWriter, GinkgoWriter)
- if err != nil {
- Fail(fmt.Sprintf("unable to run command: %s %s", command, strings.Join(args, " ")))
- }
- return &PodmanSession{session}
-}
-
// CreateArtifact creates a cached image in the artifact dir
-func (p *PodmanTest) CreateArtifact(image string) error {
+func (p *PodmanTestIntegration) CreateArtifact(image string) error {
if os.Getenv("NO_TEST_CACHE") != "" {
return nil
}
@@ -415,7 +303,7 @@ func (p *PodmanTest) CreateArtifact(image string) error {
}
// RestoreArtifact puts the cached image into our test store
-func (p *PodmanTest) RestoreArtifact(image string) error {
+func (p *PodmanTestIntegration) RestoreArtifact(image string) error {
fmt.Printf("Restoring %s...\n", image)
dest := strings.Split(image, "/")
destName := fmt.Sprintf("/tmp/%s.tar", strings.Replace(strings.Join(strings.Split(dest[len(dest)-1], "/"), ""), ":", "-", -1))
@@ -425,7 +313,7 @@ func (p *PodmanTest) RestoreArtifact(image string) error {
}
// RestoreAllArtifacts unpacks all cached images
-func (p *PodmanTest) RestoreAllArtifacts() error {
+func (p *PodmanTestIntegration) RestoreAllArtifacts() error {
if os.Getenv("NO_TEST_CACHE") != "" {
return nil
}
@@ -439,7 +327,7 @@ func (p *PodmanTest) RestoreAllArtifacts() error {
// CreatePod creates a pod with no infra container
// it optionally takes a pod name
-func (p *PodmanTest) CreatePod(name string) (*PodmanSession, int, string) {
+func (p *PodmanTestIntegration) CreatePod(name string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"pod", "create", "--infra=false", "--share", ""}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
@@ -451,7 +339,7 @@ func (p *PodmanTest) CreatePod(name string) (*PodmanSession, int, string) {
//RunTopContainer runs a simple container in the background that
// runs top. If the name passed != "", it will have a name
-func (p *PodmanTest) RunTopContainer(name string) *PodmanSession {
+func (p *PodmanTestIntegration) RunTopContainer(name string) *PodmanSessionIntegration {
var podmanArgs = []string{"run"}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
@@ -460,7 +348,7 @@ func (p *PodmanTest) RunTopContainer(name string) *PodmanSession {
return p.Podman(podmanArgs)
}
-func (p *PodmanTest) RunTopContainerInPod(name, pod string) *PodmanSession {
+func (p *PodmanTestIntegration) RunTopContainerInPod(name, pod string) *PodmanSessionIntegration {
var podmanArgs = []string{"run", "--pod", pod}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
@@ -471,7 +359,7 @@ func (p *PodmanTest) RunTopContainerInPod(name, pod string) *PodmanSession {
//RunLsContainer runs a simple container in the background that
// simply runs ls. If the name passed != "", it will have a name
-func (p *PodmanTest) RunLsContainer(name string) (*PodmanSession, int, string) {
+func (p *PodmanTestIntegration) RunLsContainer(name string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"run"}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
@@ -482,7 +370,7 @@ func (p *PodmanTest) RunLsContainer(name string) (*PodmanSession, int, string) {
return session, session.ExitCode(), session.OutputToString()
}
-func (p *PodmanTest) RunLsContainerInPod(name, pod string) (*PodmanSession, int, string) {
+func (p *PodmanTestIntegration) RunLsContainerInPod(name, pod string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"run", "--pod", pod}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
@@ -493,147 +381,9 @@ func (p *PodmanTest) RunLsContainerInPod(name, pod string) (*PodmanSession, int,
return session, session.ExitCode(), session.OutputToString()
}
-//NumberOfContainersRunning returns an int of how many
-// containers are currently running.
-func (p *PodmanTest) NumberOfContainersRunning() int {
- var containers []string
- ps := p.Podman([]string{"ps", "-q"})
- ps.WaitWithDefaultTimeout()
- Expect(ps.ExitCode()).To(Equal(0))
- for _, i := range ps.OutputToStringArray() {
- if i != "" {
- containers = append(containers, i)
- }
- }
- return len(containers)
-}
-
-// NumberOfContainers returns an int of how many
-// containers are currently defined.
-func (p *PodmanTest) NumberOfContainers() int {
- var containers []string
- ps := p.Podman([]string{"ps", "-aq"})
- ps.WaitWithDefaultTimeout()
- Expect(ps.ExitCode()).To(Equal(0))
- for _, i := range ps.OutputToStringArray() {
- if i != "" {
- containers = append(containers, i)
- }
- }
- return len(containers)
-}
-
-// NumberOfPods returns an int of how many
-// pods are currently defined.
-func (p *PodmanTest) NumberOfPods() int {
- var pods []string
- ps := p.Podman([]string{"pod", "ps", "-q"})
- ps.WaitWithDefaultTimeout()
- Expect(ps.ExitCode()).To(Equal(0))
- for _, i := range ps.OutputToStringArray() {
- if i != "" {
- pods = append(pods, i)
- }
- }
- return len(pods)
-}
-
-// NumberOfRunningContainers returns an int of how many containers are currently
-// running
-func (p *PodmanTest) NumberOfRunningContainers() int {
- var containers []string
- ps := p.Podman([]string{"ps", "-q"})
- ps.WaitWithDefaultTimeout()
- Expect(ps.ExitCode()).To(Equal(0))
- for _, i := range ps.OutputToStringArray() {
- if i != "" {
- containers = append(containers, i)
- }
- }
- return len(containers)
-}
-
-// StringInSlice determines if a string is in a string slice, returns bool
-func StringInSlice(s string, sl []string) bool {
- for _, i := range sl {
- if i == s {
- return true
- }
- }
- return false
-}
-
-//LineInOutputStartsWith returns true if a line in a
-// session output starts with the supplied string
-func (s *PodmanSession) LineInOuputStartsWith(term string) bool {
- for _, i := range s.OutputToStringArray() {
- if strings.HasPrefix(i, term) {
- return true
- }
- }
- return false
-}
-
-//LineInOutputContains returns true if a line in a
-// session output starts with the supplied string
-func (s *PodmanSession) LineInOutputContains(term string) bool {
- for _, i := range s.OutputToStringArray() {
- if strings.Contains(i, term) {
- return true
- }
- }
- return false
-}
-
-//tagOutPutToMap parses each string in imagesOutput and returns
-// a map of repo:tag pairs. Notice, the first array item will
-// be skipped as it's considered to be the header.
-func tagOutputToMap(imagesOutput []string) map[string]string {
- m := make(map[string]string)
- // iterate over output but skip the header
- for _, i := range imagesOutput[1:] {
- tmp := []string{}
- for _, x := range strings.Split(i, " ") {
- if x != "" {
- tmp = append(tmp, x)
- }
- }
- // podman-images(1) return a list like output
- // in the format of "Repository Tag [...]"
- if len(tmp) < 2 {
- continue
- }
- m[tmp[0]] = tmp[1]
- }
- return m
-}
-
-//LineInOutputContainsTag returns true if a line in the
-// session's output contains the repo-tag pair as returned
-// by podman-images(1).
-func (s *PodmanSession) LineInOutputContainsTag(repo, tag string) bool {
- tagMap := tagOutputToMap(s.OutputToStringArray())
- for r, t := range tagMap {
- if repo == r && tag == t {
- return true
- }
- }
- return false
-}
-
-//GetContainerStatus returns the containers state.
-// This function assumes only one container is active.
-func (p *PodmanTest) GetContainerStatus() string {
- var podmanArgs = []string{"ps"}
- podmanArgs = append(podmanArgs, "--all", "--format={{.Status}}")
- session := p.Podman(podmanArgs)
- session.WaitWithDefaultTimeout()
- return session.OutputToString()
-}
-
// BuildImage uses podman build and buildah to build an image
// called imageName based on a string dockerfile
-func (p *PodmanTest) BuildImage(dockerfile, imageName string, layers string) {
+func (p *PodmanTestIntegration) BuildImage(dockerfile, imageName string, layers string) {
dockerfilePath := filepath.Join(p.TempDir, "Dockerfile")
err := ioutil.WriteFile(dockerfilePath, []byte(dockerfile), 0755)
Expect(err).To(BeNil())
@@ -642,34 +392,12 @@ func (p *PodmanTest) BuildImage(dockerfile, imageName string, layers string) {
Expect(session.ExitCode()).To(Equal(0))
}
-//GetHostDistributionInfo returns a struct with its distribution name and version
-func GetHostDistributionInfo() HostOS {
- f, err := os.Open("/etc/os-release")
- defer f.Close()
- if err != nil {
- return HostOS{}
- }
-
- l := bufio.NewScanner(f)
- host := HostOS{}
- host.Arch = runtime.GOARCH
- for l.Scan() {
- if strings.HasPrefix(l.Text(), "ID=") {
- host.Distribution = strings.Replace(strings.TrimSpace(strings.Join(strings.Split(l.Text(), "=")[1:], "")), "\"", "", -1)
- }
- if strings.HasPrefix(l.Text(), "VERSION_ID=") {
- host.Version = strings.Replace(strings.TrimSpace(strings.Join(strings.Split(l.Text(), "=")[1:], "")), "\"", "", -1)
- }
- }
- return host
-}
-
-func (p *PodmanTest) setDefaultRegistriesConfigEnv() {
+func (p *PodmanTestIntegration) setDefaultRegistriesConfigEnv() {
defaultFile := filepath.Join(INTEGRATION_ROOT, "test/registries.conf")
os.Setenv("REGISTRIES_CONFIG_PATH", defaultFile)
}
-func (p *PodmanTest) setRegistriesConfigEnv(b []byte) {
+func (p *PodmanTestIntegration) setRegistriesConfigEnv(b []byte) {
outfile := filepath.Join(p.TempDir, "registries.conf")
os.Setenv("REGISTRIES_CONFIG_PATH", outfile)
ioutil.WriteFile(outfile, b, 0644)
@@ -678,81 +406,3 @@ func (p *PodmanTest) setRegistriesConfigEnv(b []byte) {
func resetRegistriesConfigEnv() {
os.Setenv("REGISTRIES_CONFIG_PATH", "")
}
-
-// IsKernelNewThan compares the current kernel version to one provided. If
-// the kernel is equal to or greater, returns true
-func IsKernelNewThan(version string) (bool, error) {
- inputVersion, err := kernel.ParseRelease(version)
- if err != nil {
- return false, err
- }
- kv, err := kernel.GetKernelVersion()
- if err == nil {
- return false, err
- }
- // CompareKernelVersion compares two kernel.VersionInfo structs.
- // Returns -1 if a < b, 0 if a == b, 1 it a > b
- result := kernel.CompareKernelVersion(*kv, *inputVersion)
- if result >= 0 {
- return true, nil
- }
- return false, nil
-
-}
-
-//Wait process or service inside container start, and ready to be used.
-func WaitContainerReady(p *PodmanTest, id string, expStr string, timeout int, step int) bool {
- startTime := time.Now()
- s := p.Podman([]string{"logs", id})
- s.WaitWithDefaultTimeout()
- fmt.Println(startTime)
- for {
- if time.Since(startTime) >= time.Duration(timeout)*time.Second {
- return false
- }
- if strings.Contains(s.OutputToString(), expStr) {
- return true
- }
- time.Sleep(time.Duration(step) * time.Second)
- s = p.Podman([]string{"logs", id})
- s.WaitWithDefaultTimeout()
- }
-}
-
-//IsCommandAvaible check if command exist
-func IsCommandAvailable(command string) bool {
- check := exec.Command("bash", "-c", strings.Join([]string{"command -v", command}, " "))
- err := check.Run()
- if err != nil {
- return false
- }
- return true
-}
-
-// WriteJsonFile write json format data to a json file
-func WriteJsonFile(data []byte, filePath string) error {
- var jsonData map[string]interface{}
- json.Unmarshal(data, &jsonData)
- formatJson, _ := json.MarshalIndent(jsonData, "", " ")
- return ioutil.WriteFile(filePath, formatJson, 0644)
-}
-
-func getTestContext() context.Context {
- return context.Background()
-}
-
-func containerized() bool {
- container := os.Getenv("container")
- if container != "" {
- return true
- }
- b, err := ioutil.ReadFile("/proc/1/cgroup")
- if err != nil {
- // shrug, if we cannot read that file, return false
- return false
- }
- if strings.Index(string(b), "docker") > -1 {
- return true
- }
- return false
-}
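For readers following the harness refactor above: helpers that used to hang off PodmanTest (SystemExec, the output matchers, CreateTempDirInTempDir, WaitForContainer) now live in test/utils and are dot-imported, while each suite builds a *PodmanTestIntegration via PodmanTestCreate and gets *PodmanSessionIntegration sessions back. A minimal sketch of the resulting test skeleton, modeled on the files changed in this patch (the Describe body and the "version" test are illustrative only, not part of the patch):

package integration

import (
	"os"

	. "github.com/containers/libpod/test/utils"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Podman example", func() {
	var (
		tempdir    string
		err        error
		podmanTest *PodmanTestIntegration
	)

	BeforeEach(func() {
		// CreateTempDirInTempDir is now provided by test/utils.
		tempdir, err = CreateTempDirInTempDir()
		if err != nil {
			os.Exit(1)
		}
		podmanTest = PodmanTestCreate(tempdir)
		podmanTest.RestoreAllArtifacts()
	})

	AfterEach(func() {
		podmanTest.Cleanup()
	})

	It("runs podman version through the integration harness", func() {
		// Podman() returns a *PodmanSessionIntegration wrapping the shared
		// PodmanSession type from test/utils.
		session := podmanTest.Podman([]string{"version"})
		session.WaitWithDefaultTimeout()
		Expect(session.ExitCode()).To(Equal(0))

		// SystemExec is a free function from test/utils, no longer a method
		// on the test struct.
		check := SystemExec("true", []string{})
		check.WaitWithDefaultTimeout()
		Expect(check.ExitCode()).To(Equal(0))
	})
})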
diff --git a/test/e2e/load_test.go b/test/e2e/load_test.go
index 21e8a4859..4d7007191 100644
--- a/test/e2e/load_test.go
+++ b/test/e2e/load_test.go
@@ -5,6 +5,7 @@ import (
"os"
"path/filepath"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman load", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman load", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -55,7 +56,7 @@ var _ = Describe("Podman load", func() {
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
- compress := podmanTest.SystemExec("gzip", []string{outfile})
+ compress := SystemExec("gzip", []string{outfile})
compress.WaitWithDefaultTimeout()
outfile = outfile + ".gz"
@@ -253,7 +254,7 @@ var _ = Describe("Podman load", func() {
save := podmanTest.Podman([]string{"save", "-o", outfile, BB})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
- session := podmanTest.SystemExec("xz", []string{outfile})
+ session := SystemExec("xz", []string{outfile})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 6888863ca..236ddb221 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman logs", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman logs", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/mount_test.go b/test/e2e/mount_test.go
index fbb0a3eb7..a93a0aa4a 100644
--- a/test/e2e/mount_test.go
+++ b/test/e2e/mount_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman mount", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman mount", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/namespace_test.go b/test/e2e/namespace_test.go
index 017edd231..ebce09f54 100644
--- a/test/e2e/namespace_test.go
+++ b/test/e2e/namespace_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman namespaces", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman namespaces", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pause_test.go b/test/e2e/pause_test.go
index 1a2eb1a09..e109bc077 100644
--- a/test/e2e/pause_test.go
+++ b/test/e2e/pause_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pause", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
pausedState := "Paused"
@@ -23,7 +24,7 @@ var _ = Describe("Podman pause", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -91,7 +92,7 @@ var _ = Describe("Podman pause", func() {
})
- It("podman remove a paused container by id", func() {
+ It("podman remove a paused container by id without force", func() {
session := podmanTest.RunTopContainer("")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -111,25 +112,26 @@ var _ = Describe("Podman pause", func() {
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring(pausedState))
- result = podmanTest.Podman([]string{"rm", "--force", cid})
- result.WaitWithDefaultTimeout()
+ })
- Expect(result.ExitCode()).To(Equal(125))
- Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
- Expect(podmanTest.GetContainerStatus()).To(ContainSubstring(pausedState))
+ It("podman remove a paused container by id with force", func() {
+ session := podmanTest.RunTopContainer("")
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToString()
- result = podmanTest.Podman([]string{"unpause", cid})
+ result := podmanTest.Podman([]string{"pause", cid})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
- Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring(pausedState))
result = podmanTest.Podman([]string{"rm", "--force", cid})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
-
})
It("podman stop a paused container by id", func() {
diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go
index 0ce1e22a8..5abf9613b 100644
--- a/test/e2e/pod_create_test.go
+++ b/test/e2e/pod_create_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod create", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod create", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -79,4 +80,43 @@ var _ = Describe("Podman pod create", func() {
check.WaitWithDefaultTimeout()
Expect(len(check.OutputToStringArray())).To(Equal(0))
})
+
+ It("podman create pod without network portbindings", func() {
+ name := "test"
+ session := podmanTest.Podman([]string{"pod", "create", "--name", name})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ pod := session.OutputToString()
+
+ webserver := podmanTest.Podman([]string{"run", "--pod", pod, "-dt", nginx})
+ webserver.WaitWithDefaultTimeout()
+ Expect(webserver.ExitCode()).To(Equal(0))
+
+ check := SystemExec("nc", []string{"-z", "localhost", "80"})
+ check.WaitWithDefaultTimeout()
+ Expect(check.ExitCode()).To(Equal(1))
+ })
+
+ It("podman create pod with network portbindings", func() {
+ name := "test"
+ session := podmanTest.Podman([]string{"pod", "create", "--name", name, "-p", "80:80"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ pod := session.OutputToString()
+
+ webserver := podmanTest.Podman([]string{"run", "--pod", pod, "-dt", nginx})
+ webserver.WaitWithDefaultTimeout()
+ Expect(webserver.ExitCode()).To(Equal(0))
+
+ check := SystemExec("nc", []string{"-z", "localhost", "80"})
+ check.WaitWithDefaultTimeout()
+ Expect(check.ExitCode()).To(Equal(0))
+ })
+
+ It("podman create pod with no infra but portbindings should fail", func() {
+ name := "test"
+ session := podmanTest.Podman([]string{"pod", "create", "--infra=false", "--name", name, "-p", "80:80"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+ })
})
diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go
index f1e2375ce..8c7c09c97 100644
--- a/test/e2e/pod_infra_container_test.go
+++ b/test/e2e/pod_infra_container_test.go
@@ -5,6 +5,7 @@ import (
"os"
"strconv"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman pod create", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman pod create", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
podmanTest.RestoreArtifact(infra)
})
diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go
index 667e59f38..51e95f788 100644
--- a/test/e2e/pod_inspect_test.go
+++ b/test/e2e/pod_inspect_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod inspect", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod inspect", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_kill_test.go b/test/e2e/pod_kill_test.go
index b29fe1e17..d9cec2cad 100644
--- a/test/e2e/pod_kill_test.go
+++ b/test/e2e/pod_kill_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod kill", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod kill", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_pause_test.go b/test/e2e/pod_pause_test.go
index 384cbfcb7..8f766d3db 100644
--- a/test/e2e/pod_pause_test.go
+++ b/test/e2e/pod_pause_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod pause", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
pausedState := "Paused"
@@ -22,7 +23,7 @@ var _ = Describe("Podman pod pause", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_pod_namespaces.go b/test/e2e/pod_pod_namespaces.go
index 3e84005c3..b1d5abb1c 100644
--- a/test/e2e/pod_pod_namespaces.go
+++ b/test/e2e/pod_pod_namespaces.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod create", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod create", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
podmanTest.RestoreArtifact(infra)
})
diff --git a/test/e2e/pod_ps_test.go b/test/e2e/pod_ps_test.go
index b48cb9578..9e816bcfa 100644
--- a/test/e2e/pod_ps_test.go
+++ b/test/e2e/pod_ps_test.go
@@ -5,6 +5,7 @@ import (
"os"
"sort"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman ps", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman ps", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_restart_test.go b/test/e2e/pod_restart_test.go
index e486f8791..d0964e8de 100644
--- a/test/e2e/pod_restart_test.go
+++ b/test/e2e/pod_restart_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod restart", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod restart", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_rm_test.go b/test/e2e/pod_rm_test.go
index 09002e954..48767b33f 100644
--- a/test/e2e/pod_rm_test.go
+++ b/test/e2e/pod_rm_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod rm", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod rm", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_start_test.go b/test/e2e/pod_start_test.go
index 9d2ea9b26..346346425 100644
--- a/test/e2e/pod_start_test.go
+++ b/test/e2e/pod_start_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod start", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod start", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_stats_test.go b/test/e2e/pod_stats_test.go
index f9c8e06c4..d7b9a8f48 100644
--- a/test/e2e/pod_stats_test.go
+++ b/test/e2e/pod_stats_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod stats", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod stats", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_stop_test.go b/test/e2e/pod_stop_test.go
index 32f8559ad..6c5319a3d 100644
--- a/test/e2e/pod_stop_test.go
+++ b/test/e2e/pod_stop_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman pod stop", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman pod stop", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pod_top_test.go b/test/e2e/pod_top_test.go
index f72456307..3dc80ddfb 100644
--- a/test/e2e/pod_top_test.go
+++ b/test/e2e/pod_top_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman top", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman top", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/port_test.go b/test/e2e/port_test.go
index ed15b54ac..09f3ab53a 100644
--- a/test/e2e/port_test.go
+++ b/test/e2e/port_test.go
@@ -5,6 +5,7 @@ import (
"os"
"strings"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman port", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman port", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go
new file mode 100644
index 000000000..6679a676c
--- /dev/null
+++ b/test/e2e/prune_test.go
@@ -0,0 +1,88 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var pruneImage = `
+FROM alpine:latest
+LABEL RUN podman --version
+RUN apk update
+RUN apk add bash`
+
+var _ = Describe("Podman rm", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman container prune containers", func() {
+ top := podmanTest.RunTopContainer("")
+ top.WaitWithDefaultTimeout()
+ Expect(top.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"run", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ prune := podmanTest.Podman([]string{"container", "prune"})
+ prune.WaitWithDefaultTimeout()
+ Expect(prune.ExitCode()).To(Equal(0))
+
+ Expect(podmanTest.NumberOfContainers()).To(Equal(1))
+ })
+
+ It("podman image prune none images", func() {
+ podmanTest.BuildImage(pruneImage, "alpine_bash:latest", "true")
+
+ none := podmanTest.Podman([]string{"images", "-a"})
+ none.WaitWithDefaultTimeout()
+ Expect(none.ExitCode()).To(Equal(0))
+ hasNone, _ := none.GrepString("<none>")
+ Expect(hasNone).To(BeTrue())
+
+ prune := podmanTest.Podman([]string{"image", "prune"})
+ prune.WaitWithDefaultTimeout()
+ Expect(prune.ExitCode()).To(Equal(0))
+
+ after := podmanTest.Podman([]string{"images", "-a"})
+ after.WaitWithDefaultTimeout()
+ Expect(after.ExitCode()).To(Equal(0))
+ hasNoneAfter, _ := after.GrepString("<none>")
+ Expect(hasNoneAfter).To(BeFalse())
+ })
+
+ It("podman image prune unused images", func() {
+ prune := podmanTest.Podman([]string{"image", "prune"})
+ prune.WaitWithDefaultTimeout()
+ Expect(prune.ExitCode()).To(Equal(0))
+
+ images := podmanTest.Podman([]string{"images", "-a"})
+ images.WaitWithDefaultTimeout()
+ // all images are unused, so they all should be deleted!
+ Expect(len(images.OutputToStringArray())).To(Equal(0))
+ })
+
+})
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index a873b57bb..9caa6e7f1 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -6,6 +6,7 @@ import (
"regexp"
"sort"
+ . "github.com/containers/libpod/test/utils"
"github.com/docker/go-units"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -15,7 +16,7 @@ var _ = Describe("Podman ps", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -23,7 +24,7 @@ var _ = Describe("Podman ps", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go
index 606160198..ad8742984 100644
--- a/test/e2e/pull_test.go
+++ b/test/e2e/pull_test.go
@@ -4,6 +4,7 @@ import (
"os"
"fmt"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"strings"
@@ -13,7 +14,7 @@ var _ = Describe("Podman pull", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman pull", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -101,7 +102,7 @@ var _ = Describe("Podman pull", func() {
session = podmanTest.Podman([]string{"rmi", "alpine"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"/tmp/alp.tar"})
+ clean := SystemExec("rm", []string{"/tmp/alp.tar"})
clean.WaitWithDefaultTimeout()
Expect(clean.ExitCode()).To(Equal(0))
})
@@ -119,12 +120,12 @@ var _ = Describe("Podman pull", func() {
session = podmanTest.Podman([]string{"rmi", "alpine"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"/tmp/oci-alp.tar"})
+ clean := SystemExec("rm", []string{"/tmp/oci-alp.tar"})
clean.WaitWithDefaultTimeout()
})
It("podman pull from local directory", func() {
- setup := podmanTest.SystemExec("mkdir", []string{"-p", "/tmp/podmantestdir"})
+ setup := SystemExec("mkdir", []string{"-p", "/tmp/podmantestdir"})
setup.WaitWithDefaultTimeout()
session := podmanTest.Podman([]string{"push", "alpine", "dir:/tmp/podmantestdir"})
session.WaitWithDefaultTimeout()
@@ -139,7 +140,7 @@ var _ = Describe("Podman pull", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"-fr", "/tmp/podmantestdir"})
+ clean := SystemExec("rm", []string{"-fr", "/tmp/podmantestdir"})
clean.WaitWithDefaultTimeout()
})
diff --git a/test/e2e/push_test.go b/test/e2e/push_test.go
index 5e3d3745a..3447cd57e 100644
--- a/test/e2e/push_test.go
+++ b/test/e2e/push_test.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
"strings"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -14,7 +15,7 @@ var _ = Describe("Podman push", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -22,7 +23,7 @@ var _ = Describe("Podman push", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -52,7 +53,7 @@ var _ = Describe("Podman push", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"-fr", "/tmp/busybox"})
+ clean := SystemExec("rm", []string{"-fr", "/tmp/busybox"})
clean.WaitWithDefaultTimeout()
Expect(clean.ExitCode()).To(Equal(0))
})
@@ -66,7 +67,7 @@ var _ = Describe("Podman push", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
@@ -82,20 +83,20 @@ var _ = Describe("Podman push", func() {
authPath := filepath.Join(podmanTest.TempDir, "auth")
os.Mkdir(authPath, os.ModePerm)
os.MkdirAll("/etc/containers/certs.d/localhost:5000", os.ModePerm)
- debug := podmanTest.SystemExec("ls", []string{"-l", podmanTest.TempDir})
+ debug := SystemExec("ls", []string{"-l", podmanTest.TempDir})
debug.WaitWithDefaultTimeout()
cwd, _ := os.Getwd()
certPath := filepath.Join(cwd, "../", "certs")
if IsCommandAvailable("getenforce") {
- ge := podmanTest.SystemExec("getenforce", []string{})
+ ge := SystemExec("getenforce", []string{})
ge.WaitWithDefaultTimeout()
if ge.OutputToString() == "Enforcing" {
- se := podmanTest.SystemExec("setenforce", []string{"0"})
+ se := SystemExec("setenforce", []string{"0"})
se.WaitWithDefaultTimeout()
- defer podmanTest.SystemExec("setenforce", []string{"1"})
+ defer SystemExec("setenforce", []string{"1"})
}
}
podmanTest.RestoreArtifact(registry)
@@ -108,7 +109,7 @@ var _ = Describe("Podman push", func() {
f.WriteString(session.OutputToString())
f.Sync()
- debug = podmanTest.SystemExec("cat", []string{filepath.Join(authPath, "htpasswd")})
+ debug = SystemExec("cat", []string{filepath.Join(authPath, "htpasswd")})
debug.WaitWithDefaultTimeout()
session = podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry", "-v",
@@ -119,7 +120,7 @@ var _ = Describe("Podman push", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
@@ -134,7 +135,7 @@ var _ = Describe("Podman push", func() {
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
- setup := podmanTest.SystemExec("cp", []string{filepath.Join(certPath, "domain.crt"), "/etc/containers/certs.d/localhost:5000/ca.crt"})
+ setup := SystemExec("cp", []string{filepath.Join(certPath, "domain.crt"), "/etc/containers/certs.d/localhost:5000/ca.crt"})
setup.WaitWithDefaultTimeout()
defer os.RemoveAll("/etc/containers/certs.d/localhost:5000")
@@ -155,20 +156,20 @@ var _ = Describe("Podman push", func() {
session := podmanTest.Podman([]string{"push", ALPINE, "docker-archive:/tmp/alp:latest"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"/tmp/alp"})
+ clean := SystemExec("rm", []string{"/tmp/alp"})
clean.WaitWithDefaultTimeout()
Expect(clean.ExitCode()).To(Equal(0))
})
It("podman push to docker daemon", func() {
- setup := podmanTest.SystemExec("bash", []string{"-c", "systemctl status docker 2>&1"})
+ setup := SystemExec("bash", []string{"-c", "systemctl status docker 2>&1"})
setup.WaitWithDefaultTimeout()
if setup.LineInOutputContains("Active: inactive") {
- setup = podmanTest.SystemExec("systemctl", []string{"start", "docker"})
+ setup = SystemExec("systemctl", []string{"start", "docker"})
setup.WaitWithDefaultTimeout()
- defer podmanTest.SystemExec("systemctl", []string{"stop", "docker"})
+ defer SystemExec("systemctl", []string{"stop", "docker"})
} else if setup.ExitCode() != 0 {
Skip("Docker is not available")
}
@@ -177,12 +178,12 @@ var _ = Describe("Podman push", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- check := podmanTest.SystemExec("docker", []string{"images", "--format", "{{.Repository}}:{{.Tag}}"})
+ check := SystemExec("docker", []string{"images", "--format", "{{.Repository}}:{{.Tag}}"})
check.WaitWithDefaultTimeout()
Expect(check.ExitCode()).To(Equal(0))
Expect(check.OutputToString()).To(ContainSubstring("alpine:podmantest"))
- clean := podmanTest.SystemExec("docker", []string{"rmi", "alpine:podmantest"})
+ clean := SystemExec("docker", []string{"rmi", "alpine:podmantest"})
clean.WaitWithDefaultTimeout()
Expect(clean.ExitCode()).To(Equal(0))
})
@@ -191,7 +192,7 @@ var _ = Describe("Podman push", func() {
session := podmanTest.Podman([]string{"push", ALPINE, "oci-archive:/tmp/alp.tar:latest"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"/tmp/alp.tar"})
+ clean := SystemExec("rm", []string{"/tmp/alp.tar"})
clean.WaitWithDefaultTimeout()
Expect(clean.ExitCode()).To(Equal(0))
})
@@ -204,14 +205,14 @@ var _ = Describe("Podman push", func() {
ostreePath := filepath.Join(podmanTest.TempDir, "ostree/repo")
os.MkdirAll(ostreePath, os.ModePerm)
- setup := podmanTest.SystemExec("ostree", []string{strings.Join([]string{"--repo=", ostreePath}, ""), "init"})
+ setup := SystemExec("ostree", []string{strings.Join([]string{"--repo=", ostreePath}, ""), "init"})
setup.WaitWithDefaultTimeout()
session := podmanTest.Podman([]string{"push", ALPINE, strings.Join([]string{"ostree:alp@", ostreePath}, "")})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"-rf", ostreePath})
+ clean := SystemExec("rm", []string{"-rf", ostreePath})
clean.WaitWithDefaultTimeout()
Expect(clean.ExitCode()).To(Equal(0))
})
@@ -220,7 +221,7 @@ var _ = Describe("Podman push", func() {
session := podmanTest.Podman([]string{"push", ALPINE, "docker-archive:/tmp/alp"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"/tmp/alp"})
+ clean := SystemExec("rm", []string{"/tmp/alp"})
clean.WaitWithDefaultTimeout()
Expect(clean.ExitCode()).To(Equal(0))
})
@@ -229,7 +230,7 @@ var _ = Describe("Podman push", func() {
session := podmanTest.Podman([]string{"push", ALPINE, "oci-archive:/tmp/alp-oci"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- clean := podmanTest.SystemExec("rm", []string{"/tmp/alp-oci"})
+ clean := SystemExec("rm", []string{"/tmp/alp-oci"})
clean.WaitWithDefaultTimeout()
Expect(clean.ExitCode()).To(Equal(0))
})
diff --git a/test/e2e/refresh_test.go b/test/e2e/refresh_test.go
index c4a65aa47..bf8fff105 100644
--- a/test/e2e/refresh_test.go
+++ b/test/e2e/refresh_test.go
@@ -5,6 +5,7 @@ import (
"os"
"time"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman refresh", func() {
var (
tmpdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman refresh", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tmpdir)
+ podmanTest = PodmanTestCreate(tmpdir)
podmanTest.RestoreAllArtifacts()
})
@@ -43,13 +44,13 @@ var _ = Describe("Podman refresh", func() {
createSession.WaitWithDefaultTimeout()
Expect(createSession.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- Expect(podmanTest.NumberOfRunningContainers()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
refreshSession := podmanTest.Podman([]string{"container", "refresh"})
refreshSession.WaitWithDefaultTimeout()
Expect(refreshSession.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- Expect(podmanTest.NumberOfRunningContainers()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
})
Specify("Refresh with running container restarts container", func() {
@@ -57,7 +58,7 @@ var _ = Describe("Podman refresh", func() {
createSession.WaitWithDefaultTimeout()
Expect(createSession.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- Expect(podmanTest.NumberOfRunningContainers()).To(Equal(1))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
// HACK: ensure container starts before we move on
time.Sleep(1 * time.Second)
@@ -66,6 +67,6 @@ var _ = Describe("Podman refresh", func() {
refreshSession.WaitWithDefaultTimeout()
Expect(refreshSession.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- Expect(podmanTest.NumberOfRunningContainers()).To(Equal(1))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
})
})
diff --git a/test/e2e/restart_test.go b/test/e2e/restart_test.go
index eca2bbcda..30801c272 100644
--- a/test/e2e/restart_test.go
+++ b/test/e2e/restart_test.go
@@ -5,6 +5,7 @@ import (
"os"
"time"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman restart", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman restart", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -74,7 +75,7 @@ var _ = Describe("Podman restart", func() {
It("Podman restart running container", func() {
_ = podmanTest.RunTopContainer("test1")
- ok := WaitForContainer(&podmanTest)
+ ok := WaitForContainer(podmanTest)
Expect(ok).To(BeTrue())
startTime := podmanTest.Podman([]string{"inspect", "--format='{{.State.StartedAt}}'", "test1"})
startTime.WaitWithDefaultTimeout()
diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go
index cbc03a078..c6a2b61ee 100644
--- a/test/e2e/rm_test.go
+++ b/test/e2e/rm_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman rm", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman rm", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/rmi_test.go b/test/e2e/rmi_test.go
index 2a1a0da77..22bfbbe8c 100644
--- a/test/e2e/rmi_test.go
+++ b/test/e2e/rmi_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman rmi", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman rmi", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -249,4 +250,25 @@ var _ = Describe("Podman rmi", func() {
session2.WaitWithDefaultTimeout()
Expect(session2.ExitCode()).To(Equal(0))
})
+
+ It("podman rmi -a with parent|child images", func() {
+ dockerfile := `FROM docker.io/library/alpine:latest AS base
+RUN touch /1
+ENV LOCAL=/1
+RUN find $LOCAL
+FROM base
+RUN find $LOCAL
+
+`
+ podmanTest.BuildImage(dockerfile, "test", "true")
+ session := podmanTest.Podman([]string{"rmi", "-a"})
+ session.WaitWithDefaultTimeout()
+ fmt.Println(session.OutputToString())
+ Expect(session.ExitCode()).To(Equal(0))
+
+ images := podmanTest.Podman([]string{"images", "--all"})
+ images.WaitWithDefaultTimeout()
+ Expect(images.ExitCode()).To(Equal(0))
+ Expect(len(images.OutputToStringArray())).To(Equal(0))
+ })
})
diff --git a/test/e2e/rootless_test.go b/test/e2e/rootless_test.go
index 876e10969..037af9688 100644
--- a/test/e2e/rootless_test.go
+++ b/test/e2e/rootless_test.go
@@ -9,6 +9,7 @@ import (
"runtime"
"syscall"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -30,7 +31,7 @@ var _ = Describe("Podman rootless", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -38,7 +39,7 @@ var _ = Describe("Podman rootless", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.CgroupManager = "cgroupfs"
podmanTest.StorageOptions = ROOTLESS_STORAGE_OPTIONS
podmanTest.RestoreAllArtifacts()
@@ -55,6 +56,7 @@ var _ = Describe("Podman rootless", func() {
commands := []string{"help", "version"}
for _, v := range commands {
env := os.Environ()
+ env = append(env, "USER=foo")
cmd := podmanTest.PodmanAsUser([]string{v}, 1000, 1000, env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
@@ -68,7 +70,7 @@ var _ = Describe("Podman rootless", func() {
return os.Lchown(p, 1000, 1000)
}
- type rootlessCB func(test PodmanTest, xdgRuntimeDir string, home string, mountPath string)
+ type rootlessCB func(test *PodmanTestIntegration, xdgRuntimeDir string, home string, mountPath string)
runInRootlessContext := func(cb rootlessCB) {
// Check if we can create a user namespace
@@ -91,7 +93,7 @@ var _ = Describe("Podman rootless", func() {
tempdir, err := CreateTempDirInTempDir()
Expect(err).To(BeNil())
- rootlessTest := PodmanCreate(tempdir)
+ rootlessTest := PodmanTestCreate(tempdir)
rootlessTest.CgroupManager = "cgroupfs"
rootlessTest.StorageOptions = ROOTLESS_STORAGE_OPTIONS
err = filepath.Walk(tempdir, chownFunc)
@@ -116,11 +118,12 @@ var _ = Describe("Podman rootless", func() {
}
It("podman rootless pod", func() {
- f := func(rootlessTest PodmanTest, xdgRuntimeDir string, home string, mountPath string) {
+ f := func(rootlessTest *PodmanTestIntegration, xdgRuntimeDir string, home string, mountPath string) {
env := os.Environ()
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", xdgRuntimeDir))
env = append(env, fmt.Sprintf("HOME=%s", home))
env = append(env, "PODMAN_ALLOW_SINGLE_ID_MAPPING_IN_USERNS=1")
+ env = append(env, "USER=foo")
cmd := rootlessTest.PodmanAsUser([]string{"pod", "create", "--infra=false"}, 1000, 1000, env)
cmd.WaitWithDefaultTimeout()
@@ -151,19 +154,21 @@ var _ = Describe("Podman rootless", func() {
env := os.Environ()
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", xdgRuntimeDir))
env = append(env, fmt.Sprintf("HOME=%s", home))
+ env = append(env, "USER=foo")
cmd := podmanTest.PodmanAsUser([]string{"search", "docker.io/busybox"}, 1000, 1000, env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
})
runRootlessHelper := func(args []string) {
- f := func(rootlessTest PodmanTest, xdgRuntimeDir string, home string, mountPath string) {
+ f := func(rootlessTest *PodmanTestIntegration, xdgRuntimeDir string, home string, mountPath string) {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
env := os.Environ()
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", xdgRuntimeDir))
env = append(env, fmt.Sprintf("HOME=%s", home))
env = append(env, "PODMAN_ALLOW_SINGLE_ID_MAPPING_IN_USERNS=1")
+ env = append(env, "USER=foo")
allArgs := append([]string{"run"}, args...)
allArgs = append(allArgs, "--rootfs", mountPath, "echo", "hello")
@@ -182,6 +187,10 @@ var _ = Describe("Podman rootless", func() {
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
+ cmd = rootlessTest.PodmanAsUser([]string{"restart", "-l", "-t", "0"}, 1000, 1000, env)
+ cmd.WaitWithDefaultTimeout()
+ Expect(cmd.ExitCode()).To(Equal(0))
+
canUseExec := canExec()
if canUseExec {
@@ -204,6 +213,10 @@ var _ = Describe("Podman rootless", func() {
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
+ cmd = rootlessTest.PodmanAsUser([]string{"inspect", "-l", "--type", "container", "--format", "{{ .State.Status }}"}, 1000, 1000, env)
+ cmd.WaitWithDefaultTimeout()
+ Expect(cmd.LineInOutputContains("exited")).To(BeTrue())
+
cmd = rootlessTest.PodmanAsUser([]string{"start", "-l"}, 1000, 1000, env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
@@ -216,6 +229,14 @@ var _ = Describe("Podman rootless", func() {
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
+ if len(args) == 0 {
+ cmd = rootlessTest.PodmanAsUser([]string{"inspect", "-l"}, 1000, 1000, env)
+ cmd.WaitWithDefaultTimeout()
+ Expect(cmd.ExitCode()).To(Equal(0))
+ data := cmd.InspectContainerToJSON()
+ Expect(data[0].HostConfig.NetworkMode).To(ContainSubstring("slirp4netns"))
+ }
+
if !canUseExec {
Skip("ioctl(NS_GET_PARENT) not supported.")
}
diff --git a/test/e2e/run_cgroup_parent_test.go b/test/e2e/run_cgroup_parent_test.go
index f266fafa4..57b3aa6b1 100644
--- a/test/e2e/run_cgroup_parent_test.go
+++ b/test/e2e/run_cgroup_parent_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreArtifact(fedoraMinimal)
})
@@ -32,7 +33,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
})
Specify("valid --cgroup-parent using cgroupfs", func() {
- if !containerized() {
+ if !Containerized() {
Skip("Must be containerized to run this test.")
}
cgroup := "/zzz"
@@ -45,7 +46,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
Specify("no --cgroup-parent", func() {
cgroup := "/libpod_parent"
- if !containerized() && podmanTest.CgroupManager != "cgroupfs" {
+ if !Containerized() && podmanTest.CgroupManager != "cgroupfs" {
cgroup = "/machine.slice"
}
run := podmanTest.Podman([]string{"run", fedoraMinimal, "cat", "/proc/self/cgroup"})
@@ -56,7 +57,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
})
Specify("valid --cgroup-parent using slice", func() {
- if containerized() || podmanTest.CgroupManager == "cgroupfs" {
+ if Containerized() || podmanTest.CgroupManager == "cgroupfs" {
Skip("Requires Systemd cgroup manager support")
}
cgroup := "aaaa.slice"
diff --git a/test/e2e/run_cleanup_test.go b/test/e2e/run_cleanup_test.go
index 02c70734a..5b60efa86 100644
--- a/test/e2e/run_cleanup_test.go
+++ b/test/e2e/run_cleanup_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run exit", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run exit", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -32,14 +33,14 @@ var _ = Describe("Podman run exit", func() {
})
It("podman run -d mount cleanup test", func() {
- mount := podmanTest.SystemExec("mount", nil)
+ mount := SystemExec("mount", nil)
mount.WaitWithDefaultTimeout()
out1 := mount.OutputToString()
result := podmanTest.Podman([]string{"create", "-dt", ALPINE, "echo", "hello"})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
- mount = podmanTest.SystemExec("mount", nil)
+ mount = SystemExec("mount", nil)
mount.WaitWithDefaultTimeout()
out2 := mount.OutputToString()
Expect(out1).To(Equal(out2))
diff --git a/test/e2e/run_cpu_test.go b/test/e2e/run_cpu_test.go
index d56dfac64..343fe656c 100644
--- a/test/e2e/run_cpu_test.go
+++ b/test/e2e/run_cpu_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run cpu", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run cpu", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/run_device_test.go b/test/e2e/run_device_test.go
index fedd696d1..7f1f7b2d0 100644
--- a/test/e2e/run_device_test.go
+++ b/test/e2e/run_device_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run device", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run device", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/run_dns_test.go b/test/e2e/run_dns_test.go
index a617035a1..444c568e0 100644
--- a/test/e2e/run_dns_test.go
+++ b/test/e2e/run_dns_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run dns", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run dns", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/run_entrypoint_test.go b/test/e2e/run_entrypoint_test.go
index 5e4ef75e1..227037f92 100644
--- a/test/e2e/run_entrypoint_test.go
+++ b/test/e2e/run_entrypoint_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run entrypoint", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run entrypoint", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreArtifact(ALPINE)
})
diff --git a/test/e2e/run_exit_test.go b/test/e2e/run_exit_test.go
index bb38f7222..788cbd8dd 100644
--- a/test/e2e/run_exit_test.go
+++ b/test/e2e/run_exit_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run exit", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run exit", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/run_memory_test.go b/test/e2e/run_memory_test.go
index d1768138b..91a311e85 100644
--- a/test/e2e/run_memory_test.go
+++ b/test/e2e/run_memory_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run memory", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run memory", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 021825d4b..68b1f06de 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -4,15 +4,16 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
-var _ = Describe("Podman rmi", func() {
+var _ = Describe("Podman run networking", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
hostname, _ = os.Hostname()
)
@@ -21,7 +22,7 @@ var _ = Describe("Podman rmi", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -54,7 +55,7 @@ var _ = Describe("Podman rmi", func() {
session := podmanTest.Podman([]string{"run", "-dt", "--expose", "222-223", "-P", ALPINE, "/bin/sh"})
session.Wait(30)
Expect(session.ExitCode()).To(Equal(0))
- results := podmanTest.SystemExec("iptables", []string{"-t", "nat", "-L"})
+ results := SystemExec("iptables", []string{"-t", "nat", "-L"})
results.Wait(30)
Expect(results.ExitCode()).To(Equal(0))
Expect(results.OutputToString()).To(ContainSubstring("222"))
@@ -65,12 +66,12 @@ var _ = Describe("Podman rmi", func() {
session := podmanTest.Podman([]string{"run", "-dt", "-p", "80:8000", ALPINE, "/bin/sh"})
session.Wait(30)
Expect(session.ExitCode()).To(Equal(0))
- results := podmanTest.SystemExec("iptables", []string{"-t", "nat", "-L"})
+ results := SystemExec("iptables", []string{"-t", "nat", "-L"})
results.Wait(30)
Expect(results.ExitCode()).To(Equal(0))
Expect(results.OutputToString()).To(ContainSubstring("8000"))
- ncBusy := podmanTest.SystemExec("nc", []string{"-l", "-p", "80"})
+ ncBusy := SystemExec("nc", []string{"-l", "-p", "80"})
ncBusy.Wait(10)
Expect(ncBusy.ExitCode()).ToNot(Equal(0))
})
@@ -144,4 +145,35 @@ var _ = Describe("Podman rmi", func() {
match, _ := session.GrepString("foobar")
Expect(match).Should(BeTrue())
})
+
+ It("podman run --net container: copies hosts and resolv", func() {
+ ctrName := "ctr1"
+ ctr1 := podmanTest.RunTopContainer(ctrName)
+ ctr1.WaitWithDefaultTimeout()
+ Expect(ctr1.ExitCode()).To(Equal(0))
+
+ // Exec in and modify /etc/resolv.conf and /etc/hosts
+ exec1 := podmanTest.Podman([]string{"exec", ctrName, "sh", "-c", "echo nameserver 192.0.2.1 > /etc/resolv.conf"})
+ exec1.WaitWithDefaultTimeout()
+ Expect(exec1.ExitCode()).To(Equal(0))
+
+ exec2 := podmanTest.Podman([]string{"exec", ctrName, "sh", "-c", "echo 192.0.2.2 test1 > /etc/hosts"})
+ exec2.WaitWithDefaultTimeout()
+ Expect(exec2.ExitCode()).To(Equal(0))
+
+ ctrName2 := "ctr2"
+ ctr2 := podmanTest.Podman([]string{"run", "-d", "--net=container:" + ctrName, "--name", ctrName2, ALPINE, "top"})
+ ctr2.WaitWithDefaultTimeout()
+ Expect(ctr2.ExitCode()).To(Equal(0))
+
+ exec3 := podmanTest.Podman([]string{"exec", "-i", ctrName2, "cat", "/etc/resolv.conf"})
+ exec3.WaitWithDefaultTimeout()
+ Expect(exec3.ExitCode()).To(Equal(0))
+ Expect(exec3.OutputToString()).To(ContainSubstring("nameserver 192.0.2.1"))
+
+ exec4 := podmanTest.Podman([]string{"exec", "-i", ctrName2, "cat", "/etc/hosts"})
+ exec4.WaitWithDefaultTimeout()
+ Expect(exec4.ExitCode()).To(Equal(0))
+ Expect(exec4.OutputToString()).To(ContainSubstring("192.0.2.2 test1"))
+ })
})
diff --git a/test/e2e/run_ns_test.go b/test/e2e/run_ns_test.go
index 88c0b1ad2..e4dcc5adc 100644
--- a/test/e2e/run_ns_test.go
+++ b/test/e2e/run_ns_test.go
@@ -5,6 +5,7 @@ import (
"os"
"strings"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman run ns", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman run ns", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreArtifact(fedoraMinimal)
})
@@ -49,7 +50,7 @@ var _ = Describe("Podman run ns", func() {
})
It("podman run ipcns test", func() {
- setup := podmanTest.SystemExec("ls", []string{"--inode", "-d", "/dev/shm"})
+ setup := SystemExec("ls", []string{"--inode", "-d", "/dev/shm"})
setup.WaitWithDefaultTimeout()
Expect(setup.ExitCode()).To(Equal(0))
hostShm := setup.OutputToString()
@@ -61,7 +62,7 @@ var _ = Describe("Podman run ns", func() {
})
It("podman run ipcns ipcmk host test", func() {
- setup := podmanTest.SystemExec("ipcmk", []string{"-M", "1024"})
+ setup := SystemExec("ipcmk", []string{"-M", "1024"})
setup.WaitWithDefaultTimeout()
Expect(setup.ExitCode()).To(Equal(0))
output := strings.Split(setup.OutputToString(), " ")
@@ -70,7 +71,7 @@ var _ = Describe("Podman run ns", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- setup = podmanTest.SystemExec("ipcrm", []string{"-m", ipc})
+ setup = SystemExec("ipcrm", []string{"-m", ipc})
setup.WaitWithDefaultTimeout()
Expect(setup.ExitCode()).To(Equal(0))
})
diff --git a/test/e2e/run_passwd_test.go b/test/e2e/run_passwd_test.go
index 0bea092bb..891f4fbd8 100644
--- a/test/e2e/run_passwd_test.go
+++ b/test/e2e/run_passwd_test.go
@@ -4,6 +4,7 @@ import (
"os"
"fmt"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run passwd", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run passwd", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/run_privileged_test.go b/test/e2e/run_privileged_test.go
index 0a62d8505..770ea3e6b 100644
--- a/test/e2e/run_privileged_test.go
+++ b/test/e2e/run_privileged_test.go
@@ -5,6 +5,7 @@ import (
"os"
"strings"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman privileged container tests", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman privileged container tests", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -42,7 +43,7 @@ var _ = Describe("Podman privileged container tests", func() {
})
It("podman privileged CapEff", func() {
- cap := podmanTest.SystemExec("grep", []string{"CapEff", "/proc/self/status"})
+ cap := SystemExec("grep", []string{"CapEff", "/proc/self/status"})
cap.WaitWithDefaultTimeout()
Expect(cap.ExitCode()).To(Equal(0))
@@ -53,7 +54,7 @@ var _ = Describe("Podman privileged container tests", func() {
})
It("podman cap-add CapEff", func() {
- cap := podmanTest.SystemExec("grep", []string{"CapEff", "/proc/self/status"})
+ cap := SystemExec("grep", []string{"CapEff", "/proc/self/status"})
cap.WaitWithDefaultTimeout()
Expect(cap.ExitCode()).To(Equal(0))
@@ -87,13 +88,13 @@ var _ = Describe("Podman privileged container tests", func() {
It("run no-new-privileges test", func() {
// Check if our kernel is new enough
- k, err := IsKernelNewThan("4.14")
+ k, err := IsKernelNewerThan("4.14")
Expect(err).To(BeNil())
if !k {
Skip("Kernel is not new enough to test this feature")
}
- cap := podmanTest.SystemExec("grep", []string{"NoNewPrivs", "/proc/self/status"})
+ cap := SystemExec("grep", []string{"NoNewPrivs", "/proc/self/status"})
cap.WaitWithDefaultTimeout()
if cap.ExitCode() != 0 {
Skip("Can't determine NoNewPrivs")
@@ -103,12 +104,12 @@ var _ = Describe("Podman privileged container tests", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- privs := strings.Split(cap.OutputToString(), ":")
+ privs := strings.Split(session.OutputToString(), ":")
session = podmanTest.Podman([]string{"run", "--security-opt", "no-new-privileges", "busybox", "grep", "NoNewPrivs", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- noprivs := strings.Split(cap.OutputToString(), ":")
+ noprivs := strings.Split(session.OutputToString(), ":")
Expect(privs[1]).To(Not(Equal(noprivs[1])))
})
diff --git a/test/e2e/run_restart_test.go b/test/e2e/run_restart_test.go
index a2f0b8b41..018c66b45 100644
--- a/test/e2e/run_restart_test.go
+++ b/test/e2e/run_restart_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run restart containers", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman run restart containers", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -43,7 +44,7 @@ var _ = Describe("Podman run restart containers", func() {
It("Podman start after signal kill", func() {
_ = podmanTest.RunTopContainer("test1")
- ok := WaitForContainer(&podmanTest)
+ ok := WaitForContainer(podmanTest)
Expect(ok).To(BeTrue())
killSession := podmanTest.Podman([]string{"kill", "-s", "9", "test1"})
diff --git a/test/e2e/run_selinux_test.go b/test/e2e/run_selinux_test.go
index a1a18c780..418382e16 100644
--- a/test/e2e/run_selinux_test.go
+++ b/test/e2e/run_selinux_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/opencontainers/selinux/go-selinux"
@@ -13,7 +14,7 @@ var _ = Describe("Podman run", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman run", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
if !selinux.GetEnabled() {
Skip("SELinux not enabled")
diff --git a/test/e2e/run_signal_test.go b/test/e2e/run_signal_test.go
index 5de17108c..8f7894db8 100644
--- a/test/e2e/run_signal_test.go
+++ b/test/e2e/run_signal_test.go
@@ -4,39 +4,24 @@ import (
"fmt"
"io"
"os"
- "os/exec"
"path/filepath"
"strings"
"syscall"
"time"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
"golang.org/x/sys/unix"
)
-// PodmanPID execs podman and returns its PID
-func (p *PodmanTest) PodmanPID(args []string) (*PodmanSession, int) {
- podmanOptions := p.MakeOptions()
- podmanOptions = append(podmanOptions, strings.Split(p.StorageOptions, " ")...)
- podmanOptions = append(podmanOptions, args...)
- fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
- command := exec.Command(p.PodmanBinary, podmanOptions...)
- session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
- if err != nil {
- Fail(fmt.Sprintf("unable to run podman command: %s", strings.Join(podmanOptions, " ")))
- }
- return &PodmanSession{session}, command.Process.Pid
-}
-
const sigCatch = "trap \"echo FOO >> /h/fifo \" 8; echo READY >> /h/fifo; while :; do sleep 0.25; done"
var _ = Describe("Podman run with --sig-proxy", func() {
var (
tmpdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -44,7 +29,7 @@ var _ = Describe("Podman run with --sig-proxy", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tmpdir)
+ podmanTest = PodmanTestCreate(tmpdir)
podmanTest.RestoreArtifact(fedoraMinimal)
})
@@ -122,7 +107,7 @@ var _ = Describe("Podman run with --sig-proxy", func() {
signal := syscall.SIGPOLL
session, pid := podmanTest.PodmanPID([]string{"run", "--name", "test2", "--sig-proxy=false", fedoraMinimal, "bash", "-c", sigCatch})
- ok := WaitForContainer(&podmanTest)
+ ok := WaitForContainer(podmanTest)
Expect(ok).To(BeTrue())
// Kill with given signal
diff --git a/test/e2e/run_staticip_test.go b/test/e2e/run_staticip_test.go
index b69d15cee..749835b47 100644
--- a/test/e2e/run_staticip_test.go
+++ b/test/e2e/run_staticip_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman run with --ip flag", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,8 +21,10 @@ var _ = Describe("Podman run with --ip flag", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
+ // Cleanup the CNI networks used by the tests
+ os.RemoveAll("/var/lib/cni/networks/podman")
})
AfterEach(func() {
@@ -55,4 +58,13 @@ var _ = Describe("Podman run with --ip flag", func() {
Expect(result.ExitCode()).To(Equal(0))
Expect(result.OutputToString()).To(ContainSubstring("10.88.64.128/16"))
})
+
+ It("Podman run two containers with the same IP", func() {
+ result := podmanTest.Podman([]string{"run", "-d", "--ip", "10.88.64.128", ALPINE, "sleep", "999"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ result = podmanTest.Podman([]string{"run", "-ti", "--ip", "10.88.64.128", ALPINE, "ip", "addr"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).ToNot(Equal(0))
+ })
})
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 98bf66a67..2104991b2 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -3,10 +3,12 @@ package integration
import (
"fmt"
"io/ioutil"
+ "net"
"os"
"path/filepath"
"strings"
+ . "github.com/containers/libpod/test/utils"
"github.com/mrunalp/fileutils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -16,7 +18,7 @@ var _ = Describe("Podman run", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -24,7 +26,7 @@ var _ = Describe("Podman run", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -203,7 +205,7 @@ var _ = Describe("Podman run", func() {
Expect(session.OutputToString()).To(ContainSubstring("/run/test rw,relatime, shared"))
})
- It("podman run with mount flag", func() {
+ It("podman run with --mount flag", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("skip failing test on ppc64le")
}
@@ -225,7 +227,6 @@ var _ = Describe("Podman run", func() {
found, matches := session.GrepString("/run/test")
Expect(found).Should(BeTrue())
Expect(matches[0]).To(ContainSubstring("rw"))
- Expect(matches[0]).To(ContainSubstring("relatime"))
Expect(matches[0]).To(ContainSubstring("shared"))
mountPath = filepath.Join(podmanTest.TempDir, "scratchpad")
@@ -287,14 +288,27 @@ var _ = Describe("Podman run", func() {
})
It("podman run notify_socket", func() {
- sock := "/run/notify"
+ host := GetHostDistributionInfo()
+ if host.Distribution != "rhel" && host.Distribution != "centos" && host.Distribution != "fedora" {
+ Skip("this test requires a working runc")
+ }
+ sock := filepath.Join(podmanTest.TempDir, "notify")
+ addr := net.UnixAddr{
+ Name: sock,
+ Net: "unixgram",
+ }
+ socket, err := net.ListenUnixgram("unixgram", &addr)
+ Expect(err).To(BeNil())
+ defer os.Remove(sock)
+ defer socket.Close()
+
os.Setenv("NOTIFY_SOCKET", sock)
+ defer os.Unsetenv("NOTIFY_SOCKET")
+
session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "printenv", "NOTIFY_SOCKET"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- match, _ := session.GrepString(sock)
- Expect(match).Should(BeTrue())
- os.Unsetenv("NOTIFY_SOCKET")
+ Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 0))
})
It("podman run log-opt", func() {
@@ -322,7 +336,7 @@ var _ = Describe("Podman run", func() {
hooksDir := tempdir + "/hooks"
os.Mkdir(hooksDir, 0755)
fileutils.CopyFile("hooks/hooks.json", hooksDir)
- os.Setenv("HOOK_OPTION", fmt.Sprintf("--hooks-dir-path=%s", hooksDir))
+ os.Setenv("HOOK_OPTION", fmt.Sprintf("--hooks-dir=%s", hooksDir))
os.Remove(hcheck)
session := podmanTest.Podman([]string{"run", ALPINE, "ls"})
session.Wait(10)
@@ -355,7 +369,7 @@ var _ = Describe("Podman run", func() {
keyFile := filepath.Join(targetDir, "key.pem")
err = ioutil.WriteFile(keyFile, []byte(mountString), 0755)
Expect(err).To(BeNil())
- execSession := podmanTest.SystemExec("ln", []string{"-s", targetDir, filepath.Join(secretsDir, "mysymlink")})
+ execSession := SystemExec("ln", []string{"-s", targetDir, filepath.Join(secretsDir, "mysymlink")})
execSession.WaitWithDefaultTimeout()
Expect(execSession.ExitCode()).To(Equal(0))
@@ -608,7 +622,48 @@ USER mail`
session := podmanTest.Podman([]string{"run", "--volume", vol1 + ":/myvol1:z", "--volume", vol2 + ":/myvol2:shared,z", fedoraMinimal, "findmnt", "-o", "TARGET,PROPAGATION"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- match, _ := session.GrepString("shared")
+ match, shared := session.GrepString("shared")
Expect(match).Should(BeTrue())
+ // make sure it's only shared (and not 'shared,slave')
+ isSharedOnly := !strings.Contains(shared[0], "shared,")
+ Expect(isSharedOnly).Should(BeTrue())
+ })
+
+ It("podman run --pod automatically", func() {
+ session := podmanTest.Podman([]string{"run", "--pod", "new:foobar", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ check := podmanTest.Podman([]string{"pod", "ps", "--no-trunc"})
+ check.WaitWithDefaultTimeout()
+ match, _ := check.GrepString("foobar")
+ Expect(match).To(BeTrue())
+ })
+
+ It("podman run --rm should work", func() {
+ session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ numContainers := podmanTest.NumberOfContainers()
+ Expect(numContainers).To(Equal(0))
+ })
+
+ It("podman run --rm failed container should delete itself", func() {
+ session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
+ numContainers := podmanTest.NumberOfContainers()
+ Expect(numContainers).To(Equal(0))
+ })
+
+ It("podman run failed container should NOT delete itself", func() {
+ session := podmanTest.Podman([]string{"run", ALPINE, "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
+ numContainers := podmanTest.NumberOfContainers()
+ Expect(numContainers).To(Equal(1))
})
})
diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go
index f2a9af6bf..b1f3d08b4 100644
--- a/test/e2e/run_userns_test.go
+++ b/test/e2e/run_userns_test.go
@@ -4,6 +4,7 @@ import (
"os"
"fmt"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman UserNS support", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman UserNS support", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/runlabel_test.go b/test/e2e/runlabel_test.go
index 8d10d3c24..93a19ba30 100644
--- a/test/e2e/runlabel_test.go
+++ b/test/e2e/runlabel_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -20,7 +21,7 @@ var _ = Describe("podman container runlabel", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -28,7 +29,7 @@ var _ = Describe("podman container runlabel", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/save_test.go b/test/e2e/save_test.go
index 586215c46..9f64e49a7 100644
--- a/test/e2e/save_test.go
+++ b/test/e2e/save_test.go
@@ -5,6 +5,7 @@ import (
"os"
"path/filepath"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman save", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -21,7 +22,7 @@ var _ = Describe("Podman save", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 84f1efbca..0167e9062 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -5,6 +5,7 @@ import (
"os"
"strconv"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -13,7 +14,7 @@ var _ = Describe("Podman search", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
const regFileContents = `
[registries.search]
@@ -40,7 +41,7 @@ var _ = Describe("Podman search", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -136,7 +137,7 @@ var _ = Describe("Podman search", func() {
fakereg.WaitWithDefaultTimeout()
Expect(fakereg.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
@@ -159,7 +160,7 @@ var _ = Describe("Podman search", func() {
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry3", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry3", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
@@ -182,7 +183,7 @@ var _ = Describe("Podman search", func() {
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry4", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry4", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
@@ -214,7 +215,7 @@ var _ = Describe("Podman search", func() {
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry5", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry5", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
@@ -245,7 +246,7 @@ var _ = Describe("Podman search", func() {
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry6", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry6", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
@@ -276,7 +277,7 @@ var _ = Describe("Podman search", func() {
registryLocal.WaitWithDefaultTimeout()
Expect(registryLocal.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry7", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry7", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
@@ -284,7 +285,7 @@ var _ = Describe("Podman search", func() {
registryLocal.WaitWithDefaultTimeout()
Expect(registryLocal.ExitCode()).To(Equal(0))
- if !WaitContainerReady(&podmanTest, "registry8", "listening on", 20, 1) {
+ if !WaitContainerReady(podmanTest, "registry8", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:6000/my-alpine"})
diff --git a/test/e2e/start_test.go b/test/e2e/start_test.go
index 9218cda69..64245c609 100644
--- a/test/e2e/start_test.go
+++ b/test/e2e/start_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman start", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman start", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -88,4 +89,30 @@ var _ = Describe("Podman start", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(125))
})
+
+ It("podman failed to start with --rm should delete the container", func() {
+ session := podmanTest.Podman([]string{"create", "-it", "--rm", ALPINE, "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ start := podmanTest.Podman([]string{"start", "-l"})
+ start.WaitWithDefaultTimeout()
+ Expect(start.ExitCode()).To(Not(Equal(0)))
+
+ numContainers := podmanTest.NumberOfContainers()
+ Expect(numContainers).To(BeZero())
+ })
+
+ It("podman failed to start without --rm should NOT delete the container", func() {
+ session := podmanTest.Podman([]string{"create", "-it", ALPINE, "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ start := podmanTest.Podman([]string{"start", "-l"})
+ start.WaitWithDefaultTimeout()
+ Expect(start.ExitCode()).To(Not(Equal(0)))
+
+ numContainers := podmanTest.NumberOfContainers()
+ Expect(numContainers).To(Equal(1))
+ })
})
diff --git a/test/e2e/stats_test.go b/test/e2e/stats_test.go
index e456d7114..be00d68b2 100644
--- a/test/e2e/stats_test.go
+++ b/test/e2e/stats_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman stats", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman stats", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go
index 9698a3110..5c229b9b4 100644
--- a/test/e2e/stop_test.go
+++ b/test/e2e/stop_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman stop", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman stop", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
@@ -56,6 +57,20 @@ var _ = Describe("Podman stop", func() {
Expect(session.ExitCode()).To(Equal(0))
})
+ It("podman stop stopped container", func() {
+ session := podmanTest.RunTopContainer("test1")
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session2 := podmanTest.Podman([]string{"stop", "test1"})
+ session2.WaitWithDefaultTimeout()
+ Expect(session2.ExitCode()).To(Equal(0))
+
+ session3 := podmanTest.Podman([]string{"stop", "test1"})
+ session3.WaitWithDefaultTimeout()
+ Expect(session3.ExitCode()).To(Equal(0))
+ })
+
It("podman stop all containers", func() {
session := podmanTest.RunTopContainer("test1")
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go
new file mode 100644
index 000000000..ce67bb469
--- /dev/null
+++ b/test/e2e/systemd_test.go
@@ -0,0 +1,81 @@
+package integration
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman systemd", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ systemd_unit_file string
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ systemd_unit_file = `[Unit]
+Description=redis container
+[Service]
+Restart=always
+ExecStart=/usr/bin/podman start -a redis
+ExecStop=/usr/bin/podman stop -t 10 redis
+KillMode=process
+[Install]
+WantedBy=multi-user.target
+`
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman start container by systemd", func() {
+ if os.Getenv("SKIP_USERNS") != "" {
+ Skip("Skip userns tests.")
+ }
+
+ sys_file := ioutil.WriteFile("/etc/systemd/system/redis.service", []byte(systemd_unit_file), 0644)
+ Expect(sys_file).To(BeNil())
+
+ create := podmanTest.Podman([]string{"create", "-d", "--name", "redis", "redis"})
+ create.WaitWithDefaultTimeout()
+ Expect(create.ExitCode()).To(Equal(0))
+
+ enable := SystemExec("bash", []string{"-c", "systemctl daemon-reload && systemctl enable --now redis"})
+ enable.WaitWithDefaultTimeout()
+ Expect(enable.ExitCode()).To(Equal(0))
+
+ start := SystemExec("bash", []string{"-c", "systemctl start redis"})
+ start.WaitWithDefaultTimeout()
+
+ logs := SystemExec("bash", []string{"-c", "journalctl -n 20 -u redis"})
+ logs.WaitWithDefaultTimeout()
+
+ status := SystemExec("bash", []string{"-c", "systemctl status redis"})
+ status.WaitWithDefaultTimeout()
+ Expect(status.OutputToString()).To(ContainSubstring("active (running)"))
+
+ cleanup := SystemExec("bash", []string{"-c", "systemctl stop redis && systemctl disable redis"})
+ cleanup.WaitWithDefaultTimeout()
+ Expect(cleanup.ExitCode()).To(Equal(0))
+ os.Remove("/etc/systemd/system/redis.service")
+ sys_clean := SystemExec("bash", []string{"-c", "systemctl daemon-reload"})
+ sys_clean.WaitWithDefaultTimeout()
+ Expect(sys_clean.ExitCode()).To(Equal(0))
+ })
+})
diff --git a/test/e2e/tag_test.go b/test/e2e/tag_test.go
index 1b58fbd30..53896d1a2 100644
--- a/test/e2e/tag_test.go
+++ b/test/e2e/tag_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman tag", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman tag", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/top_test.go b/test/e2e/top_test.go
index 9537c2f50..cfcf2a959 100644
--- a/test/e2e/top_test.go
+++ b/test/e2e/top_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman top", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman top", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/e2e/trust_test.go b/test/e2e/trust_test.go
new file mode 100644
index 000000000..bbf09eca4
--- /dev/null
+++ b/test/e2e/trust_test.go
@@ -0,0 +1,72 @@
+package integration
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman trust", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman image trust show", func() {
+ path, err := os.Getwd()
+ if err != nil {
+ os.Exit(1)
+ }
+ session := podmanTest.Podman([]string{"image", "trust", "show", "--registrypath", filepath.Dir(path), "--policypath", filepath.Join(filepath.Dir(path), "policy.json")})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ outArray := session.OutputToStringArray()
+ Expect(len(outArray)).To(Equal(3))
+ Expect(outArray[0]).Should(ContainSubstring("accept"))
+ Expect(outArray[1]).Should(ContainSubstring("reject"))
+ Expect(outArray[2]).Should(ContainSubstring("signed"))
+ })
+
+ It("podman image trust set", func() {
+ path, err := os.Getwd()
+ if err != nil {
+ os.Exit(1)
+ }
+ session := podmanTest.Podman([]string{"image", "trust", "set", "--policypath", filepath.Join(filepath.Dir(path), "trust_set_test.json"), "-t", "accept", "default"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ var teststruct map[string][]map[string]string
+ policyContent, err := ioutil.ReadFile(filepath.Join(filepath.Dir(path), "trust_set_test.json"))
+ if err != nil {
+ os.Exit(1)
+ }
+ err = json.Unmarshal(policyContent, &teststruct)
+ if err != nil {
+ os.Exit(1)
+ }
+ Expect(teststruct["default"][0]["type"]).To(Equal("insecureAcceptAnything"))
+ })
+})
diff --git a/test/e2e/version_test.go b/test/e2e/version_test.go
index 6caf0e3dd..68a462bdb 100644
--- a/test/e2e/version_test.go
+++ b/test/e2e/version_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman version", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman version", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
})
AfterEach(func() {
diff --git a/test/e2e/volume_create_test.go b/test/e2e/volume_create_test.go
new file mode 100644
index 000000000..50ee63f2a
--- /dev/null
+++ b/test/e2e/volume_create_test.go
@@ -0,0 +1,60 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman volume create", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.CleanupVolume()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman create volume", func() {
+ session := podmanTest.Podman([]string{"volume", "create"})
+ session.WaitWithDefaultTimeout()
+ volName := session.OutputToString()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ check := podmanTest.Podman([]string{"volume", "ls", "-q"})
+ check.WaitWithDefaultTimeout()
+ match, _ := check.GrepString(volName)
+ Expect(match).To(BeTrue())
+ Expect(len(check.OutputToStringArray())).To(Equal(1))
+ })
+
+ It("podman create volume with name", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol"})
+ session.WaitWithDefaultTimeout()
+ volName := session.OutputToString()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ check := podmanTest.Podman([]string{"volume", "ls", "-q"})
+ check.WaitWithDefaultTimeout()
+ match, _ := check.GrepString(volName)
+ Expect(match).To(BeTrue())
+ Expect(len(check.OutputToStringArray())).To(Equal(1))
+ })
+})
diff --git a/test/e2e/volume_inspect_test.go b/test/e2e/volume_inspect_test.go
new file mode 100644
index 000000000..d0d5a601e
--- /dev/null
+++ b/test/e2e/volume_inspect_test.go
@@ -0,0 +1,77 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman volume inspect", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.CleanupVolume()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman inspect volume", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol"})
+ session.WaitWithDefaultTimeout()
+ volName := session.OutputToString()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "inspect", volName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.IsJSONOutputValid()).To(BeTrue())
+ })
+
+ It("podman inspect volume with Go format", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol"})
+ session.WaitWithDefaultTimeout()
+ volName := session.OutputToString()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "inspect", "--format", "{{.Name}}", volName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Equal(volName))
+ })
+
+ It("podman inspect volume with --all flag", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol1"})
+ session.WaitWithDefaultTimeout()
+ volName1 := session.OutputToString()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "create", "myvol2"})
+ session.WaitWithDefaultTimeout()
+ volName2 := session.OutputToString()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "inspect", "--format", "{{.Name}}", "--all"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.OutputToStringArray()[0]).To(Equal(volName1))
+ Expect(session.OutputToStringArray()[1]).To(Equal(volName2))
+ })
+})
diff --git a/test/e2e/volume_ls_test.go b/test/e2e/volume_ls_test.go
new file mode 100644
index 000000000..119d29d9b
--- /dev/null
+++ b/test/e2e/volume_ls_test.go
@@ -0,0 +1,84 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman volume ls", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.CleanupVolume()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman ls volume", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ })
+
+ It("podman ls volume with JSON format", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls", "--format", "json"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.IsJSONOutputValid()).To(BeTrue())
+ })
+
+ It("podman ls volume with Go template", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls", "--format", "table {{.Name}} {{.Driver}} {{.Scope}}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ })
+
+ It("podman ls volume with --filter flag", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "--label", "foo=bar", "myvol"})
+ session.WaitWithDefaultTimeout()
+ volName := session.OutputToString()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "create"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls", "--filter", "label=foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.OutputToStringArray()[1]).To(ContainSubstring(volName))
+ })
+})
diff --git a/test/e2e/volume_prune_test.go b/test/e2e/volume_prune_test.go
new file mode 100644
index 000000000..8c0a10e77
--- /dev/null
+++ b/test/e2e/volume_prune_test.go
@@ -0,0 +1,64 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman volume prune", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.CleanupVolume()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman prune volume", func() {
+ session := podmanTest.Podman([]string{"volume", "create"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "create"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"create", "-v", "myvol:/myvol", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(4))
+
+ session = podmanTest.Podman([]string{"volume", "prune", "--force"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+
+ podmanTest.Cleanup()
+ })
+})
diff --git a/test/e2e/volume_rm_test.go b/test/e2e/volume_rm_test.go
new file mode 100644
index 000000000..cebb09467
--- /dev/null
+++ b/test/e2e/volume_rm_test.go
@@ -0,0 +1,91 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman volume rm", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.CleanupVolume()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman rm volume", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "rm", "myvol"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(0))
+ })
+
+ It("podman rm with --force flag", func() {
+ session := podmanTest.Podman([]string{"create", "-v", "myvol:/myvol", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ cid := session.OutputToString()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "rm", "myvol"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ Expect(session.ErrorToString()).To(ContainSubstring(cid))
+
+ session = podmanTest.Podman([]string{"volume", "rm", "-f", "myvol"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(0))
+
+ podmanTest.Cleanup()
+ })
+
+ It("podman rm with --all flag", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "myvol"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "create"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "rm", "-a"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(0))
+ })
+})
diff --git a/test/e2e/wait_test.go b/test/e2e/wait_test.go
index 8e7035204..a7e9b4c06 100644
--- a/test/e2e/wait_test.go
+++ b/test/e2e/wait_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ . "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -12,7 +13,7 @@ var _ = Describe("Podman wait", func() {
var (
tempdir string
err error
- podmanTest PodmanTest
+ podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
@@ -20,7 +21,7 @@ var _ = Describe("Podman wait", func() {
if err != nil {
os.Exit(1)
}
- podmanTest = PodmanCreate(tempdir)
+ podmanTest = PodmanTestCreate(tempdir)
podmanTest.RestoreAllArtifacts()
})
diff --git a/test/goecho/goecho.go b/test/goecho/goecho.go
new file mode 100644
index 000000000..1c8d2f586
--- /dev/null
+++ b/test/goecho/goecho.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "time"
+)
+
+func main() {
+ args := os.Args[1:]
+ exitCode := 0
+
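+ // Echo every argument to both stdout and stderr so tests can assert on either stream.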
+ for i := 0; i < len(args); i++ {
+ fmt.Fprintln(os.Stdout, args[i])
+ fmt.Fprintln(os.Stderr, args[i])
+ }
+
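+ // A leading "exitcode N" sets the exit status; a leading "sleep N" sleeps N seconds before exiting.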
+ if len(args) > 1 {
+ num, _ := strconv.Atoi(args[1])
+ if args[0] == "exitcode" {
+ exitCode = num
+ }
+ if args[0] == "sleep" {
+ time.Sleep(time.Duration(num) * time.Second)
+ }
+ }
+ os.Exit(exitCode)
+}
diff --git a/test/install/Dockerfile.Fedora b/test/install/Dockerfile.Fedora
index 188e60328..3a7b472de 100644
--- a/test/install/Dockerfile.Fedora
+++ b/test/install/Dockerfile.Fedora
@@ -1,3 +1,3 @@
-FROM registry.fedoraproject.org/fedora:28
+FROM registry.fedoraproject.org/fedora:29
-RUN dnf install -y rpms/noarch/* rpms/x86_64/*
\ No newline at end of file
+RUN dnf install -y rpms/noarch/* rpms/x86_64/*
diff --git a/test/system/libpod_suite_test.go b/test/system/libpod_suite_test.go
new file mode 100644
index 000000000..5de50e4e7
--- /dev/null
+++ b/test/system/libpod_suite_test.go
@@ -0,0 +1,217 @@
+package system
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var (
+ PODMAN_BINARY string
+ GLOBALOPTIONS = []string{"--cgroup-manager",
+ "--cni-config-dir",
+ "--config", "-c",
+ "--conmon",
+ "--cpu-profile",
+ "--log-level",
+ "--root",
+ "--tmpdir",
+ "--runroot",
+ "--runtime",
+ "--storage-driver",
+ "--storage-opt",
+ "--syslog",
+ }
+ PODMAN_SUBCMD = []string{"attach",
+ "commit",
+ "container",
+ "build",
+ "create",
+ "diff",
+ "exec",
+ "export",
+ "history",
+ "image",
+ "images",
+ "import",
+ "info",
+ "inspect",
+ "kill",
+ "load",
+ "login",
+ "logout",
+ "logs",
+ "mount",
+ "pause",
+ "ps",
+ "pod",
+ "port",
+ "pull",
+ "push",
+ "restart",
+ "rm",
+ "rmi",
+ "run",
+ "save",
+ "search",
+ "start",
+ "stats",
+ "stop",
+ "tag",
+ "top",
+ "umount",
+ "unpause",
+ "version",
+ "wait",
+ "h",
+ }
+ INTEGRATION_ROOT string
+ ARTIFACT_DIR = "/tmp/.artifacts"
+ ALPINE = "docker.io/library/alpine:latest"
+ BB = "docker.io/library/busybox:latest"
+ BB_GLIBC = "docker.io/library/busybox:glibc"
+ fedoraMinimal = "registry.fedoraproject.org/fedora-minimal:latest"
+ nginx = "quay.io/baude/alpine_nginx:latest"
+ redis = "docker.io/library/redis:alpine"
+ registry = "docker.io/library/registry:2"
+ infra = "k8s.gcr.io/pause:3.1"
+ defaultWaitTimeout = 90
+)
+
+// PodmanTestSystem struct for command line options
+type PodmanTestSystem struct {
+ PodmanTest
+ GlobalOptions map[string]string
+ PodmanCmdOptions map[string][]string
+}
+
+// TestLibpod ginkgo master function
+func TestLibpod(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Libpod Suite")
+}
+
+var _ = BeforeSuite(func() {
+})
+
+// PodmanTestCreate creates a PodmanTestSystem instance for the tests
+func PodmanTestCreate(tempDir string) *PodmanTestSystem {
+ var envKey string
+ globalOptions := make(map[string]string)
+ podmanCmdOptions := make(map[string][]string)
+
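+ // Global options are injected via environment variables named after the flag,
+ // e.g. the value of STORAGE_DRIVER (if set) is passed as "--storage-driver".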
+ for _, n := range GLOBALOPTIONS {
+ envKey = strings.Replace(strings.ToUpper(strings.Trim(n, "-")), "-", "_", -1)
+ if isEnvSet(envKey) {
+ globalOptions[n] = os.Getenv(envKey)
+ }
+ }
+
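+ // Per-subcommand options come from PODMAN_<SUBCMD>_OPTIONS, e.g. PODMAN_RUN_OPTIONS for "podman run".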
+ for _, n := range PODMAN_SUBCMD {
+ envKey = strings.Replace("PODMAN_SUBCMD_OPTIONS", "SUBCMD", strings.ToUpper(n), -1)
+ if isEnvSet(envKey) {
+ podmanCmdOptions[n] = strings.Split(os.Getenv(envKey), " ")
+ }
+ }
+
+ podmanBinary := "podman"
+ if os.Getenv("PODMAN_BINARY") != "" {
+ podmanBinary = os.Getenv("PODMAN_BINARY")
+ }
+
+ p := &PodmanTestSystem{
+ PodmanTest: PodmanTest{
+ PodmanBinary: podmanBinary,
+ ArtifactPath: ARTIFACT_DIR,
+ TempDir: tempDir,
+ },
+ GlobalOptions: globalOptions,
+ PodmanCmdOptions: podmanCmdOptions,
+ }
+
+ p.PodmanMakeOptions = p.makeOptions
+
+ return p
+}
+
+func (p *PodmanTestSystem) Podman(args []string) *PodmanSession {
+ return p.PodmanBase(args)
+}
+
+// makeOptions assembles all the podman options
+func (p *PodmanTestSystem) makeOptions(args []string) []string {
+ var addOptions, subArgs []string
+ for _, n := range GLOBALOPTIONS {
+ if p.GlobalOptions[n] != "" {
+ addOptions = append(addOptions, n, p.GlobalOptions[n])
+ }
+ }
+
+ if len(args) == 0 {
+ return addOptions
+ }
+
+ subCmd := args[0]
+ addOptions = append(addOptions, subCmd)
+ if subCmd == "unmount" {
+ subCmd = "umount"
+ }
+ if subCmd == "help" {
+ subCmd = "h"
+ }
+
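+ // Merge any injected per-subcommand options with the caller's arguments, skipping duplicates.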
+ if _, ok := p.PodmanCmdOptions[subCmd]; ok {
+ m := make(map[string]bool)
+ subArgs = p.PodmanCmdOptions[subCmd]
+ for i := 0; i < len(subArgs); i++ {
+ m[subArgs[i]] = true
+ }
+ for i := 1; i < len(args); i++ {
+ if _, ok := m[args[i]]; !ok {
+ subArgs = append(subArgs, args[i])
+ }
+ }
+ } else {
+ subArgs = args[1:]
+ }
+
+ addOptions = append(addOptions, subArgs...)
+
+ return addOptions
+}
+
+// Cleanup cleans up the temporary store
+func (p *PodmanTestSystem) Cleanup() {
+ // Remove all containers
+ stopall := p.Podman([]string{"stop", "-a", "--timeout", "0"})
+ stopall.WaitWithDefaultTimeout()
+
+ session := p.Podman([]string{"rm", "-fa"})
+ session.Wait(90)
+ // Nuke tempdir
+ if err := os.RemoveAll(p.TempDir); err != nil {
+ fmt.Printf("%q\n", err)
+ }
+}
+
+// CleanupPod cleans up the temporary store
+func (p *PodmanTestSystem) CleanupPod() {
+ // Remove all pods
+ session := p.Podman([]string{"pod", "rm", "-fa"})
+ session.Wait(90)
+ // Nuke tempdir
+ if err := os.RemoveAll(p.TempDir); err != nil {
+ fmt.Printf("%q\n", err)
+ }
+}
+
+// isEnvSet checks whether the key is set in the environment
+func isEnvSet(key string) bool {
+ _, set := os.LookupEnv(key)
+ return set
+}
diff --git a/test/system/version_test.go b/test/system/version_test.go
new file mode 100644
index 000000000..ada0093b7
--- /dev/null
+++ b/test/system/version_test.go
@@ -0,0 +1,51 @@
+package system
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman version test", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestSystem
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("Smoking test: podman version with extra args", func() {
+ logc := podmanTest.Podman([]string{"version", "anything", "-", "--"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc.ExitCode()).To(Equal(0))
+ ver := logc.OutputToString()
+ Expect(regexp.MatchString("Version:.*?Go Version:.*?OS/Arch", ver)).To(BeTrue())
+ })
+
+ It("Negative test: podman version with extra flag", func() {
+ logc := podmanTest.Podman([]string{"version", "--foo"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc.ExitCode()).NotTo(Equal(0))
+ match, _ := logc.GrepString("Incorrect Usage: flag provided but not defined: -foo")
+ Expect(match).To(BeTrue())
+ })
+
+})
diff --git a/test/utils/common_function_test.go b/test/utils/common_function_test.go
new file mode 100644
index 000000000..1648a4899
--- /dev/null
+++ b/test/utils/common_function_test.go
@@ -0,0 +1,150 @@
+package utils_test
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/extensions/table"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Common functions test", func() {
+ var defaultOSPath string
+ var defaultCgroupPath string
+
+ BeforeEach(func() {
+ defaultOSPath = OSReleasePath
+ defaultCgroupPath = ProcessOneCgroupPath
+ })
+
+ AfterEach(func() {
+ OSReleasePath = defaultOSPath
+ ProcessOneCgroupPath = defaultCgroupPath
+ })
+
+ It("Test CreateTempDirInTempDir", func() {
+ tmpDir, _ := CreateTempDirInTempDir()
+ _, err := os.Stat(tmpDir)
+ Expect(os.IsNotExist(err)).ShouldNot(BeTrue(), "Directory was not created as expected")
+ })
+
+ It("Test SystemExec", func() {
+ session := SystemExec(GoechoPath, []string{})
+ Expect(session.Command.Process).ShouldNot(BeNil(), "SystemExec could not start a process")
+ })
+
+ It("Test StringInSlice", func() {
+ testSlice := []string{"apple", "peach", "pear"}
+ Expect(StringInSlice("apple", testSlice)).To(BeTrue(), "apple should be in ['apple', 'peach', 'pear']")
+ Expect(StringInSlice("banana", testSlice)).ShouldNot(BeTrue(), "banana should not be in ['apple', 'peach', 'pear']")
+ Expect(StringInSlice("anything", []string{})).ShouldNot(BeTrue(), "anything should not be in an empty slice")
+ })
+
+ DescribeTable("Test GetHostDistributionInfo",
+ func(path, id, ver string, empty bool) {
+ txt := fmt.Sprintf("ID=%s\nVERSION_ID=%s", id, ver)
+ if !empty {
+ f, _ := os.Create(path)
+ f.WriteString(txt)
+ f.Close()
+ }
+
+ OSReleasePath = path
+ host := GetHostDistributionInfo()
+ if empty {
+ Expect(host).To(Equal(HostOS{}), "HostOS should be empty.")
+ } else {
+ Expect(host.Distribution).To(Equal(strings.Trim(id, "\"")))
+ Expect(host.Version).To(Equal(strings.Trim(ver, "\"")))
+ }
+ },
+ Entry("Configure file is not exist.", "/tmp/notexist", "", "", true),
+ Entry("Item value with and without \"", "/tmp/os-release.test", "fedora", "\"28\"", false),
+ Entry("Item empty with and without \"", "/tmp/os-release.test", "", "\"\"", false),
+ )
+
+ DescribeTable("Test IsKernelNewerThan",
+ func(kv string, expect, isNil bool) {
+ newer, err := IsKernelNewerThan(kv)
+ Expect(newer).To(Equal(expect), "Version comparison result is not as expected.")
+ Expect(err == nil).To(Equal(isNil), "Error is not as expected.")
+ },
+ Entry("Invlid kernel version: 0", "0", false, false),
+ Entry("Older kernel version:0.0", "0.0", true, true),
+ Entry("Newer kernel version: 100.17.14", "100.17.14", false, true),
+ Entry("Invlid kernel version: I am not a kernel version", "I am not a kernel version", false, false),
+ )
+
+ DescribeTable("Test TestIsCommandAvailable",
+ func(cmd string, expect bool) {
+ cmdExist := IsCommandAvailable(cmd)
+ Expect(cmdExist).To(Equal(expect))
+ },
+ Entry("Command exist", GoechoPath, true),
+ Entry("Command exist", "Fakecmd", false),
+ )
+
+ It("Test WriteJsonFile", func() {
+ type testJson struct {
+ Item1 int
+ Item2 []string
+ }
+ compareData := &testJson{}
+
+ testData := &testJson{
+ Item1: 5,
+ Item2: []string{"test"},
+ }
+
+ testByte, _ := json.Marshal(testData)
+ err := WriteJsonFile(testByte, "/tmp/testJson")
+
+ Expect(err).To(BeNil(), "Failed to write JSON to file.")
+
+ read, err := os.Open("/tmp/testJson")
+ defer read.Close()
+
+ Expect(err).To(BeNil(), "Can not find the JSON file after we write it.")
+
+ bytes, _ := ioutil.ReadAll(read)
+ json.Unmarshal(bytes, compareData)
+
+ Expect(reflect.DeepEqual(testData, compareData)).To(BeTrue(), "Data changed after storing it to a file.")
+ })
+
+ DescribeTable("Test Containerized",
+ func(path string, setEnv, createFile, expect bool) {
+ if setEnv && (os.Getenv("container") == "") {
+ os.Setenv("container", "test")
+ defer os.Setenv("container", "")
+ }
+ if !setEnv && (os.Getenv("container") != "") {
+ containerized := os.Getenv("container")
+ os.Setenv("container", "")
+ defer os.Setenv("container", containerized)
+ }
+ txt := "1:test:/"
+ if expect {
+ txt = "2:docker:/"
+ }
+ if createFile {
+ f, _ := os.Create(path)
+ f.WriteString(txt)
+ f.Close()
+ }
+ ProcessOneCgroupPath = path
+ Expect(Containerized()).To(Equal(expect))
+ },
+ Entry("Set container in env", "", true, false, true),
+ Entry("Can not read from file", "/tmp/notexist", false, false, false),
+ Entry("Docker in cgroup file", "/tmp/cgroup.test", false, true, true),
+ Entry("Docker not in cgroup file", "/tmp/cgroup.test", false, true, false),
+ )
+
+})
diff --git a/test/utils/podmansession_test.go b/test/utils/podmansession_test.go
new file mode 100644
index 000000000..de8c20b24
--- /dev/null
+++ b/test/utils/podmansession_test.go
@@ -0,0 +1,90 @@
+package utils_test
+
+import (
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("PodmanSession test", func() {
+ var session *PodmanSession
+
+ BeforeEach(func() {
+ session = StartFakeCmdSession([]string{"PodmanSession", "test", "Podman Session"})
+ session.WaitWithDefaultTimeout()
+ })
+
+ It("Test OutputToString", func() {
+ Expect(session.OutputToString()).To(Equal("PodmanSession test Podman Session"))
+ })
+
+ It("Test OutputToStringArray", func() {
+ Expect(session.OutputToStringArray()).To(Equal([]string{"PodmanSession", "test", "Podman Session"}))
+ })
+
+ It("Test ErrorToString", func() {
+ Expect(session.ErrorToString()).To(Equal("PodmanSession test Podman Session"))
+ })
+
+ It("Test ErrorToStringArray", func() {
+ Expect(session.ErrorToStringArray()).To(Equal([]string{"PodmanSession", "test", "Podman Session", ""}))
+ })
+
+ It("Test GrepString", func() {
+ match, backStr := session.GrepString("Session")
+ Expect(match).To(BeTrue())
+ Expect(backStr).To(Equal([]string{"PodmanSession", "Podman Session"}))
+
+ match, backStr = session.GrepString("I am not here")
+ Expect(match).To(Not(BeTrue()))
+ Expect(backStr).To(BeNil())
+
+ })
+
+ It("Test ErrorGrepString", func() {
+ match, backStr := session.ErrorGrepString("Session")
+ Expect(match).To(BeTrue())
+ Expect(backStr).To(Equal([]string{"PodmanSession", "Podman Session"}))
+
+ match, backStr = session.ErrorGrepString("I am not here")
+ Expect(match).To(Not(BeTrue()))
+ Expect(backStr).To(BeNil())
+
+ })
+
+ It("Test LineInOutputStartsWith", func() {
+ Expect(session.LineInOuputStartsWith("Podman")).To(BeTrue())
+ Expect(session.LineInOuputStartsWith("Session")).To(Not(BeTrue()))
+ })
+
+ It("Test LineInOutputContains", func() {
+ Expect(session.LineInOutputContains("Podman")).To(BeTrue())
+ Expect(session.LineInOutputContains("Session")).To(BeTrue())
+ Expect(session.LineInOutputContains("I am not here")).To(Not(BeTrue()))
+ })
+
+ It("Test LineInOutputContainsTag", func() {
+ session = StartFakeCmdSession([]string{"HEAD LINE", "docker.io/library/busybox latest e1ddd7948a1c 5 weeks ago 1.38MB"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.LineInOutputContainsTag("docker.io/library/busybox", "latest")).To(BeTrue())
+ Expect(session.LineInOutputContainsTag("busybox", "latest")).To(Not(BeTrue()))
+ })
+
+ It("Test IsJSONOutputValid", func() {
+ session = StartFakeCmdSession([]string{`{"page":1,"fruits":["apple","peach","pear"]}`})
+ session.WaitWithDefaultTimeout()
+ Expect(session.IsJSONOutputValid()).To(BeTrue())
+
+ session = StartFakeCmdSession([]string{"I am not JSON"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.IsJSONOutputValid()).To(Not(BeTrue()))
+ })
+
+ It("Test WaitWithDefaultTimeout", func() {
+ session = StartFakeCmdSession([]string{"sleep", "2"})
+ Expect(session.ExitCode()).Should(Equal(-1))
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).Should(Equal(0))
+ })
+
+})
diff --git a/test/utils/podmantest_test.go b/test/utils/podmantest_test.go
new file mode 100644
index 000000000..60e3e2a97
--- /dev/null
+++ b/test/utils/podmantest_test.go
@@ -0,0 +1,74 @@
+package utils_test
+
+import (
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("PodmanTest test", func() {
+ var podmanTest *FakePodmanTest
+
+ BeforeEach(func() {
+ podmanTest = FakePodmanTestCreate()
+ })
+
+ AfterEach(func() {
+ FakeOutputs = make(map[string][]string)
+ })
+
+ It("Test PodmanAsUserBase", func() {
+ FakeOutputs["check"] = []string{"check"}
+ os.Setenv("HOOK_OPTION", "hook_option")
+ env := os.Environ()
+ session := podmanTest.PodmanAsUserBase([]string{"check"}, 1000, 1000, env)
+ os.Unsetenv("HOOK_OPTION")
+ session.WaitWithDefaultTimeout()
+ Expect(session.Command.Process).ShouldNot(BeNil())
+ })
+
+ It("Test NumberOfContainersRunning", func() {
+ FakeOutputs["ps -q"] = []string{"one", "two"}
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2))
+ })
+
+ It("Test NumberOfContainers", func() {
+ FakeOutputs["ps -aq"] = []string{"one", "two"}
+ Expect(podmanTest.NumberOfContainers()).To(Equal(2))
+ })
+
+ It("Test NumberOfPods", func() {
+ FakeOutputs["pod ps -q"] = []string{"one", "two"}
+ Expect(podmanTest.NumberOfPods()).To(Equal(2))
+ })
+
+ It("Test WaitForContainer", func() {
+ FakeOutputs["ps -q"] = []string{"one", "two"}
+ Expect(WaitForContainer(podmanTest)).To(BeTrue())
+
+ FakeOutputs["ps -q"] = []string{"one"}
+ Expect(WaitForContainer(podmanTest)).To(BeTrue())
+
+ FakeOutputs["ps -q"] = []string{""}
+ Expect(WaitForContainer(podmanTest)).To(Not(BeTrue()))
+ })
+
+ It("Test GetContainerStatus", func() {
+ FakeOutputs["ps --all --format={{.Status}}"] = []string{"Need func update"}
+ Expect(podmanTest.GetContainerStatus()).To(Equal("Need func update"))
+ })
+
+ It("Test WaitContainerReady", func() {
+ FakeOutputs["logs testimage"] = []string{""}
+ Expect(WaitContainerReady(podmanTest, "testimage", "ready", 2, 1)).To(Not(BeTrue()))
+
+ FakeOutputs["logs testimage"] = []string{"I am ready"}
+ Expect(WaitContainerReady(podmanTest, "testimage", "am ready", 2, 1)).To(BeTrue())
+
+ FakeOutputs["logs testimage"] = []string{"I am ready"}
+ Expect(WaitContainerReady(podmanTest, "testimage", "", 2, 1)).To(BeTrue())
+ })
+
+})
diff --git a/test/utils/utils.go b/test/utils/utils.go
new file mode 100644
index 000000000..288c768d4
--- /dev/null
+++ b/test/utils/utils.go
@@ -0,0 +1,432 @@
+package utils
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/containers/storage/pkg/parsers/kernel"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/onsi/gomega/gexec"
+)
+
+var (
+ defaultWaitTimeout = 90
+ OSReleasePath = "/etc/os-release"
+ ProcessOneCgroupPath = "/proc/1/cgroup"
+)
+
+// PodmanTestCommon contains the common functions that structs embedding
+// PodmanTest are expected to override
+type PodmanTestCommon interface {
+ MakeOptions(args []string) []string
+ WaitForContainer() bool
+ WaitContainerReady(id string, expStr string, timeout int, step int) bool
+}
+
+// PodmanTest struct for command line options
+type PodmanTest struct {
+ PodmanMakeOptions func(args []string) []string
+ PodmanBinary string
+ ArtifactPath string
+ TempDir string
+}
+
+// PodmanSession wraps the gexec.session so we can extend it
+type PodmanSession struct {
+ *gexec.Session
+}
+
+// HostOS is a simple struct for the test os
+type HostOS struct {
+ Distribution string
+ Version string
+ Arch string
+}
+
+// MakeOptions assembles all podman options
+func (p *PodmanTest) MakeOptions(args []string) []string {
+ return p.PodmanMakeOptions(args)
+}
+
+// PodmanAsUserBase executes podman as the given user. uid and gid set the credentials; env,
+// when supplied, is passed to the command and logged for debugging
+func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, env []string) *PodmanSession {
+ var command *exec.Cmd
+ podmanOptions := p.MakeOptions(args)
+
+ if env == nil {
+ fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
+ } else {
+ fmt.Printf("Running: (env: %v) %s %s\n", env, p.PodmanBinary, strings.Join(podmanOptions, " "))
+ }
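+ // Re-exec podman through chroot --userspec so the process runs with the requested uid:gid.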
+ if uid != 0 || gid != 0 {
+ nsEnterOpts := append([]string{"--userspec", fmt.Sprintf("%d:%d", uid, gid), "/", p.PodmanBinary}, podmanOptions...)
+ command = exec.Command("chroot", nsEnterOpts...)
+ } else {
+ command = exec.Command(p.PodmanBinary, podmanOptions...)
+ }
+ if env != nil {
+ command.Env = env
+ }
+
+ session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
+ if err != nil {
+ Fail(fmt.Sprintf("unable to run podman command: %s\n%v", strings.Join(podmanOptions, " "), err))
+ }
+ return &PodmanSession{session}
+}
+
+// PodmanBase executes podman with the default environment.
+func (p *PodmanTest) PodmanBase(args []string) *PodmanSession {
+ return p.PodmanAsUserBase(args, 0, 0, nil)
+}
+
+// WaitForContainer waits on a started container
+func (p *PodmanTest) WaitForContainer() bool {
+ for i := 0; i < 10; i++ {
+ if p.NumberOfContainersRunning() > 0 {
+ return true
+ }
+ time.Sleep(1 * time.Second)
+ }
+ return false
+}
+
+// NumberOfContainersRunning returns an int of how many
+// containers are currently running.
+func (p *PodmanTest) NumberOfContainersRunning() int {
+ var containers []string
+ ps := p.PodmanBase([]string{"ps", "-q"})
+ ps.WaitWithDefaultTimeout()
+ Expect(ps.ExitCode()).To(Equal(0))
+ for _, i := range ps.OutputToStringArray() {
+ if i != "" {
+ containers = append(containers, i)
+ }
+ }
+ return len(containers)
+}
+
+// NumberOfContainers returns an int of how many
+// containers are currently defined.
+func (p *PodmanTest) NumberOfContainers() int {
+ var containers []string
+ ps := p.PodmanBase([]string{"ps", "-aq"})
+ ps.WaitWithDefaultTimeout()
+ Expect(ps.ExitCode()).To(Equal(0))
+ for _, i := range ps.OutputToStringArray() {
+ if i != "" {
+ containers = append(containers, i)
+ }
+ }
+ return len(containers)
+}
+
+// NumberOfPods returns an int of how many
+// pods are currently defined.
+func (p *PodmanTest) NumberOfPods() int {
+ var pods []string
+ ps := p.PodmanBase([]string{"pod", "ps", "-q"})
+ ps.WaitWithDefaultTimeout()
+ Expect(ps.ExitCode()).To(Equal(0))
+ for _, i := range ps.OutputToStringArray() {
+ if i != "" {
+ pods = append(pods, i)
+ }
+ }
+ return len(pods)
+}
+
+// GetContainerStatus returns the containers state.
+// This function assumes only one container is active.
+func (p *PodmanTest) GetContainerStatus() string {
+ var podmanArgs = []string{"ps"}
+ podmanArgs = append(podmanArgs, "--all", "--format={{.Status}}")
+ session := p.PodmanBase(podmanArgs)
+ session.WaitWithDefaultTimeout()
+ return session.OutputToString()
+}
+
+// WaitContainerReady waits until the process or service inside the container has started and is ready to be used.
+func (p *PodmanTest) WaitContainerReady(id string, expStr string, timeout int, step int) bool {
+ startTime := time.Now()
+ s := p.PodmanBase([]string{"logs", id})
+ s.WaitWithDefaultTimeout()
+
+ for {
+ if time.Since(startTime) >= time.Duration(timeout)*time.Second {
+ fmt.Printf("Container %s is not ready in %ds", id, timeout)
+ return false
+ }
+
+ if strings.Contains(s.OutputToString(), expStr) {
+ return true
+ }
+ time.Sleep(time.Duration(step) * time.Second)
+ s = p.PodmanBase([]string{"logs", id})
+ s.WaitWithDefaultTimeout()
+ }
+}
+
+// WaitForContainer is a wrapper that accepts any implementation of PodmanTestCommon.
+func WaitForContainer(p PodmanTestCommon) bool {
+ return p.WaitForContainer()
+}
+
+// WaitContainerReady is a wrapper that accepts any implementation of PodmanTestCommon.
+func WaitContainerReady(p PodmanTestCommon, id string, expStr string, timeout int, step int) bool {
+ return p.WaitContainerReady(id, expStr, timeout, step)
+}
+
+// OutputToString formats session output to string
+func (s *PodmanSession) OutputToString() string {
+ fields := strings.Fields(fmt.Sprintf("%s", s.Out.Contents()))
+ return strings.Join(fields, " ")
+}
+
+// OutputToStringArray returns the output as a []string
+// where each array item is a line split by newline
+func (s *PodmanSession) OutputToStringArray() []string {
+ var results []string
+ output := fmt.Sprintf("%s", s.Out.Contents())
+ for _, line := range strings.Split(output, "\n") {
+ if line != "" {
+ results = append(results, line)
+ }
+ }
+ return results
+}
+
+// ErrorToString formats session stderr to string
+func (s *PodmanSession) ErrorToString() string {
+ fields := strings.Fields(fmt.Sprintf("%s", s.Err.Contents()))
+ return strings.Join(fields, " ")
+}
+
+// ErrorToStringArray returns the stderr output as a []string
+// where each array item is a line split by newline
+func (s *PodmanSession) ErrorToStringArray() []string {
+ output := fmt.Sprintf("%s", s.Err.Contents())
+ return strings.Split(output, "\n")
+}
+
+// GrepString takes the session output and behaves like grep. It returns true
+// on a match, along with the matching lines
+func (s *PodmanSession) GrepString(term string) (bool, []string) {
+ var (
+ greps []string
+ matches bool
+ )
+
+ for _, line := range s.OutputToStringArray() {
+ if strings.Contains(line, term) {
+ matches = true
+ greps = append(greps, line)
+ }
+ }
+ return matches, greps
+}
+
+// ErrorGrepString takes the session stderr output and behaves like grep. It returns true
+// on a match, along with the matching lines
+func (s *PodmanSession) ErrorGrepString(term string) (bool, []string) {
+ var (
+ greps []string
+ matches bool
+ )
+
+ for _, line := range s.ErrorToStringArray() {
+ if strings.Contains(line, term) {
+ matches = true
+ greps = append(greps, line)
+ }
+ }
+ return matches, greps
+}
+
+// LineInOutputStartsWith returns true if a line in the
+// session output starts with the supplied string
+func (s *PodmanSession) LineInOuputStartsWith(term string) bool {
+ for _, i := range s.OutputToStringArray() {
+ if strings.HasPrefix(i, term) {
+ return true
+ }
+ }
+ return false
+}
+
+// LineInOutputContains returns true if a line in the
+// session output contains the supplied string
+func (s *PodmanSession) LineInOutputContains(term string) bool {
+ for _, i := range s.OutputToStringArray() {
+ if strings.Contains(i, term) {
+ return true
+ }
+ }
+ return false
+}
+
+//LineInOutputContainsTag returns true if a line in the
+// session's output contains the repo-tag pair as returned
+// by podman-images(1).
+func (s *PodmanSession) LineInOutputContainsTag(repo, tag string) bool {
+ tagMap := tagOutputToMap(s.OutputToStringArray())
+ for r, t := range tagMap {
+ if repo == r && tag == t {
+ return true
+ }
+ }
+ return false
+}
+
+// IsJSONOutputValid attempts to unmarshal the session buffer
+// and if successful, returns true, else false
+func (s *PodmanSession) IsJSONOutputValid() bool {
+ var i interface{}
+ if err := json.Unmarshal(s.Out.Contents(), &i); err != nil {
+ fmt.Println(err)
+ return false
+ }
+ return true
+}
+
+// WaitWithDefaultTimeout waits for the process to finish, up to defaultWaitTimeout seconds
+func (s *PodmanSession) WaitWithDefaultTimeout() {
+ s.Wait(defaultWaitTimeout)
+ fmt.Println("output:", s.OutputToString())
+}
+
+// CreateTempDirInTempDir creates a temp dir with the prefix podman_test
+func CreateTempDirInTempDir() (string, error) {
+ return ioutil.TempDir("", "podman_test")
+}
+
+// SystemExec is used to exec a system command to check its exit code or output
+func SystemExec(command string, args []string) *PodmanSession {
+ c := exec.Command(command, args...)
+ session, err := gexec.Start(c, GinkgoWriter, GinkgoWriter)
+ if err != nil {
+ Fail(fmt.Sprintf("unable to run command: %s %s", command, strings.Join(args, " ")))
+ }
+ return &PodmanSession{session}
+}
+
+// StringInSlice determines if a string is in a string slice, returns bool
+func StringInSlice(s string, sl []string) bool {
+ for _, i := range sl {
+ if i == s {
+ return true
+ }
+ }
+ return false
+}
+
+// tagOutputToMap parses each string in imagesOutput and returns
+// a map of repo:tag pairs. Note that the first array item is
+// skipped, as it is considered to be the header
+func tagOutputToMap(imagesOutput []string) map[string]string {
+ m := make(map[string]string)
+ // iterate over output but skip the header
+ for _, i := range imagesOutput[1:] {
+ tmp := []string{}
+ for _, x := range strings.Split(i, " ") {
+ if x != "" {
+ tmp = append(tmp, x)
+ }
+ }
+ // podman-images(1) returns list-like output
+ // in the format "Repository Tag [...]"
+ if len(tmp) < 2 {
+ continue
+ }
+ m[tmp[0]] = tmp[1]
+ }
+ return m
+}
+
+// GetHostDistributionInfo returns a HostOS struct with the host's distribution, version, and architecture
+func GetHostDistributionInfo() HostOS {
+ f, err := os.Open(OSReleasePath)
+ if err != nil {
+ return HostOS{}
+ }
+ defer f.Close()
+
+ l := bufio.NewScanner(f)
+ host := HostOS{}
+ host.Arch = runtime.GOARCH
+ for l.Scan() {
+ if strings.HasPrefix(l.Text(), "ID=") {
+ host.Distribution = strings.Replace(strings.TrimSpace(strings.Join(strings.Split(l.Text(), "=")[1:], "")), "\"", "", -1)
+ }
+ if strings.HasPrefix(l.Text(), "VERSION_ID=") {
+ host.Version = strings.Replace(strings.TrimSpace(strings.Join(strings.Split(l.Text(), "=")[1:], "")), "\"", "", -1)
+ }
+ }
+ return host
+}
+
+// IsKernelNewerThan compares the current kernel version to one provided. If
+// the kernel is equal to or greater, returns true
+func IsKernelNewerThan(version string) (bool, error) {
+ inputVersion, err := kernel.ParseRelease(version)
+ if err != nil {
+ return false, err
+ }
+ kv, err := kernel.GetKernelVersion()
+ if err != nil {
+ return false, err
+ }
+
+ // CompareKernelVersion compares two kernel.VersionInfo structs.
+ // Returns -1 if a < b, 0 if a == b, 1 it a > b
+ result := kernel.CompareKernelVersion(*kv, *inputVersion)
+ if result >= 0 {
+ return true, nil
+ }
+ return false, nil
+
+}
+
+// IsCommandAvailable checks whether a command exists
+func IsCommandAvailable(command string) bool {
+ check := exec.Command("bash", "-c", strings.Join([]string{"command -v", command}, " "))
+ err := check.Run()
+ if err != nil {
+ return false
+ }
+ return true
+}
+
+// WriteJsonFile writes JSON-formatted data to a file
+func WriteJsonFile(data []byte, filePath string) error {
+ var jsonData map[string]interface{}
+ json.Unmarshal(data, &jsonData)
+ formatJson, _ := json.MarshalIndent(jsonData, "", " ")
+ return ioutil.WriteFile(filePath, formatJson, 0644)
+}
+
+// Containerized checks whether podman is running inside a container
+func Containerized() bool {
+ container := os.Getenv("container")
+ if container != "" {
+ return true
+ }
+ b, err := ioutil.ReadFile(ProcessOneCgroupPath)
+ if err != nil {
+ // shrug, if we cannot read that file, return false
+ return false
+ }
+ if strings.Contains(string(b), "docker") {
+ return true
+ }
+ return false
+}
diff --git a/test/utils/utils_suite_test.go b/test/utils/utils_suite_test.go
new file mode 100644
index 000000000..b1100892b
--- /dev/null
+++ b/test/utils/utils_suite_test.go
@@ -0,0 +1,52 @@
+package utils_test
+
+import (
+ "fmt"
+ "io"
+ "os/exec"
+ "strings"
+ "testing"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/onsi/gomega/gexec"
+)
+
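+// FakeOutputs maps a joined podman command line to the canned arguments that goecho echoes back as the fake output.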
+var FakeOutputs map[string][]string
+var GoechoPath = "../goecho/goecho"
+
+type FakePodmanTest struct {
+ PodmanTest
+}
+
+func FakePodmanTestCreate() *FakePodmanTest {
+ FakeOutputs = make(map[string][]string)
+ p := &FakePodmanTest{
+ PodmanTest: PodmanTest{
+ PodmanBinary: GoechoPath,
+ },
+ }
+
+ p.PodmanMakeOptions = p.makeOptions
+ return p
+}
+
+func (p *FakePodmanTest) makeOptions(args []string) []string {
+ return FakeOutputs[strings.Join(args, " ")]
+}
+
+func StartFakeCmdSession(args []string) *PodmanSession {
+ var outWriter, errWriter io.Writer
+ command := exec.Command(GoechoPath, args...)
+ session, err := gexec.Start(command, outWriter, errWriter)
+ if err != nil {
+ fmt.Println(err)
+ }
+ return &PodmanSession{session}
+}
+
+func TestUtils(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Unit test for test utils package")
+}
diff --git a/troubleshooting.md b/troubleshooting.md
index db36d1bb8..574196a69 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -10,7 +10,7 @@
A large number of issues reported against Podman are often found to already be fixed
in more current versions of the project. Before reporting an issue, please verify the
version you are running with `podman version` and compare it to the lastest release
-documented on the top of Podman's [README.md](README.md).
+documented on the top of Podman's [README.md](README.md).
If they differ, please update your version of PODMAN to the latest possible
and retry your command before reporting the issue.
@@ -68,4 +68,62 @@ communicate with a registry and not use tls verification.
* Turn off tls verification by passing false to the tls-verification option.
* I.e. `podman push --tls-verify=false alpine docker://localhost:5000/myalpine:latest`
+
+---
+### 4) Rootless: could not get runtime - database configuration mismatch
+
+In Podman release 0.11.1, a default path for rootless containers was changed,
+potentially causing rootless Podman to be unable to function. The new default
+path is not a problem for new installations, but existing installations will
+need to work around it with the following fix.
+
+#### Symptom
+
+```console
+$ podman info
+could not get runtime: database run root /run/user/1000/run does not match our run root /run/user/1000: database configuration mismatch
+```
+
+#### Solution
+
+To work around the new default path, we can manually set the path Podman is
+expecting in a configuration file.
+
+First, we need to make a new local configuration file for rootless Podman.
+* `mkdir -p ~/.config/containers`
+* `cp /usr/share/containers/libpod.conf ~/.config/containers`
+
+Next, edit the new local configuration file
+(`~/.config/containers/libpod.conf`) with your favorite editor. Comment out the
+line starting with `cgroup_manager` by adding a `#` character at the beginning
+of the line, and change the path in the line starting with `tmp_dir` to point to
+the first path in the error message Podman gave (in this case,
+`/run/user/1000/run`).
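+
+For example, after the edit the two relevant lines might look roughly like this
+(the exact default values in your copy of `libpod.conf` may differ):
+
+```
+# cgroup_manager = "systemd"
+tmp_dir = "/run/user/1000/run"
+```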
+
---
+### 5) Rootless containers cannot ping hosts
+
+When using the ping command from a non-root container, the command may
+fail because of a lack of privileges.
+
+#### Symptom
+
+```console
+$ podman run --rm fedora ping -W10 -c1 redhat.com
+PING redhat.com (209.132.183.105): 56 data bytes
+
+--- redhat.com ping statistics ---
+1 packets transmitted, 0 packets received, 100% packet loss
+```
+
+#### Solution
+
+It is most likely necessary to enable unprivileged pings on the host.
+Be sure one of the user's GIDs is part of the range in the
+`/proc/sys/net/ipv4/ping_group_range` file.
+
+To change its value you can use something like: `sysctl -w
+"net.ipv4.ping_group_range=0 2000000"`.
+
+To make the change persistent, you'll need to add a file in
+`/etc/sysctl.d` that contains `net.ipv4.ping_group_range=0 $MAX_GID`.
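+
+For example (2000000 is only an illustrative upper bound, and the file name under
+`/etc/sysctl.d` is arbitrary):
+
+```console
+$ sudo sysctl -w "net.ipv4.ping_group_range=0 2000000"
+$ echo "net.ipv4.ping_group_range=0 2000000" | sudo tee /etc/sysctl.d/99-ping-group-range.conf
+```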
diff --git a/vendor.conf b/vendor.conf
index 85b784d9b..f2d7fa414 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -11,8 +11,8 @@ github.com/containerd/cgroups 58556f5ad8448d99a6f7bea69ea4bdb7747cfeb0
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
github.com/containernetworking/plugins 1562a1e60ed101aacc5e08ed9dbeba8e9f3d4ec1
-github.com/containers/image bd10b1b53b2976f215b3f2f848fb8e7cad779aeb
-github.com/containers/storage 09abf3a26b8a3aa69e29fd7faeb260b98d675759
+github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067
+github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d
github.com/containers/psgo 5dde6da0bc8831b35243a847625bcf18183bd1ee
github.com/coreos/go-systemd v14
github.com/cri-o/ocicni 2d2983e40c242322a56c22a903785e7f83eb378c
@@ -51,7 +51,7 @@ github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc b4e2ecb452d9ee4381137cc0a7e6715b96bed6de
github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce
github.com/opencontainers/runtime-tools master
-github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
+github.com/opencontainers/selinux 51c6c0a5dbc675792e953298cb9871819d6f9bb8
github.com/ostreedev/ostree-go master
github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
@@ -77,7 +77,7 @@ golang.org/x/sys master
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
golang.org/x/sync master
-google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go<
+google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
gopkg.in/cheggaaa/pb.v1 v1.0.7
gopkg.in/inf.v0 v0.9.0
gopkg.in/mgo.v2 v2
@@ -92,10 +92,11 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master
github.com/varlink/go master
-github.com/containers/buildah 46c577c87d5a7ab30ef40cfa695cd2b96b32b117
+github.com/containers/buildah dd0f4f1b1eb49b841179049ac498e4b0f874b462
github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master
github.com/ulikunitz/xz v0.5.4
github.com/mailru/easyjson 03f2033d19d5860aef995fe360ac7d395cd8ce65
github.com/coreos/go-iptables 25d087f3cffd9aedc0c2b7eff25f23cbf3c20fe1
+github.com/google/shlex c34317bd91bf98fab745d77b03933cf8769299fe
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
index 6a79e524b..12eafdf88 100644
--- a/vendor/github.com/containers/buildah/README.md
+++ b/vendor/github.com/containers/buildah/README.md
@@ -105,8 +105,10 @@ $ sudo ./lighttpd.sh
| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
+| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. |
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
+| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 9994d6cd0..cbdb5c9f9 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -25,7 +25,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.5-dev"
+ Version = "1.6-dev"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
@@ -224,6 +224,7 @@ func GetBuildInfo(b *Builder) BuilderInfo {
ContainerID: b.ContainerID,
MountPoint: b.MountPoint,
ProcessLabel: b.ProcessLabel,
+ MountLabel: b.MountLabel,
ImageAnnotations: b.ImageAnnotations,
ImageCreatedBy: b.ImageCreatedBy,
OCIv1: b.OCIv1,
@@ -316,6 +317,10 @@ type BuilderOptions struct {
// the registry together, can not be resolved to a reference to a
// source image. No separator is implicitly added.
Transport string
+ // PullBlobDirectory is the name of a directory in which we'll attempt
+ // to store copies of layer blobs that we pull down, if any. It should
+ // already exist.
+ PullBlobDirectory string
// Mount signals to NewBuilder() that the container should be mounted
// immediately.
Mount bool
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 51e2d2bd4..6a1400e61 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -955,6 +955,20 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
return nil
}
+func makeReadOnly(mntpoint string, flags uintptr) error {
+ var fs unix.Statfs_t
+ // Make sure it's read-only.
+ if err := unix.Statfs(mntpoint, &fs); err != nil {
+ return errors.Wrapf(err, "error checking if directory %q was bound read-only", mntpoint)
+ }
+ if fs.Flags&unix.ST_RDONLY == 0 {
+ if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil {
+ return errors.Wrapf(err, "error remounting %s in mount namespace read-only", mntpoint)
+ }
+ }
+ return nil
+}
+
// setupChrootBindMounts actually bind mounts things under the rootfs, and returns a
// callback that will clean up its work.
func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func() error, err error) {
@@ -976,7 +990,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
bindFlags := commonFlags | unix.MS_NODEV
devFlags := commonFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY
procFlags := devFlags | unix.MS_NODEV
- sysFlags := devFlags | unix.MS_NODEV | unix.MS_RDONLY
+ sysFlags := devFlags | unix.MS_NODEV
// Bind /dev read-only.
subDev := filepath.Join(spec.Root.Path, "/dev")
@@ -1030,13 +1044,22 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
}
}
- // Make sure it's read-only.
- if err = unix.Statfs(subSys, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subSys)
+ if err := makeReadOnly(subSys, sysFlags); err != nil {
+ return undoBinds, err
}
- if fs.Flags&unix.ST_RDONLY == 0 {
- if err := unix.Mount(subSys, subSys, "bind", sysFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting /sys in mount namespace read-only")
+
+ mnts, _ := mount.GetMounts()
+ for _, m := range mnts {
+ if !strings.HasPrefix(m.Mountpoint, "/sys/") &&
+ m.Mountpoint != "/sys" {
+ continue
+ }
+ subSys := filepath.Join(spec.Root.Path, m.Mountpoint)
+ if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil {
+ return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
+ }
+ if err := makeReadOnly(subSys, sysFlags); err != nil {
+ return undoBinds, err
}
}
logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys"))
@@ -1044,10 +1067,6 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// Add /sys/fs/selinux to the set of masked paths, to ensure that we don't have processes
// attempting to interact with labeling, when they aren't allowed to do so.
spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux")
- // Add /sys/fs/cgroup to the set of masked paths, to ensure that we don't have processes
- // attempting to mess with cgroup configuration, when they aren't allowed to do so.
- spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup")
-
// Bind mount in everything we've been asked to mount.
for _, m := range spec.Mounts {
// Skip anything that we just mounted.
@@ -1143,11 +1162,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
logrus.Debugf("mounted a tmpfs to %q", target)
}
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subSys)
+ return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
}
if uintptr(fs.Flags)&expectedFlags != expectedFlags {
if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags")
+ return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
}
}
}
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 71696b432..591ead4e6 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -7,6 +7,7 @@ import (
"io/ioutil"
"time"
+ "github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
@@ -55,6 +56,12 @@ type CommitOptions struct {
// Squash tells the builder to produce an image with a single layer
// instead of with possibly more than one layer.
Squash bool
+ // BlobDirectory is the name of a directory in which we'll look for
+ // prebuilt copies of layer blobs that we might otherwise need to
+ // regenerate from on-disk layers. If blobs are available, the
+ // manifest of the new image will reference the blobs rather than
+ // on-disk layers.
+ BlobDirectory string
// OnBuild is a list of commands to be run by images based on this image
OnBuild []string
@@ -85,6 +92,11 @@ type PushOptions struct {
// ManifestType is the format to use when saving the imge using the 'dir' transport
// possible options are oci, v2s1, and v2s2
ManifestType string
+ // BlobDirectory is the name of a directory in which we'll look for
+ // prebuilt copies of layer blobs that we might otherwise need to
+ // regenerate from on-disk layers, substituting them in the list of
+ // blobs to copy whenever possible.
+ BlobDirectory string
}
// Commit writes the contents of the container, along with its updated
@@ -128,13 +140,37 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
}
}
- src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.Compression, options.HistoryTimestamp)
+ src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp)
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
}
+ var maybeCachedSrc types.ImageReference = src
+ var maybeCachedDest types.ImageReference = dest
+ if options.BlobDirectory != "" {
+ compress := types.PreserveOriginal
+ if options.Compression != archive.Uncompressed {
+ compress = types.Compress
+ }
+ cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
+ }
+ maybeCachedSrc = cache
+ cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(dest), options.BlobDirectory)
+ }
+ maybeCachedDest = cache
+ }
// "Copy" our image to where it needs to be.
+ switch options.Compression {
+ case archive.Uncompressed:
+ systemContext.OCIAcceptUncompressedLayers = true
+ case archive.Gzip:
+ systemContext.DirForceCompress = true
+ }
var manifestBytes []byte
- if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, src, nil, dest, systemContext, "")); err != nil {
+ if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
if len(options.AdditionalTags) > 0 {
@@ -209,10 +245,28 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
if err != nil {
return nil, "", err
}
+ var maybeCachedSrc types.ImageReference = src
+ if options.BlobDirectory != "" {
+ compress := types.PreserveOriginal
+ if options.Compression != archive.Uncompressed {
+ compress = types.Compress
+ }
+ cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
+ }
+ maybeCachedSrc = cache
+ }
// Copy everything.
+ switch options.Compression {
+ case archive.Uncompressed:
+ systemContext.OCIAcceptUncompressedLayers = true
+ case archive.Gzip:
+ systemContext.DirForceCompress = true
+ }
var manifestBytes []byte
- if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, src, nil, dest, systemContext, options.ManifestType)); err != nil {
- return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(src), transports.ImageName(dest))
+ if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil {
+ return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
}
if options.ReportWriter != nil {
fmt.Fprintf(options.ReportWriter, "")
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index 56a901925..dfdc33a22 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -2,12 +2,14 @@ package buildah
import (
"io"
-
- "github.com/sirupsen/logrus"
+ "os"
+ "path/filepath"
cp "github.com/containers/image/copy"
"github.com/containers/image/transports"
"github.com/containers/image/types"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/sirupsen/logrus"
)
const (
@@ -17,28 +19,42 @@ const (
DOCKER = "docker"
)
+// userRegistriesFile is the path to the per user registry configuration file.
+var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
+
func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
sourceCtx := &types.SystemContext{}
if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext
+ } else {
+ if rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sourceCtx.SystemRegistriesConfPath = userRegistriesFile
+ }
+
+ }
}
sourceInsecure, err := isReferenceInsecure(sourceReference, sourceCtx)
if err != nil {
logrus.Debugf("error determining if registry for %q is insecure: %v", transports.ImageName(sourceReference), err)
} else if sourceInsecure {
- sourceCtx.DockerInsecureSkipTLSVerify = true
sourceCtx.OCIInsecureSkipTLSVerify = true
}
destinationCtx := &types.SystemContext{}
if destinationSystemContext != nil {
*destinationCtx = *destinationSystemContext
+ } else {
+ if rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ destinationCtx.SystemRegistriesConfPath = userRegistriesFile
+ }
+ }
}
destinationInsecure, err := isReferenceInsecure(destinationReference, destinationCtx)
if err != nil {
logrus.Debugf("error determining if registry for %q is insecure: %v", transports.ImageName(destinationReference), err)
} else if destinationInsecure {
- destinationCtx.DockerInsecureSkipTLSVerify = true
destinationCtx.OCIInsecureSkipTLSVerify = true
}
@@ -58,5 +74,11 @@ func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string)
if signaturePolicyPath != "" {
sc.SignaturePolicyPath = signaturePolicyPath
}
+ if sc.SystemRegistriesConfPath == "" && rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sc.SystemRegistriesConfPath = userRegistriesFile
+ }
+
+ }
return sc
}
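The rootless handling added above repeatedly falls back to a per-user registries.conf when one exists. A condensed sketch of that check under the same assumptions as the code above (the helper name applyUserRegistriesFile is ours; the rootless package is the vendored libpod one already imported here):

package example

import (
	"os"
	"path/filepath"

	"github.com/containers/image/types"
	"github.com/containers/libpod/pkg/rootless"
)

// applyUserRegistriesFile points the context at ~/.config/containers/registries.conf
// when running rootless and that file exists, mirroring the fallback added above.
func applyUserRegistriesFile(sc *types.SystemContext) {
	if sc.SystemRegistriesConfPath != "" || !rootless.IsRootless() {
		return
	}
	userRegistriesFile := filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
	if _, err := os.Stat(userRegistriesFile); err == nil {
		sc.SystemRegistriesConfPath = userRegistriesFile
	}
}
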
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 89224b674..3609694f6 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -543,3 +543,37 @@ func (b *Builder) SetStopSignal(stopSignal string) {
b.OCIv1.Config.StopSignal = stopSignal
b.Docker.Config.StopSignal = stopSignal
}
+
+// Healthcheck returns information that recommends how a container engine
+// should check if a running container is "healthy".
+func (b *Builder) Healthcheck() *docker.HealthConfig {
+ if b.Docker.Config.Healthcheck == nil {
+ return nil
+ }
+ return &docker.HealthConfig{
+ Test: copyStringSlice(b.Docker.Config.Healthcheck.Test),
+ Interval: b.Docker.Config.Healthcheck.Interval,
+ Timeout: b.Docker.Config.Healthcheck.Timeout,
+ StartPeriod: b.Docker.Config.Healthcheck.StartPeriod,
+ Retries: b.Docker.Config.Healthcheck.Retries,
+ }
+}
+
+// SetHealthcheck sets recommended commands to run in order to verify that a
+// running container based on this image is "healthy", along with information
+// specifying how often that test should be run, and how many times the test
+// should fail before the container should be considered unhealthy.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+func (b *Builder) SetHealthcheck(config *docker.HealthConfig) {
+ b.Docker.Config.Healthcheck = nil
+ if config != nil {
+ b.Docker.Config.Healthcheck = &docker.HealthConfig{
+ Test: copyStringSlice(config.Test),
+ Interval: config.Interval,
+ Timeout: config.Timeout,
+ StartPeriod: config.StartPeriod,
+ Retries: config.Retries,
+ }
+ }
+}
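A sketch of how a caller might exercise the new Healthcheck()/SetHealthcheck() accessors; the builder value is assumed to come from elsewhere (for example buildah.NewBuilder), and the check command is made up for the example:

package example

import (
	"time"

	"github.com/containers/buildah"
	"github.com/containers/buildah/docker"
)

// setExampleHealthcheck stores a Docker-format healthcheck on the builder and reads
// it back. As noted above, the setting is discarded when committing in OCI format.
func setExampleHealthcheck(b *buildah.Builder) *docker.HealthConfig {
	b.SetHealthcheck(&docker.HealthConfig{
		Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval:    30 * time.Second,
		Timeout:     5 * time.Second,
		StartPeriod: 10 * time.Second,
		Retries:     3,
	})
	return b.Healthcheck()
}
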
diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go
index 25f76cf74..e3bddba20 100644
--- a/vendor/github.com/containers/buildah/delete.go
+++ b/vendor/github.com/containers/buildah/delete.go
@@ -1,7 +1,6 @@
package buildah
import (
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
@@ -14,5 +13,5 @@ func (b *Builder) Delete() error {
b.MountPoint = ""
b.Container = ""
b.ContainerID = ""
- return label.ReleaseLabel(b.ProcessLabel)
+ return nil
}
diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go
index 759fc1246..6847d36fd 100644
--- a/vendor/github.com/containers/buildah/docker/types.go
+++ b/vendor/github.com/containers/buildah/docker/types.go
@@ -60,8 +60,9 @@ type HealthConfig struct {
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:",omitempty"` // Time to wait after the container starts before running the first check.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 31aff9eea..163d34269 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -57,22 +57,24 @@ type containerImageRef struct {
squash bool
tarPath func(path string) (io.ReadCloser, error)
parent string
+ blobDirectory string
}
type containerImageSource struct {
- path string
- ref *containerImageRef
- store storage.Store
- containerID string
- mountLabel string
- layerID string
- names []string
- compression archive.Compression
- config []byte
- configDigest digest.Digest
- manifest []byte
- manifestType string
- exporting bool
+ path string
+ ref *containerImageRef
+ store storage.Store
+ containerID string
+ mountLabel string
+ layerID string
+ names []string
+ compression archive.Compression
+ config []byte
+ configDigest digest.Digest
+ manifest []byte
+ manifestType string
+ exporting bool
+ blobDirectory string
}
func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) {
@@ -105,12 +107,11 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
-func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) {
+func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) {
omediaType = v1.MediaTypeImageLayer
- //TODO: Convert to manifest.DockerV2Schema2LayerUncompressedMediaType once available
dmediaType = docker.V2S2MediaTypeUncompressedLayer
- if i.compression != archive.Uncompressed {
- switch i.compression {
+ if layerCompression != archive.Uncompressed {
+ switch layerCompression {
case archive.Gzip:
omediaType = v1.MediaTypeImageLayerGzip
dmediaType = manifest.DockerV2Schema2LayerMediaType
@@ -281,19 +282,21 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// The default layer media type assumes no compression.
omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer
+ // Look up this layer.
+ layer, err := i.store.Layer(layerID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
+ }
// If we're not re-exporting the data, and we're reusing layers individually, reuse
// the blobsum and diff IDs.
if !i.exporting && !i.squash && layerID != i.layerID {
- layer, err2 := i.store.Layer(layerID)
- if err2 != nil {
- return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
- }
if layer.UncompressedDigest == "" {
return nil, errors.Errorf("unable to look up size of layer %q", layerID)
}
layerBlobSum := layer.UncompressedDigest
layerBlobSize := layer.UncompressedSize
- // Note this layer in the manifest, using the uncompressed blobsum.
+ diffID := layer.UncompressedDigest
+ // Note this layer in the manifest, using the appropriate blobsum.
olayerDescriptor := v1.Descriptor{
MediaType: omediaType,
Digest: layerBlobSum,
@@ -306,13 +309,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
Size: layerBlobSize,
}
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
- // Note this layer in the list of diffIDs, again using the uncompressed blobsum.
- oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum)
- dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum)
+ // Note this layer in the list of diffIDs, again using the uncompressed digest.
+ oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
+ dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
continue
}
- // Figure out if we need to change the media type, in case we're using compression.
- omediaType, dmediaType, err = i.computeLayerMIMEType(what)
+ // Figure out if we need to change the media type, in case we've changed the compression.
+ omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression)
if err != nil {
return nil, err
}
@@ -369,8 +372,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
logrus.Debugf("%s size is %d bytes", what, size)
// Rename the layer so that we can more easily find it by digest later.
- if err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())); err != nil {
- return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
+ finalBlobName := filepath.Join(path, destHasher.Digest().String())
+ if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
+ return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), finalBlobName)
}
// Add a note in the manifest about the layer. The blobs are identified by their possibly-
// compressed blob digests.
@@ -473,19 +477,20 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
panic("unreachable code: unsupported manifest type")
}
src = &containerImageSource{
- path: path,
- ref: i,
- store: i.store,
- containerID: i.containerID,
- mountLabel: i.mountLabel,
- layerID: i.layerID,
- names: i.names,
- compression: i.compression,
- config: config,
- configDigest: digest.Canonical.FromBytes(config),
- manifest: imageManifest,
- manifestType: manifestType,
- exporting: i.exporting,
+ path: path,
+ ref: i,
+ store: i.store,
+ containerID: i.containerID,
+ mountLabel: i.mountLabel,
+ layerID: i.layerID,
+ names: i.names,
+ compression: i.compression,
+ config: config,
+ configDigest: digest.Canonical.FromBytes(config),
+ manifest: imageManifest,
+ manifestType: manifestType,
+ exporting: i.exporting,
+ blobDirectory: i.blobDirectory,
}
return src, nil
}
@@ -552,7 +557,7 @@ func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.B
return nil, nil
}
-func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
+func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
if blob.Digest == i.configDigest {
logrus.Debugf("start reading config")
reader := bytes.NewReader(i.config)
@@ -562,7 +567,16 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
}
return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
}
- layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600)
+ var layerFile *os.File
+ for _, path := range []string{i.blobDirectory, i.path} {
+ layerFile, err = os.OpenFile(filepath.Join(path, blob.Digest.String()), os.O_RDONLY, 0600)
+ if err == nil {
+ break
+ }
+ if !os.IsNotExist(err) {
+ logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), path, err)
+ }
+ }
if err != nil {
logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
return nil, -1, errors.Wrapf(err, "error opening file %q to buffer layer blob", filepath.Join(i.path, blob.Digest.String()))
@@ -585,7 +599,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
-func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
+func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
var name reference.Named
container, err := b.store.Container(b.ContainerID)
if err != nil {
@@ -631,6 +645,7 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
squash: squash,
tarPath: b.tarPath(),
parent: parent,
+ blobDirectory: blobDirectory,
}
return ref, nil
}
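The updated GetBlob() above looks for a layer blob first in the shared blob directory and then in the per-image temporary directory, treating anything other than a "not exist" error as merely log-worthy until the list is exhausted. The same lookup order, reduced to a hypothetical standalone helper:

package example

import (
	"os"
	"path/filepath"

	"github.com/sirupsen/logrus"
)

// openBlob tries each candidate directory in order and returns the first blob file
// that exists; errors other than "not exist" are only logged, and the last error is
// returned if every directory misses.
func openBlob(digest string, dirs ...string) (*os.File, error) {
	var f *os.File
	err := os.ErrNotExist
	for _, dir := range dirs {
		f, err = os.OpenFile(filepath.Join(dir, digest), os.O_RDONLY, 0600)
		if err == nil {
			return f, nil
		}
		if !os.IsNotExist(err) {
			logrus.Debugf("error checking for blob %q in %q: %v", digest, dir, err)
		}
	}
	return nil, err
}
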
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 41d85cbc6..2681bc198 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -10,11 +10,13 @@ import (
"os"
"os/exec"
"path/filepath"
+ "regexp"
"strconv"
"strings"
"time"
"github.com/containers/buildah"
+ buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
@@ -167,6 +169,8 @@ type BuildOptions struct {
// ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
// the build was unsuccessful.
ForceRmIntermediateCtrs bool
+ // BlobDirectory is a directory which we'll use for caching layer blobs.
+ BlobDirectory string
}
// Executor is a buildah-based implementation of the imagebuilder.Executor
@@ -222,11 +226,24 @@ type Executor struct {
forceRmIntermediateCtrs bool
containerIDs []string // Stores the IDs of the successful intermediate containers used during layer build
imageMap map[string]string // Used to map images that we create to handle the AS construct.
+ copyFrom string // Used to keep track of the --from flag from COPY and ADD
+ blobDirectory string
+}
+// builtinAllowedBuildArgs is the list of built-in allowed build args
+var builtinAllowedBuildArgs = map[string]bool{
+ "HTTP_PROXY": true,
+ "http_proxy": true,
+ "HTTPS_PROXY": true,
+ "https_proxy": true,
+ "FTP_PROXY": true,
+ "ftp_proxy": true,
+ "NO_PROXY": true,
+ "no_proxy": true,
}
// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
-func (b *Executor) withName(name string, index int) *Executor {
+func (b *Executor) withName(name string, index int, from string) *Executor {
if b.named == nil {
b.named = make(map[string]*Executor)
}
@@ -235,6 +252,7 @@ func (b *Executor) withName(name string, index int) *Executor {
copied.name = name
child := &copied
b.named[name] = child
+ b.named[from] = child
if idx := strconv.Itoa(index); idx != name {
b.named[idx] = child
}
@@ -563,39 +581,40 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
registry: options.Registry,
transport: options.Transport,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
- quiet: options.Quiet,
- runtime: options.Runtime,
- runtimeArgs: options.RuntimeArgs,
- transientMounts: options.TransientMounts,
- compression: options.Compression,
- output: options.Output,
- outputFormat: options.OutputFormat,
- additionalTags: options.AdditionalTags,
- signaturePolicyPath: options.SignaturePolicyPath,
- systemContext: options.SystemContext,
- volumeCache: make(map[string]string),
- volumeCacheInfo: make(map[string]os.FileInfo),
- log: options.Log,
- in: options.In,
- out: options.Out,
- err: options.Err,
- reportWriter: options.ReportWriter,
- isolation: options.Isolation,
- namespaceOptions: options.NamespaceOptions,
- configureNetwork: options.ConfigureNetwork,
- cniPluginPath: options.CNIPluginPath,
- cniConfigDir: options.CNIConfigDir,
- idmappingOptions: options.IDMappingOptions,
- commonBuildOptions: options.CommonBuildOpts,
- defaultMountsFilePath: options.DefaultMountsFilePath,
- iidfile: options.IIDFile,
- squash: options.Squash,
- labels: append([]string{}, options.Labels...),
- annotations: append([]string{}, options.Annotations...),
- layers: options.Layers,
- noCache: options.NoCache,
- removeIntermediateCtrs: options.RemoveIntermediateCtrs,
- forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ quiet: options.Quiet,
+ runtime: options.Runtime,
+ runtimeArgs: options.RuntimeArgs,
+ transientMounts: options.TransientMounts,
+ compression: options.Compression,
+ output: options.Output,
+ outputFormat: options.OutputFormat,
+ additionalTags: options.AdditionalTags,
+ signaturePolicyPath: options.SignaturePolicyPath,
+ systemContext: options.SystemContext,
+ volumeCache: make(map[string]string),
+ volumeCacheInfo: make(map[string]os.FileInfo),
+ log: options.Log,
+ in: options.In,
+ out: options.Out,
+ err: options.Err,
+ reportWriter: options.ReportWriter,
+ isolation: options.Isolation,
+ namespaceOptions: options.NamespaceOptions,
+ configureNetwork: options.ConfigureNetwork,
+ cniPluginPath: options.CNIPluginPath,
+ cniConfigDir: options.CNIConfigDir,
+ idmappingOptions: options.IDMappingOptions,
+ commonBuildOptions: options.CommonBuildOpts,
+ defaultMountsFilePath: options.DefaultMountsFilePath,
+ iidfile: options.IIDFile,
+ squash: options.Squash,
+ labels: append([]string{}, options.Labels...),
+ annotations: append([]string{}, options.Annotations...),
+ layers: options.Layers,
+ noCache: options.NoCache,
+ removeIntermediateCtrs: options.RemoveIntermediateCtrs,
+ forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ blobDirectory: options.BlobDirectory,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -651,6 +670,7 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s
PullPolicy: b.pullPolicy,
Registry: b.registry,
Transport: b.transport,
+ PullBlobDirectory: b.blobDirectory,
SignaturePolicyPath: b.signaturePolicyPath,
ReportWriter: b.reportWriter,
SystemContext: b.systemContext,
@@ -764,7 +784,7 @@ func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
if err != nil {
candidates, _, err := util.ResolveName(b.output, "", b.systemContext, b.store)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q: %v", b.output)
+ return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
}
if len(candidates) == 0 {
return nil, errors.Errorf("error parsing target image name %q", b.output)
@@ -793,12 +813,28 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
commitName := b.output
b.containerIDs = nil
+ var leftoverArgs []string
+ for arg := range b.builder.Args {
+ if !builtinAllowedBuildArgs[arg] {
+ leftoverArgs = append(leftoverArgs, arg)
+ }
+ }
for i, node := range node.Children {
step := ib.Step()
if err := step.Resolve(node); err != nil {
return errors.Wrapf(err, "error resolving step %+v", *node)
}
logrus.Debugf("Parsed Step: %+v", *step)
+ if step.Command == "arg" {
+ for index, arg := range leftoverArgs {
+ for _, Arg := range step.Args {
+ list := strings.SplitN(Arg, "=", 2)
+ if arg == list[0] {
+ leftoverArgs = append(leftoverArgs[:index], leftoverArgs[index+1:]...)
+ }
+ }
+ }
+ }
if !b.quiet {
b.log("%s", step.Original)
}
@@ -826,6 +862,18 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
err error
imgID string
)
+
+ b.copyFrom = ""
+ // Check whether the COPY or ADD step has a --from flag.
+ // If it does, set b.copyFrom to the mount point of the stage or image it names.
+ for _, n := range step.Flags {
+ if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") {
+ arr := strings.Split(n, "=")
+ b.copyFrom = b.named[arr[1]].mountPoint
+ break
+ }
+ }
+
// checkForLayers will be true if b.layers is true and a cached intermediate image is found.
// checkForLayers is set to false when either there is no cached image or a break occurs where
// the instructions in the Dockerfile change from a previous build.
@@ -848,6 +896,7 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
if err := b.copyExistingImage(ctx, cacheID); err != nil {
return err
}
+ b.containerIDs = append(b.containerIDs, b.builder.ContainerID)
break
}
@@ -882,6 +931,9 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
}
}
}
+ if len(leftoverArgs) > 0 {
+ fmt.Fprintf(b.out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
+ }
return nil
}
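The build-arg bookkeeping added to Execute() above warns about --build-arg values that are neither built-in proxy variables nor consumed by an ARG instruction. The same check, condensed into a hypothetical standalone helper:

package example

import "strings"

// builtinAllowed mirrors builtinAllowedBuildArgs above: proxy variables that may
// always be supplied without a matching ARG instruction.
var builtinAllowed = map[string]bool{
	"HTTP_PROXY": true, "http_proxy": true,
	"HTTPS_PROXY": true, "https_proxy": true,
	"FTP_PROXY": true, "ftp_proxy": true,
	"NO_PROXY": true, "no_proxy": true,
}

// unusedBuildArgs returns the supplied build args that are neither built-in nor
// declared by any ARG instruction ("NAME" or "NAME=default").
func unusedBuildArgs(buildArgs map[string]string, argInstructions []string) []string {
	declared := map[string]bool{}
	for _, a := range argInstructions {
		declared[strings.SplitN(a, "=", 2)[0]] = true
	}
	var leftover []string
	for name := range buildArgs {
		if !builtinAllowed[name] && !declared[name] {
			leftover = append(leftover, name)
		}
	}
	return leftover
}
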
@@ -1009,6 +1061,11 @@ func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) {
currNode = currNode.Next
continue
}
+ if b.copyFrom != "" {
+ src = append(src, filepath.Join(b.copyFrom, currNode.Value))
+ currNode = currNode.Next
+ continue
+ }
matches, err := filepath.Glob(filepath.Join(b.contextDir, currNode.Value))
if err != nil {
return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value)
@@ -1044,17 +1101,19 @@ func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (
}
continue
}
- // For local files, walk the file tree and check the time stamps.
- timeIsGreater := false
- err := filepath.Walk(item, func(path string, info os.FileInfo, err error) error {
- if info.ModTime().After(*historyTime) {
- timeIsGreater = true
- return nil
- }
- return nil
- })
+ // Walk the file tree for local files, using chroot to make sure we can't escape the allowed path
+ // when resolving any symlinks.
+ // Format the history time as a fixed RFC3339Nano string so that converting it back from a string
+ // to time.Time in the child process does not run into parsing errors.
+ // If the COPY command has --from, change rootdir to the mount point of the container it is copying from.
+ rootdir := b.contextDir
+ if b.copyFrom != "" {
+ rootdir = b.copyFrom
+ }
+ timeIsGreater, err := resolveModifiedTime(rootdir, item, historyTime.Format(time.RFC3339Nano))
if err != nil {
- return false, errors.Wrapf(err, "error walking file tree %q", item)
+ return false, errors.Wrapf(err, "error resolving symlinks and comparing modified times: %q", item)
}
if timeIsGreater {
return false, nil
@@ -1119,6 +1178,17 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, created
b.builder.SetEntrypoint(config.Entrypoint)
b.builder.SetShell(config.Shell)
b.builder.SetStopSignal(config.StopSignal)
+ if config.Healthcheck != nil {
+ b.builder.SetHealthcheck(&buildahdocker.HealthConfig{
+ Test: append([]string{}, config.Healthcheck.Test...),
+ Interval: config.Healthcheck.Interval,
+ Timeout: config.Healthcheck.Timeout,
+ StartPeriod: config.Healthcheck.StartPeriod,
+ Retries: config.Healthcheck.Retries,
+ })
+ } else {
+ b.builder.SetHealthcheck(nil)
+ }
b.builder.ClearLabels()
for k, v := range config.Labels {
b.builder.SetLabel(k, v)
@@ -1164,6 +1234,7 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, created
SystemContext: b.systemContext,
IIDFile: b.iidfile,
Squash: b.squash,
+ BlobDirectory: b.blobDirectory,
Parent: b.builder.FromImageID,
}
imgID, ref, _, err := b.builder.Commit(ctx, imageRef, options)
@@ -1189,8 +1260,16 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin
b.imageMap = make(map[string]string)
stageCount := 0
for _, stage := range stages {
- stageExecutor = b.withName(stage.Name, stage.Position)
- if err := stageExecutor.Prepare(ctx, stage, ""); err != nil {
+ ib := stage.Builder
+ node := stage.Node
+ base, err := ib.From(node)
+ if err != nil {
+ logrus.Debugf("Build(node.Children=%#v)", node.Children)
+ return "", nil, err
+ }
+
+ stageExecutor = b.withName(stage.Name, stage.Position, base)
+ if err := stageExecutor.Prepare(ctx, stage, base); err != nil {
return "", nil, err
}
// Always remove the intermediate/build containers, even if the build was unsuccessful.
@@ -1289,15 +1368,24 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
} else {
// If the Dockerfile isn't found try prepending the
// context directory to it.
- if _, err := os.Stat(dfile); os.IsNotExist(err) {
+ dinfo, err := os.Stat(dfile)
+ if os.IsNotExist(err) {
dfile = filepath.Join(options.ContextDirectory, dfile)
}
+ dinfo, err = os.Stat(dfile)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
+ }
+ // If given a directory, add '/Dockerfile' to it.
+ if dinfo.Mode().IsDir() {
+ dfile = filepath.Join(dfile, "Dockerfile")
+ }
logrus.Debugf("reading local Dockerfile %q", dfile)
contents, err := os.Open(dfile)
if err != nil {
return "", nil, errors.Wrapf(err, "error reading %q", dfile)
}
- dinfo, err := contents.Stat()
+ dinfo, err = contents.Stat()
if err != nil {
contents.Close()
return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
@@ -1320,6 +1408,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
dockerfiles = append(dockerfiles, data)
}
+
+ dockerfiles = processCopyFrom(dockerfiles)
+
mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
if err != nil {
return "", nil, errors.Wrapf(err, "error parsing main Dockerfile")
@@ -1336,10 +1427,87 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
return "", nil, errors.Wrapf(err, "error creating build executor")
}
b := imagebuilder.NewBuilder(options.Args)
- stages := imagebuilder.NewStages(mainNode, b)
+ stages, err := imagebuilder.NewStages(mainNode, b)
+ if err != nil {
+ return "", nil, errors.Wrap(err, "error reading multiple stages")
+ }
return exec.Build(ctx, stages)
}
+// processCopyFrom goes through the Dockerfiles and handles any 'COPY --from' instances,
+// prepending a new FROM statement for any image referenced this way that does not already
+// have a corresponding FROM or AS stage in the Dockerfile.
+func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser {
+
+ var newDockerfiles []io.ReadCloser
+ // fromMap contains the names of the images seen in a FROM
+ // line in the Dockerfiles. The boolean value just completes the map object.
+ fromMap := make(map[string]bool)
+ // asMap contains the names of the images seen after a "FROM image AS"
+// line in the Dockerfiles. The boolean value just completes the map object.
+ asMap := make(map[string]bool)
+
+ copyRE := regexp.MustCompile(`\s*COPY\s+--from=`)
+ fromRE := regexp.MustCompile(`\s*FROM\s+`)
+ asRE := regexp.MustCompile(`(?i)\s+as\s+`)
+ for _, dfile := range dockerfiles {
+ if dfileBinary, err := ioutil.ReadAll(dfile); err == nil {
+ dfileString := fmt.Sprintf("%s", dfileBinary)
+ copyFromContent := copyRE.Split(dfileString, -1)
+ // no "COPY --from=", just continue
+ if len(copyFromContent) < 2 {
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString)))
+ continue
+ }
+ // Load all image names in our Dockerfiles into a map
+ // for easy reference later.
+ fromContent := fromRE.Split(dfileString, -1)
+ for i := 0; i < len(fromContent); i++ {
+ imageName := strings.Split(fromContent[i], " ")
+ if len(imageName) > 0 {
+ finalImage := strings.Split(imageName[0], "\n")
+ if finalImage[0] != "" {
+ fromMap[strings.TrimSpace(finalImage[0])] = true
+ }
+ }
+ }
+ logrus.Debug("fromMap: ", fromMap)
+
+ // Load all image names associated with an 'as' or 'AS' in
+ // our Dockerfiles into a map for easy reference later.
+ asContent := asRE.Split(dfileString, -1)
+ // Skip the first entry in the array, since it is the content before the
+ // first " as " and therefore not a stage name.
+ for i := 1; i < len(asContent); i++ {
+ asName := strings.Split(asContent[i], " ")
+ if len(asName) > 0 {
+ finalAsImage := strings.Split(asName[0], "\n")
+ if finalAsImage[0] != "" {
+ asMap[strings.TrimSpace(finalAsImage[0])] = true
+ }
+ }
+ }
+ logrus.Debug("asMap: ", asMap)
+
+ for i := 1; i < len(copyFromContent); i++ {
+ fromArray := strings.Split(copyFromContent[i], " ")
+ // If the image isn't a stage number or already declared,
+ // add a FROM statement for it to the top of our Dockerfile.
+ trimmedFrom := strings.TrimSpace(fromArray[0])
+ _, okFrom := fromMap[trimmedFrom]
+ _, okAs := asMap[trimmedFrom]
+ _, err := strconv.Atoi(trimmedFrom)
+ if !okFrom && !okAs && err != nil {
+ from := "FROM " + trimmedFrom
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(from)))
+ }
+ }
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString)))
+ } // End if dfileBinary, err := ioutil.ReadAll(dfile); err == nil
+ } // End for _, dfile := range dockerfiles {
+ return newDockerfiles
+}
+
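A self-contained illustration of the rewrite processCopyFrom() performs: when a Dockerfile copies from an image that is never named by a FROM or AS clause (and is not a stage number), a synthetic FROM line is prepended so that a stage exists for it. The image names below are made up for the example:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	dockerfile := "FROM alpine\nCOPY --from=docker.io/library/busybox /bin/busybox /usr/local/bin/\n"
	copyRE := regexp.MustCompile(`\s*COPY\s+--from=`)
	// Everything after each "COPY --from=" starts with the referenced image or stage.
	for _, chunk := range copyRE.Split(dockerfile, -1)[1:] {
		fields := strings.Fields(chunk)
		if len(fields) == 0 {
			continue
		}
		// The real code skips stage numbers and names already seen in FROM/AS clauses.
		fmt.Printf("would prepend: FROM %s\n", fields[0])
	}
}
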
// deleteSuccessfulIntermediateCtrs goes through the container IDs in b.containerIDs
// and deletes the containers associated with that ID.
func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
index 20e396f1f..edb5837db 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"strings"
+ "time"
"github.com/containers/storage/pkg/reexec"
"github.com/pkg/errors"
@@ -14,13 +15,18 @@ import (
const (
symlinkChrootedCommand = "chrootsymlinks-resolve"
+ symlinkModifiedTime = "modtimesymlinks-resolve"
maxSymlinksResolved = 40
)
func init() {
reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
+ reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified)
}
+// resolveChrootedSymlinks is the "main()" for the reexec'd child process. It
+// chroots into the directory given as the first argument and resolves any
+// symlinks in the path given as the second, writing the result to stdout.
func resolveChrootedSymlinks() {
status := 0
flag.Parse()
@@ -39,7 +45,7 @@ func resolveChrootedSymlinks() {
}
// Our second parameter is the path name to evaluate for symbolic links
- symLink, err := getSymbolicLink(flag.Arg(0), flag.Arg(1))
+ symLink, err := getSymbolicLink(flag.Arg(1))
if err != nil {
fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err)
os.Exit(1)
@@ -51,7 +57,8 @@ func resolveChrootedSymlinks() {
os.Exit(status)
}
-// ResolveSymlink resolves any symlink in filename in the context of rootdir.
+// ResolveSymLink (in the grandparent process) resolves any symlink in filename
+// in the context of rootdir.
func ResolveSymLink(rootdir, filename string) (string, error) {
// The child process expects a chroot and one path that
// will be consulted relative to the chroot directory and evaluated
@@ -62,32 +69,128 @@ func ResolveSymLink(rootdir, filename string) (string, error) {
return "", errors.Wrapf(err, string(output))
}
- // Hand back the resolved symlink, will be "" if a symlink is not found
+ // Hand back the resolved symlink, will be filename if a symlink is not found
return string(output), nil
}
+// resolveSymlinkTimeModified is the "main()" for the reexec'd child process. It
+// chroots into the directory given as the first argument, resolves the path given
+// as the second, and writes "true" to stdout if that path was modified after the
+// time given as the third argument.
+func resolveSymlinkTimeModified() {
+ status := 0
+ flag.Parse()
+ if len(flag.Args()) < 1 {
+ os.Exit(1)
+ }
+ // Our first parameter is the directory to chroot into.
+ if err := unix.Chdir(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
+ os.Exit(1)
+ }
+ if err := unix.Chroot(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
+ os.Exit(1)
+ }
+
+ // Our second parameter is the path name to evaluate for symbolic links.
+ // Our third parameter is the time the cached intermediate image was created.
+ // We check whether the modified time of the filepath we provide is after the time the cached image was created.
+ timeIsGreater, err := modTimeIsGreater(flag.Arg(0), flag.Arg(1), flag.Arg(2))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error checking if modified time of resolved symbolic link is greater: %v\n", err)
+ os.Exit(1)
+ }
+ if _, err := os.Stdout.WriteString(fmt.Sprintf("%v", timeIsGreater)); err != nil {
+ fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
+ os.Exit(1)
+ }
+ os.Exit(status)
+}
+
+// resolveModifiedTime (in the grandparent process) checks filename for any symlinks,
+// resolves it and compares the modified time of the file with historyTime, which is
+// the creation time of the cached image. It returns true if filename was modified after
+// historyTime, otherwise returns false.
+func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
+ // The child process expects a chroot directory, a path that will be
+ // consulted relative to it and resolved for symbolic links, and the
+ // creation time of the cached image to compare against.
+ cmd := reexec.Command(symlinkModifiedTime, rootdir, filename, historyTime)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return false, errors.Wrapf(err, string(output))
+ }
+ // Hand back true/false depending on whether the file was modified after the cached image was created.
+ return string(output) == "true", nil
+}
+
+// modTimeIsGreater goes through the files added/copied in via the Dockerfile and
+// compares their time stamps (following symlinks) with the time stamp of when the
+// cached image was created. It returns true if any file was modified after the
+// cached image was created, otherwise it returns false.
+func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
+ var timeIsGreater bool
+
+ // Convert historyTime from string to time.Time for comparison
+ histTime, err := time.Parse(time.RFC3339Nano, historyTime)
+ if err != nil {
+ return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime)
+ }
+ // Walk the file tree and check the time stamps.
+ // Since we are chrooted into rootdir, we only want the path of the actual filename, i.e. path minus rootdir.
+ // The +1 accounts for the extra "/" (e.g. rootdir=/home/user/mydir, path=/home/user/mydir/myfile.json).
+ err = filepath.Walk(path[len(rootdir)+1:], func(path string, info os.FileInfo, err error) error {
+ // If we are using cached images, files being copied may come from previous build stages.
+ // In that case the copied file may not exist, since no container was created for the
+ // previous build stage, and info will be nil; just return nil and continue using the
+ // cached image for the rest of the build.
+ if info == nil {
+ return nil
+ }
+ modTime := info.ModTime()
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+ // Evaluate any symlink that occurs to get updated modified information
+ resolvedPath, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return errors.Wrapf(err, "error evaluating symlink %q", path)
+ }
+ fileInfo, err := os.Stat(resolvedPath)
+ if err != nil {
+ return errors.Wrapf(err, "error getting file info %q", resolvedPath)
+ }
+ modTime = fileInfo.ModTime()
+ }
+ if modTime.After(histTime) {
+ timeIsGreater = true
+ return nil
+ }
+ return nil
+ })
+ if err != nil {
+ return false, errors.Wrapf(err, "error walking file tree %q", path)
+ }
+ return timeIsGreater, err
+}
+
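The two functions registered above rely on the reexec pattern from containers/storage: a child entry point is registered under a name in init(), reexec.Init() dispatches to it when the binary is re-executed under that name, and the parent drives it with reexec.Command(). A minimal sketch of that pattern (the handler name here is made up; the real code chroots before doing its work):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register the child entry point under a well-known name. When the binary is
	// re-executed with this name as argv[0], childMain runs instead of main.
	reexec.Register("print-args-child", childMain)
}

func childMain() {
	fmt.Println("child saw args:", os.Args[1:])
	os.Exit(0)
}

func main() {
	// reexec.Init must run first; it returns true when we are the child.
	if reexec.Init() {
		return
	}
	cmd := reexec.Command("print-args-child", "/some/root", "/some/path")
	output, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Fprintln(os.Stderr, "child failed:", err)
		os.Exit(1)
	}
	fmt.Print(string(output))
}
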
// getSymbolicLink goes through each part of the path and continues resolving symlinks as they appear.
// Returns the full target path that "path" resolves to.
-func getSymbolicLink(rootdir, path string) (string, error) {
+func getSymbolicLink(path string) (string, error) {
var (
symPath string
symLinksResolved int
)
-
- // Splitting path as we need to resolve each parth of the path at a time
+ // Splitting path as we need to resolve each part of the path at a time
splitPath := strings.Split(path, "/")
if splitPath[0] == "" {
splitPath = splitPath[1:]
symPath = "/"
}
-
for _, p := range splitPath {
// If we have resolved 40 symlinks, that means something is terribly wrong
// will return an error and exit
if symLinksResolved >= maxSymlinksResolved {
return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved)
}
-
symPath = filepath.Join(symPath, p)
isSymlink, resolvedPath, err := hasSymlink(symPath)
if err != nil {
@@ -119,16 +222,21 @@ func getSymbolicLink(rootdir, path string) (string, error) {
// otherwise it returns false and path
func hasSymlink(path string) (bool, string, error) {
info, err := os.Lstat(path)
- if os.IsNotExist(err) {
- if err = os.MkdirAll(path, 0755); err != nil {
- return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
- }
- info, err = os.Lstat(path)
- if err != nil {
- return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(path, 0755); err != nil {
+ return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
+ }
+ info, err = os.Lstat(path)
+ if err != nil {
+ return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ }
+ } else {
+ return false, path, errors.Wrapf(err, "error getting stat of path %q", path)
}
}
- // Return false and path as path is not a symlink
+
+ // Return false and the path itself if the path is not a symlink
if info.Mode()&os.ModeSymlink != os.ModeSymlink {
return false, path, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index 35dc5438a..4f5301b73 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -111,3 +111,28 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
func InitReexec() bool {
return buildah.InitReexec()
}
+
+// ReposToMap parses the specified repotags and returns a map with repositories
+// as keys and the corresponding arrays of tags as values.
+func ReposToMap(repotags []string) map[string][]string {
+ // map format is repo -> tag
+ repos := make(map[string][]string)
+ for _, repo := range repotags {
+ var repository, tag string
+ if strings.Contains(repo, ":") {
+ li := strings.LastIndex(repo, ":")
+ repository = repo[0:li]
+ tag = repo[li+1:]
+ } else if len(repo) > 0 {
+ repository = repo
+ tag = "<none>"
+ } else {
+ logrus.Warnf("Found image with empty name")
+ }
+ repos[repository] = append(repos[repository], tag)
+ }
+ if len(repos) == 0 {
+ repos["<none>"] = []string{"<none>"}
+ }
+ return repos
+}
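A short usage sketch for the new ReposToMap() helper; the repo tags are made up. Entries without a tag are grouped under "<none>", and an empty input yields a single "<none>" entry:

package main

import (
	"fmt"

	"github.com/containers/buildah/imagebuildah"
)

func main() {
	repotags := []string{
		"docker.io/library/alpine:3.8",
		"docker.io/library/alpine:latest",
		"localhost/scratch-image",
	}
	// ReposToMap groups tags by repository; untagged entries get "<none>".
	for repo, tags := range imagebuildah.ReposToMap(repotags) {
		fmt.Println(repo, tags)
	}
}
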
diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go
new file mode 100644
index 000000000..8cd5e4438
--- /dev/null
+++ b/vendor/github.com/containers/buildah/info.go
@@ -0,0 +1,207 @@
+package buildah
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// InfoData holds the info type, i.e. store, host, etc., and the data for each type
+type InfoData struct {
+ Type string
+ Data map[string]interface{}
+}
+
+// Info returns the store and host information
+func Info(store storage.Store) ([]InfoData, error) {
+ info := []InfoData{}
+ // get host information
+ hostInfo, err := hostInfo()
+ if err != nil {
+ logrus.Error(err, "error getting host info")
+ }
+ info = append(info, InfoData{Type: "host", Data: hostInfo})
+
+ // get store information
+ storeInfo, err := storeInfo(store)
+ if err != nil {
+ logrus.Error(err, "error getting store info")
+ }
+ info = append(info, InfoData{Type: "store", Data: storeInfo})
+ return info, nil
+}
+
+func hostInfo() (map[string]interface{}, error) {
+ info := map[string]interface{}{}
+ info["os"] = runtime.GOOS
+ info["arch"] = runtime.GOARCH
+ info["cpus"] = runtime.NumCPU()
+ info["rootless"] = rootless.IsRootless()
+ mi, err := system.ReadMemInfo()
+ if err != nil {
+ logrus.Error(err, "err reading memory info")
+ info["MemTotal"] = ""
+ info["MenFree"] = ""
+ info["SwapTotal"] = ""
+ info["SwapFree"] = ""
+ } else {
+ info["MemTotal"] = mi.MemTotal
+ info["MenFree"] = mi.MemFree
+ info["SwapTotal"] = mi.SwapTotal
+ info["SwapFree"] = mi.SwapFree
+ }
+ hostDistributionInfo := getHostDistributionInfo()
+ info["Distribution"] = map[string]interface{}{
+ "distribution": hostDistributionInfo["Distribution"],
+ "version": hostDistributionInfo["Version"],
+ }
+
+ kv, err := readKernelVersion()
+ if err != nil {
+ logrus.Error(err, "error reading kernel version")
+ }
+ info["kernel"] = kv
+
+ up, err := readUptime()
+ if err != nil {
+ logrus.Error(err, "error reading up time")
+ }
+ // Convert uptime in seconds to a human-readable format
+ upSeconds := up + "s"
+ upDuration, err := time.ParseDuration(upSeconds)
+ if err != nil {
+ logrus.Error(err, "error parsing system uptime")
+ }
+
+ hoursFound := false
+ var timeBuffer bytes.Buffer
+ var hoursBuffer bytes.Buffer
+ for _, elem := range upDuration.String() {
+ timeBuffer.WriteRune(elem)
+ if elem == 'h' || elem == 'm' {
+ timeBuffer.WriteRune(' ')
+ if elem == 'h' {
+ hoursFound = true
+ }
+ }
+ if !hoursFound {
+ hoursBuffer.WriteRune(elem)
+ }
+ }
+
+ info["uptime"] = timeBuffer.String()
+ if hoursFound {
+ hours, err := strconv.ParseFloat(hoursBuffer.String(), 64)
+ if err == nil {
+ days := hours / 24
+ info["uptime"] = fmt.Sprintf("%s (Approximately %.2f days)", info["uptime"], days)
+ }
+ }
+
+ host, err := os.Hostname()
+ if err != nil {
+ logrus.Error(err, "error getting hostname")
+ }
+ info["hostname"] = host
+
+ return info, nil
+
+}
+
+// top-level "store" info
+func storeInfo(store storage.Store) (map[string]interface{}, error) {
+ // report the storage driver in use, the number of images, and the number of containers
+ info := map[string]interface{}{}
+ info["GraphRoot"] = store.GraphRoot()
+ info["RunRoot"] = store.RunRoot()
+ info["GraphDriverName"] = store.GraphDriverName()
+ info["GraphOptions"] = store.GraphOptions()
+ statusPairs, err := store.Status()
+ if err != nil {
+ return nil, err
+ }
+ status := map[string]string{}
+ for _, pair := range statusPairs {
+ status[pair[0]] = pair[1]
+ }
+ info["GraphStatus"] = status
+ images, err := store.Images()
+ if err != nil {
+ logrus.Error(err, "error getting number of images")
+ }
+ info["ImageStore"] = map[string]interface{}{
+ "number": len(images),
+ }
+
+ containers, err := store.Containers()
+ if err != nil {
+ logrus.Error(err, "error getting number of containers")
+ }
+ info["ContainerStore"] = map[string]interface{}{
+ "number": len(containers),
+ }
+
+ return info, nil
+}
+
+func readKernelVersion() (string, error) {
+ buf, err := ioutil.ReadFile("/proc/version")
+ if err != nil {
+ return "", err
+ }
+ f := bytes.Fields(buf)
+ if len(f) < 2 {
+ return string(bytes.TrimSpace(buf)), nil
+ }
+ return string(f[2]), nil
+}
+
+func readUptime() (string, error) {
+ buf, err := ioutil.ReadFile("/proc/uptime")
+ if err != nil {
+ return "", err
+ }
+ f := bytes.Fields(buf)
+ if len(f) < 1 {
+ return "", fmt.Errorf("invalid uptime")
+ }
+ return string(f[0]), nil
+}
+
+// getHostDistributionInfo returns a map containing the host's distribution and version
+func getHostDistributionInfo() map[string]string {
+ dist := make(map[string]string)
+
+ // Populate values in case we cannot find the values
+ // or the file
+ dist["Distribution"] = "unknown"
+ dist["Version"] = "unknown"
+
+ f, err := os.Open("/etc/os-release")
+ if err != nil {
+ return dist
+ }
+ defer f.Close()
+
+ l := bufio.NewScanner(f)
+ for l.Scan() {
+ if strings.HasPrefix(l.Text(), "ID=") {
+ dist["Distribution"] = strings.TrimPrefix(l.Text(), "ID=")
+ }
+ if strings.HasPrefix(l.Text(), "VERSION_ID=") {
+ dist["Version"] = strings.Trim(strings.TrimPrefix(l.Text(), "VERSION_ID="), "\"")
+ }
+ }
+ return dist
+}
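A sketch of how the new Info() API might be consumed, assuming the caller already has a storage.Store (for example from storage.GetStore); it simply prints the "host" and "store" sections:

package example

import (
	"fmt"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

// printInfo dumps the host and store sections returned by buildah.Info.
func printInfo(store storage.Store) error {
	infos, err := buildah.Info(store)
	if err != nil {
		return err
	}
	for _, info := range infos {
		fmt.Println(info.Type)
		for k, v := range info.Data {
			fmt.Printf("  %s: %v\n", k, v)
		}
	}
	return nil
}
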
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 8b0e774ba..7e7f97e49 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -3,6 +3,7 @@ package buildah
import (
"context"
"fmt"
+ "math/rand"
"strings"
"github.com/containers/buildah/util"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/storage"
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/openshift/imagebuilder"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -23,11 +23,6 @@ const (
// as "no image".
BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
- // DefaultTransport is a prefix that we apply to an image name if we
- // can't find one in the local Store, in order to generate a source
- // reference for the image that we can then copy to the local Store.
- DefaultTransport = "docker://"
-
// minimumTruncatedIDLength is the minimum length of an identifier that
// we'll accept as possibly being a truncated image ID.
minimumTruncatedIDLength = 3
@@ -39,6 +34,7 @@ func pullAndFindImage(ctx context.Context, store storage.Store, imageName string
Store: store,
SystemContext: options.SystemContext,
Transport: options.Transport,
+ BlobDirectory: options.PullBlobDirectory,
}
ref, err := pullImage(ctx, store, imageName, pullOptions, sc)
if err != nil {
@@ -150,7 +146,7 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
}
logrus.Debugf("error parsing image name %q as given, trying with transport %q: %v", image, options.Transport, err)
transport := options.Transport
- if transport != DefaultTransport {
+ if transport != util.DefaultTransport {
transport = transport + ":"
}
srcRef2, err := alltransports.ParseImageName(transport + image)
@@ -232,6 +228,27 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
}
}
+func containerNameExist(name string, containers []storage.Container) bool {
+ for _, container := range containers {
+ for _, cname := range container.Names {
+ if cname == name {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func findUnusedContainer(name string, containers []storage.Container) string {
+ suffix := 1
+ tmpName := name
+ for containerNameExist(tmpName, containers) {
+ tmpName = fmt.Sprintf("%s-%d", name, suffix)
+ suffix++
+ }
+ return tmpName
+}
+
func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
var ref types.ImageReference
var img *storage.Image
@@ -241,7 +258,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
options.FromImage = ""
}
if options.Transport == "" {
- options.Transport = DefaultTransport
+ options.Transport = util.DefaultTransport
}
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
@@ -277,23 +294,33 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
name = imageNamePrefix(image) + "-" + name
}
}
+ var container *storage.Container
+ tmpName := name
+ if options.Container == "" {
+ containers, err := store.Containers()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to check for container names")
+ }
+ tmpName = findUnusedContainer(tmpName, containers)
+ }
- coptions := storage.ContainerOptions{}
- coptions.IDMappingOptions = newContainerIDMappingOptions(options.IDMappingOptions)
-
- container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions)
- suffix := 1
- for err != nil && errors.Cause(err) == storage.ErrDuplicateName && options.Container == "" {
- suffix++
- tmpName := fmt.Sprintf("%s-%d", name, suffix)
- if container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions); err == nil {
+ conflict := 100
+ for {
+ coptions := storage.ContainerOptions{
+ LabelOpts: options.CommonBuildOpts.LabelOpts,
+ IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions),
+ }
+ container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions)
+ if err == nil {
name = tmpName
+ break
}
+ if errors.Cause(err) != storage.ErrDuplicateName || options.Container != "" {
+ return nil, errors.Wrapf(err, "error creating container")
+ }
+ tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict)
+ conflict = conflict * 10
}
- if err != nil {
- return nil, errors.Wrapf(err, "error creating container")
- }
-
defer func() {
if err != nil {
if err2 := store.DeleteContainer(container.ID); err2 != nil {
@@ -302,13 +329,6 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
}()
- if err = ReserveSELinuxLabels(store, container.ID); err != nil {
- return nil, err
- }
- processLabel, mountLabel, err := label.InitLabels(options.CommonBuildOpts.LabelOpts)
- if err != nil {
- return nil, err
- }
uidmap, gidmap := convertStorageIDMaps(container.UIDMap, container.GIDMap)
defaultNamespaceOptions, err := DefaultNamespaceOptions()
@@ -328,8 +348,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
ContainerID: container.ID,
ImageAnnotations: map[string]string{},
ImageCreatedBy: "",
- ProcessLabel: processLabel,
- MountLabel: mountLabel,
+ ProcessLabel: container.ProcessLabel(),
+ MountLabel: container.MountLabel(),
DefaultMountsFilePath: options.DefaultMountsFilePath,
Isolation: options.Isolation,
NamespaceOptions: namespaceOptions,
@@ -351,7 +371,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
if options.Mount {
- _, err = builder.Mount(mountLabel)
+ _, err = builder.Mount(container.MountLabel())
if err != nil {
return nil, errors.Wrapf(err, "error mounting build container %q", builder.ContainerID)
}
diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
new file mode 100644
index 000000000..63022f15d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
@@ -0,0 +1,517 @@
+package blobcache
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/containers/buildah/docker"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ _ types.ImageReference = &blobCacheReference{}
+ _ types.ImageSource = &blobCacheSource{}
+ _ types.ImageDestination = &blobCacheDestination{}
+)
+
+const (
+ compressedNote = ".compressed"
+ decompressedNote = ".decompressed"
+)
+
+// BlobCache is an object which saves copies of blobs that are written to it while passing them
+// through to some real destination, and which can be queried directly in order to read them
+// back.
+type BlobCache interface {
+ types.ImageReference
+ // HasBlob checks if a blob that matches the passed-in digest (and
+ // size, if not -1), is present in the cache.
+ HasBlob(types.BlobInfo) (bool, int64, error)
+ // Directory returns the location of the cache directory.
+ Directory() string
+ // ClearCache() clears the contents of the cache directories. Note
+ // that this also clears content which was not placed there by this
+ // cache implementation.
+ ClearCache() error
+}
+
+type blobCacheReference struct {
+ reference types.ImageReference
+ directory string
+ compress types.LayerCompression
+}
+
+type blobCacheSource struct {
+ reference *blobCacheReference
+ source types.ImageSource
+ sys types.SystemContext
+ cacheHits int64
+ cacheMisses int64
+ cacheErrors int64
+}
+
+type blobCacheDestination struct {
+ reference *blobCacheReference
+ destination types.ImageDestination
+}
+
+func makeFilename(blobSum digest.Digest, isConfig bool) string {
+ if isConfig {
+ return blobSum.String() + ".config"
+ }
+ return blobSum.String()
+}
+
+// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
+// written to the destination image created from the resulting reference will also be stored
+// as-is to the specified directory or a temporary directory. The cache directory's contents
+// can be cleared by calling the returned BlobCache's ClearCache() method.
+// The compress argument controls whether or not the cache will try to substitute a compressed
+// or different version of a blob when preparing the list of layers when reading an image.
+func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) {
+ if directory == "" {
+ return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref))
+ }
+ switch compress {
+ case types.Compress, types.Decompress, types.PreserveOriginal:
+ // valid value, accept it
+ default:
+ return nil, errors.Errorf("unhandled LayerCompression value %v", compress)
+ }
+ return &blobCacheReference{
+ reference: ref,
+ directory: directory,
+ compress: compress,
+ }, nil
+}
+
+func (r *blobCacheReference) Transport() types.ImageTransport {
+ return r.reference.Transport()
+}
+
+func (r *blobCacheReference) StringWithinTransport() string {
+ return r.reference.StringWithinTransport()
+}
+
+func (r *blobCacheReference) DockerReference() reference.Named {
+ return r.reference.DockerReference()
+}
+
+func (r *blobCacheReference) PolicyConfigurationIdentity() string {
+ return r.reference.PolicyConfigurationIdentity()
+}
+
+func (r *blobCacheReference) PolicyConfigurationNamespaces() []string {
+ return r.reference.PolicyConfigurationNamespaces()
+}
+
+func (r *blobCacheReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return r.reference.DeleteImage(ctx, sys)
+}
+
+func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+ if blobinfo.Digest == "" {
+ return false, -1, nil
+ }
+
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(r.directory, makeFilename(blobinfo.Digest, isConfig))
+ fileInfo, err := os.Stat(filename)
+ if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) {
+ return true, fileInfo.Size(), nil
+ }
+ if !os.IsNotExist(err) {
+ return false, -1, errors.Wrapf(err, "error checking size of %q", filename)
+ }
+ }
+
+ return false, -1, nil
+}
+
+func (r *blobCacheReference) Directory() string {
+ return r.directory
+}
+
+func (r *blobCacheReference) ClearCache() error {
+ f, err := os.Open(r.directory)
+ if err != nil {
+ return errors.Wrapf(err, "error opening directory %q", r.directory)
+ }
+ defer f.Close()
+ names, err := f.Readdirnames(-1)
+ if err != nil {
+ return errors.Wrapf(err, "error reading directory %q", r.directory)
+ }
+ for _, name := range names {
+ pathname := filepath.Join(r.directory, name)
+ if err = os.RemoveAll(pathname); err != nil {
+ return errors.Wrapf(err, "error removing %q while clearing cache for %q", pathname, transports.ImageName(r))
+ }
+ }
+ return nil
+}
+
+func (r *blobCacheReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ src, err := r.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(r.reference))
+ }
+ return image.FromSource(ctx, sys, src)
+}
+
+func (r *blobCacheReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ src, err := r.reference.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(r.reference))
+ }
+ logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(r.reference), r.directory, r.compress)
+ return &blobCacheSource{reference: r, source: src, sys: *sys}, nil
+}
+
+func (r *blobCacheReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ dest, err := r.reference.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(r.reference))
+ }
+ logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(r.reference), r.directory)
+ return &blobCacheDestination{reference: r, destination: dest}, nil
+}
+
+func (s *blobCacheSource) Reference() types.ImageReference {
+ return s.reference
+}
+
+func (s *blobCacheSource) Close() error {
+ logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors)
+ return s.source.Close()
+}
+
+func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false))
+ manifestBytes, err := ioutil.ReadFile(filename)
+ if err == nil {
+ s.cacheHits++
+ return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil
+ }
+ if !os.IsNotExist(err) {
+ s.cacheErrors++
+ return nil, "", errors.Wrapf(err, "error checking for manifest file %q", filename)
+ }
+ }
+ s.cacheMisses++
+ return s.source.GetManifest(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ present, size, err := s.reference.HasBlob(blobinfo)
+ if err != nil {
+ return nil, -1, err
+ }
+ if present {
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig))
+ f, err := os.Open(filename)
+ if err == nil {
+ s.cacheHits++
+ return f, size, nil
+ }
+ if !os.IsNotExist(err) {
+ s.cacheErrors++
+ return nil, -1, errors.Wrapf(err, "error checking for cache file %q", filepath.Join(s.reference.directory, filename))
+ }
+ }
+ }
+ s.cacheMisses++
+ rc, size, err := s.source.GetBlob(ctx, blobinfo, cache)
+ if err != nil {
+ return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference))
+ }
+ return rc, size, nil
+}
+
+func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ return s.source.GetSignatures(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ signatures, err := s.source.GetSignatures(ctx, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference))
+ }
+ canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0)
+
+ infos, err := s.source.LayerInfosForCopy(ctx)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference))
+ }
+ if infos == nil {
+ image, err := s.reference.NewImage(ctx, &s.sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference))
+ }
+ defer image.Close()
+ infos = image.LayerInfos()
+ }
+
+ if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
+ replacedInfos := make([]types.BlobInfo, 0, len(infos))
+ for _, info := range infos {
+ var replaceDigest []byte
+ var err error
+ blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false))
+ alternate := ""
+ switch s.reference.compress {
+ case types.Compress:
+ alternate = blobFile + compressedNote
+ replaceDigest, err = ioutil.ReadFile(alternate)
+ case types.Decompress:
+ alternate = blobFile + decompressedNote
+ replaceDigest, err = ioutil.ReadFile(alternate)
+ }
+ if err == nil && digest.Digest(replaceDigest).Validate() == nil {
+ alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false))
+ fileInfo, err := os.Stat(alternate)
+ if err == nil {
+ logrus.Debugf("suggesting cached blob with digest %q and compression %v in place of blob with digest %q", string(replaceDigest), s.reference.compress, info.Digest.String())
+ info.Digest = digest.Digest(replaceDigest)
+ info.Size = fileInfo.Size()
+ switch info.MediaType {
+ case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = v1.MediaTypeImageLayerGzip
+ case types.Decompress:
+ info.MediaType = v1.MediaTypeImageLayer
+ }
+ case docker.V2S2MediaTypeUncompressedLayer, manifest.DockerV2Schema2LayerMediaType:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = manifest.DockerV2Schema2LayerMediaType
+ case types.Decompress:
+ info.MediaType = docker.V2S2MediaTypeUncompressedLayer
+ }
+ }
+ }
+ }
+ replacedInfos = append(replacedInfos, info)
+ }
+ infos = replacedInfos
+ }
+
+ return infos, nil
+}
+
+func (d *blobCacheDestination) Reference() types.ImageReference {
+ return d.reference
+}
+
+func (d *blobCacheDestination) Close() error {
+ logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference))
+ return d.destination.Close()
+}
+
+func (d *blobCacheDestination) SupportedManifestMIMETypes() []string {
+ return d.destination.SupportedManifestMIMETypes()
+}
+
+func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error {
+ return d.destination.SupportsSignatures(ctx)
+}
+
+func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression {
+ return d.destination.DesiredLayerCompression()
+}
+
+func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool {
+ return d.destination.AcceptsForeignLayerURLs()
+}
+
+func (d *blobCacheDestination) MustMatchRuntimeOS() bool {
+ return d.destination.MustMatchRuntimeOS()
+}
+
+func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.destination.IgnoresEmbeddedDockerReference()
+}
+
+// Decompress and save the contents of the decompressReader stream into the passed-in temporary
+// file. If we successfully save all of the data, rename the file to match the digest of the data,
+// and make notes about the relationship between the file that holds a copy of the compressed data
+// and this new file.
+func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
+ defer wg.Done()
+ // Decompress from and digest the reading end of that pipe.
+ decompressed, err3 := archive.DecompressStream(decompressReader)
+ digester := digest.Canonical.Digester()
+ if err3 == nil {
+ // Read the decompressed data through the filter over the pipe, blocking until the
+ // writing end is closed.
+ _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
+ } else {
+ // Drain the pipe to keep from stalling the PutBlob() thread.
+ io.Copy(ioutil.Discard, decompressReader)
+ }
+ decompressReader.Close()
+ if decompressed != nil {
+ decompressed.Close()
+ }
+ tempFile.Close()
+ // Determine the name that we should give to the uncompressed copy of the blob.
+ decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig))
+ if err3 == nil {
+ // Rename the temporary file.
+ if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil {
+ logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3)
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ } else {
+ *alternateDigest = digester.Digest()
+ // Note the relationship between the two files.
+ if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3)
+ }
+ if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3)
+ }
+ }
+ } else {
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ }
+}
+
+func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ var tempfile *os.File
+ var err error
+ var n int
+ var alternateDigest digest.Digest
+ wg := new(sync.WaitGroup)
+ defer wg.Wait()
+ compression := archive.Uncompressed
+ if inputInfo.Digest != "" {
+ filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ if err == nil {
+ stream = io.TeeReader(stream, tempfile)
+ defer func() {
+ if err == nil {
+ if err = os.Rename(tempfile.Name(), filename); err != nil {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename)
+ }
+ } else {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ }
+ tempfile.Close()
+ }()
+ } else {
+ logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err)
+ }
+ if !isConfig {
+ initial := make([]byte, 8)
+ n, err = stream.Read(initial)
+ if n > 0 {
+ // Build a Reader that will still return the bytes that we just
+ // read, for PutBlob()'s sake.
+ stream = io.MultiReader(bytes.NewReader(initial[:n]), stream)
+ if n >= len(initial) {
+ compression = archive.DetectCompression(initial[:n])
+ }
+ if compression != archive.Uncompressed {
+ // The stream is compressed, so create a file which we'll
+ // use to store a decompressed copy.
+ decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ if err2 != nil {
+ logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2)
+ decompressedTemp.Close()
+ } else {
+ // Write a copy of the compressed data to a pipe,
+ // closing the writing end of the pipe after
+ // PutBlob() returns.
+ decompressReader, decompressWriter := io.Pipe()
+ defer decompressWriter.Close()
+ stream = io.TeeReader(stream, decompressWriter)
+ // Let saveStream() close the reading end and handle the temporary file.
+ wg.Add(1)
+ go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest)
+ }
+ }
+ }
+ }
+ }
+ newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+ if err != nil {
+ return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference))
+ }
+ if alternateDigest.Validate() == nil {
+ logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory)
+ } else {
+ logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory)
+ }
+ return newBlobInfo, nil
+}
+
+func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute)
+ if err != nil || present {
+ return present, reusedInfo, err
+ }
+
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig))
+ f, err := os.Open(filename)
+ if err == nil {
+ defer f.Close()
+ uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ return true, uploadedInfo, nil
+ }
+ }
+
+ return false, types.BlobInfo{}, nil
+}
+
+func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte) error {
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
+ } else {
+ filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false))
+ if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
+ logrus.Warnf("error saving manifest as %q: %v", filename, err)
+ }
+ }
+ return d.destination.PutManifest(ctx, manifestBytes)
+}
+
+func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+ return d.destination.PutSignatures(ctx, signatures)
+}
+
+func (d *blobCacheDestination) Commit(ctx context.Context) error {
+ return d.destination.Commit(ctx)
+}
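The PutBlob()/saveStream() pair above tees the incoming blob into a cache file while a pipe feeds a goroutine that stores a decompressed copy. The following standard-library-only sketch, not part of this patch, shows that pattern in isolation; cacheWhileUploading is a made-up name and gzip stands in for the archive package's compression detection.

package blobcachesketch

import (
	"compress/gzip"
	"io"
	"io/ioutil"
	"os"
	"sync"
)

// cacheWhileUploading copies stream to upload while also writing the raw bytes to
// rawPath and, via a pipe and a helper goroutine, a decompressed copy to plainPath.
func cacheWhileUploading(stream io.Reader, upload io.Writer, rawPath, plainPath string) error {
	raw, err := os.Create(rawPath)
	if err != nil {
		return err
	}
	defer raw.Close()

	pr, pw := io.Pipe()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer pr.Close()
		plain, err := os.Create(plainPath)
		if err != nil {
			io.Copy(ioutil.Discard, pr) // drain so the upload side is not stalled
			return
		}
		defer plain.Close()
		gz, err := gzip.NewReader(pr)
		if err != nil {
			io.Copy(ioutil.Discard, pr)
			return
		}
		defer gz.Close()
		if _, err := io.Copy(plain, gz); err != nil {
			io.Copy(ioutil.Discard, pr) // keep draining after a partial failure
		}
	}()

	// Tee every byte into the raw cache file and into the decompression pipe
	// while the blob is being uploaded, much as PutBlob() does above.
	_, err = io.Copy(upload, io.TeeReader(io.TeeReader(stream, raw), pw))
	pw.Close()
	wg.Wait()
	return err
}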
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index b54663f5d..e3a4fe62a 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -71,6 +71,10 @@ var (
LayerFlags = []cli.Flag{
cli.BoolFlag{
+ Name: "force-rm",
+ Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
+ },
+ cli.BoolFlag{
Name: "layers",
Usage: fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()),
},
@@ -108,6 +112,10 @@ var (
Usage: "use `[username[:password]]` for accessing the registry",
},
cli.BoolFlag{
+ Name: "disable-compression, D",
+ Usage: "don't compress layers by default",
+ },
+ cli.BoolFlag{
Name: "disable-content-trust",
Usage: "This is a Docker specific option and is a NOOP",
},
@@ -115,10 +123,6 @@ var (
Name: "file, f",
Usage: "`pathname or URL` of a Dockerfile",
},
- cli.BoolFlag{
- Name: "force-rm",
- Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
- },
cli.StringFlag{
Name: "format",
Usage: "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.",
@@ -192,6 +196,12 @@ var (
Name: "add-host",
Usage: "add a custom host-to-IP mapping (`host:ip`) (default [])",
},
+ cli.StringFlag{
+ Name: "blob-cache",
+ Value: "",
+ Usage: "assume image blobs in the specified directory will be available for pushing",
+ Hidden: true, // this is here mainly so that we can test the API during integration tests
+ },
cli.StringSliceFlag{
Name: "cap-add",
Usage: "add the specified capability when running (default [])",
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index b87eb95c7..41fdea8b1 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -282,7 +282,7 @@ func SystemContextFromOptions(c *cli.Context) (*types.SystemContext, error) {
DockerCertPath: c.String("cert-dir"),
}
if c.IsSet("tls-verify") {
- ctx.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify")
+ ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
ctx.OCIInsecureSkipTLSVerify = !c.BoolT("tls-verify")
ctx.DockerDaemonInsecureSkipTLSVerify = !c.BoolT("tls-verify")
}
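The one-line change above is needed because DockerInsecureSkipTLSVerify became a tri-state types.OptionalBool in the newer containers/image, so an unset flag can be distinguished from an explicit false. A short sketch of that behavior, assuming the OptionalBool constants exported by the vendored types package:

package main

import (
	"fmt"

	"github.com/containers/image/types"
)

func main() {
	sys := &types.SystemContext{}
	// Zero value: the caller has not expressed a preference either way.
	fmt.Println(sys.DockerInsecureSkipTLSVerify == types.OptionalBoolUndefined) // true

	// Only when --tls-verify is given explicitly is a concrete value recorded.
	tlsVerify := false
	sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
	fmt.Println(sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue) // true
}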
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 52269541a..e677c8925 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -5,6 +5,7 @@ import (
"io"
"strings"
+ "github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
@@ -40,6 +41,10 @@ type PullOptions struct {
// image name alone can not be resolved to a reference to a source
// image. No separator is implicitly added.
Transport string
+ // BlobDirectory is the name of a directory in which we'll attempt to
+ // store copies of layer blobs that we pull down, if any. It should
+ // already exist.
+ BlobDirectory string
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) {
@@ -146,11 +151,11 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
srcRef, err := alltransports.ParseImageName(spec)
if err != nil {
if options.Transport == "" {
- options.Transport = DefaultTransport
+ options.Transport = util.DefaultTransport
}
logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, options.Transport, err)
transport := options.Transport
- if transport != DefaultTransport {
+ if transport != util.DefaultTransport {
transport = transport + ":"
}
spec = transport + spec
@@ -182,6 +187,14 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
if err != nil {
return nil, errors.Wrapf(err, "error parsing image name %q", destName)
}
+ var maybeCachedDestRef types.ImageReference = destRef
+ if options.BlobDirectory != "" {
+ cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(destRef), options.BlobDirectory)
+ }
+ maybeCachedDestRef = cachedRef
+ }
policy, err := signature.DefaultPolicy(sc)
if err != nil {
@@ -200,7 +213,8 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
}()
logrus.Debugf("copying %q to %q", spec, destName)
- if _, err := cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, destRef, nil, "")); err != nil {
+ if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil {
+ logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", spec, destName, err)
return nil, err
}
return destRef, nil
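A condensed sketch, not part of this patch, of the pull path shown above: wrap the destination reference in a blob cache and hand the wrapped reference to copy.Image, as pullImage() now does. Policy handling is reduced to the default policy, the copy options are left empty, and every name and path here is an example.

package main

import (
	"context"
	"log"

	"github.com/containers/buildah/pkg/blobcache"
	cp "github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

func pullThroughCache(ctx context.Context, src, dest, blobDir string) error {
	srcRef, err := alltransports.ParseImageName(src)
	if err != nil {
		return err
	}
	destRef, err := alltransports.ParseImageName(dest)
	if err != nil {
		return err
	}
	// Wrap the destination so every blob that gets copied is also kept in blobDir.
	cachedDestRef, err := blobcache.NewBlobCache(destRef, blobDir, types.PreserveOriginal)
	if err != nil {
		return err
	}
	policy, err := signature.DefaultPolicy(nil)
	if err != nil {
		return err
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer policyContext.Destroy()
	_, err = cp.Image(ctx, policyContext, cachedDestRef, srcRef, &cp.Options{})
	return err
}

func main() {
	err := pullThroughCache(context.Background(),
		"docker://docker.io/library/alpine:latest",
		"containers-storage:localhost/alpine:latest",
		"/var/tmp/blob-cache")
	if err != nil {
		log.Fatal(err)
	}
}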
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 718ef4e36..5d2cd6a32 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -451,7 +451,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
copyWithTar := b.copyWithTar(nil, nil)
- builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes)
+ builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
return err
}
@@ -493,15 +493,21 @@ func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts
return mounts, nil
}
-func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string) ([]specs.Mount, error) {
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
var mounts []specs.Mount
+ hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
for _, volume := range builtinVolumes {
subdir := digest.Canonical.FromString(volume).Hex()
volumePath := filepath.Join(containerDir, "buildah-volumes", subdir)
+ srcPath := filepath.Join(mountPoint, volume)
+ initializeVolume := false
// If we need to, initialize the volume path's initial contents.
- if _, err := os.Stat(volumePath); err != nil && os.IsNotExist(err) {
+ if _, err := os.Stat(volumePath); err != nil {
+ if !os.IsNotExist(err) {
+ return nil, errors.Wrapf(err, "failed to stat %q for volume %q", volumePath, volume)
+ }
logrus.Debugf("setting up built-in volume at %q", volumePath)
if err = os.MkdirAll(volumePath, 0755); err != nil {
return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume)
@@ -509,11 +515,21 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWit
if err = label.Relabel(volumePath, mountLabel, false); err != nil {
return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume)
}
- srcPath := filepath.Join(mountPoint, volume)
- stat, err := os.Stat(srcPath)
- if err != nil {
+ initializeVolume = true
+ }
+ stat, err := os.Stat(srcPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
}
+ if err = idtools.MkdirAllAndChownNew(srcPath, 0755, hostOwner); err != nil {
+ return nil, errors.Wrapf(err, "error creating directory %q for volume %q", srcPath, volume)
+ }
+ if stat, err = os.Stat(srcPath); err != nil {
+ return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
+ }
+ }
+ if initializeVolume {
if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
return nil, errors.Wrapf(err, "failed to chmod %q for volume %q", volumePath, volume)
}
@@ -1044,24 +1060,31 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
- hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair)
- if err != nil {
- return err
- }
- resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
- if err != nil {
- return err
- }
+ bindFiles := make(map[string]string)
+ namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...)
+ volumes := b.Volumes()
- if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
- return err
+ if !contains(volumes, "/etc/hosts") {
+ hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair)
+ if err != nil {
+ return err
+ }
+ bindFiles["/etc/hosts"] = hostFile
+
+ if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
+ return err
+ }
}
- bindFiles := map[string]string{
- "/etc/hosts": hostFile,
- "/etc/resolv.conf": resolvFile,
+ if !contains(volumes, "/etc/resolv.conf") {
+ resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
+ if err != nil {
+ return err
+ }
+ bindFiles["/etc/resolv.conf"] = resolvFile
}
- err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, append(b.NamespaceOptions, options.NamespaceOptions...))
+
+ err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions)
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
}
@@ -1081,41 +1104,35 @@ func (b *Builder) Run(command []string, options RunOptions) error {
switch isolation {
case IsolationOCI:
- // The default is --rootless=auto, which makes troubleshooting a bit harder.
- // rootlessFlag := []string{"--rootless=false"}
- // for _, arg := range options.Args {
- // if strings.HasPrefix(arg, "--rootless") {
- // rootlessFlag = nil
- // }
- // }
- // options.Args = append(options.Args, rootlessFlag...)
var moreCreateArgs []string
if options.NoPivot {
moreCreateArgs = []string{"--no-pivot"}
} else {
moreCreateArgs = nil
}
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr)
case IsolationOCIRootless:
if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil {
return err
}
- rootlessFlag := []string{"--rootless=true"}
- for _, arg := range options.Args {
- if strings.HasPrefix(arg, "--rootless") {
- rootlessFlag = nil
- }
- }
- options.Args = append(options.Args, rootlessFlag...)
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
default:
err = errors.Errorf("don't know how to run this command")
}
return err
}
+func contains(volumes []string, v string) bool {
+ for _, i := range volumes {
+ if i == v {
+ return true
+ }
+ }
+ return false
+}
+
func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions) error {
switch isolation {
case IsolationOCIRootless:
@@ -1123,10 +1140,22 @@ func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions)
logrus.Debugf("Forcing use of an IPC namespace.")
}
options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.IPCNamespace)})
- if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host {
- logrus.Debugf("Disabling network namespace.")
+ _, err := exec.LookPath("slirp4netns")
+ hostNetworking := err != nil
+ networkNamespacePath := ""
+ if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
+ hostNetworking = ns.Host
+ networkNamespacePath = ns.Path
+ if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) {
+ logrus.Debugf("Disabling network namespace configuration.")
+ networkNamespacePath = ""
+ }
}
- options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+ options.NamespaceOptions.AddOrReplace(NamespaceOption{
+ Name: string(specs.NetworkNamespace),
+ Host: hostNetworking,
+ Path: networkNamespacePath,
+ })
if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host {
logrus.Debugf("Forcing use of a PID namespace.")
}
@@ -1227,9 +1256,10 @@ type runUsingRuntimeSubprocOptions struct {
ConfigureNetworks []string
MoreCreateArgs []string
ContainerName string
+ Isolation Isolation
}
-func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
+func (b *Builder) runUsingRuntimeSubproc(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
var confwg sync.WaitGroup
config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
Options: options,
@@ -1240,6 +1270,7 @@ func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bo
ConfigureNetworks: configureNetworks,
MoreCreateArgs: moreCreateArgs,
ContainerName: containerName,
+ Isolation: isolation,
})
if conferr != nil {
return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
@@ -1318,7 +1349,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
// Run the container, start to finish.
- status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
+ status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
if err != nil {
fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
os.Exit(1)
@@ -1333,7 +1364,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
-func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
+func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
// Lock the caller to a single OS-level thread.
runtime.LockOSThread()
@@ -1490,7 +1521,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetwork
}()
if configureNetwork {
- teardown, err := runConfigureNetwork(options, configureNetworks, pid, containerName, spec.Process.Args)
+ teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, spec.Process.Args)
if teardown != nil {
defer teardown()
}
@@ -1623,9 +1654,81 @@ func runCollectOutput(fds, closeBeforeReadingFds []int) string {
}
return b.String()
}
+func setupRootlessNetwork(pid int) (teardown func(), err error) {
+ slirp4netns, err := exec.LookPath("slirp4netns")
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot find slirp4netns")
+ }
+
+ rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe")
+ }
+ defer rootlessSlirpSyncR.Close()
+
+ // Make sure no file descriptors other than the sync pipe are inherited by slirp4netns
+ files, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot list open fds")
+ }
+ for _, f := range files {
+ fd, err := strconv.Atoi(f.Name())
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot parse fd")
+ }
+ if fd == int(rootlessSlirpSyncW.Fd()) {
+ continue
+ }
+ unix.CloseOnExec(fd)
+ }
+
+ cmd := exec.Command(slirp4netns, "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0")
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
+ cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}
+
+ err = cmd.Start()
+ rootlessSlirpSyncW.Close()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot start slirp4netns")
+ }
-func runConfigureNetwork(options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
+ b := make([]byte, 1)
+ for {
+ if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil {
+ return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout")
+ }
+ if _, err := rootlessSlirpSyncR.Read(b); err == nil {
+ break
+ } else {
+ if os.IsTimeout(err) {
+ // Check if the process is still running.
+ var status syscall.WaitStatus
+ _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to read slirp4netns process status")
+ }
+ if status.Exited() || status.Signaled() {
+ return nil, errors.New("slirp4netns failed")
+ }
+
+ continue
+ }
+ return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe")
+ }
+ }
+
+ return func() {
+ cmd.Process.Kill()
+ cmd.Wait()
+ }, nil
+}
+
+func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
var netconf, undo []*libcni.NetworkConfigList
+
+ if isolation == IsolationOCIRootless {
+ return setupRootlessNetwork(pid)
+ }
// Scan for CNI configuration files.
confdir := options.CNIConfigDir
files, err := libcni.ConfFiles(confdir, []string{".conf"})
@@ -1956,7 +2059,7 @@ func runAcceptTerminal(consoleListener *net.UnixListener, terminalSize *specs.Bo
for i := range scm {
fds, err := unix.ParseUnixRights(&scm[i])
if err != nil {
- return -1, errors.Wrapf(err, "error parsing unix rights control message: %v")
+ return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i])
}
logrus.Debugf("fds: %v", fds)
if len(fds) == 0 {
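setupRootlessNetwork() above starts slirp4netns with an inherited pipe and then waits, under a read deadline, for the child to signal readiness before continuing. The standard-library sketch below, not part of this patch, shows that handshake in isolation; the shell command stands in for slirp4netns, and the liveness check done with Wait4 in the patch is only noted in a comment.

package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"time"
)

// waitForChildReady keeps the read end of a pipe, hands the write end to the
// child as fd 3, and waits (with a deadline) for the child to write something.
func waitForChildReady() error {
	syncR, syncW, err := os.Pipe()
	if err != nil {
		return err
	}
	defer syncR.Close()

	cmd := exec.Command("/bin/sh", "-c", "sleep 1; echo ready >&3")
	cmd.ExtraFiles = []*os.File{syncW} // becomes fd 3 in the child
	if err := cmd.Start(); err != nil {
		syncW.Close()
		return err
	}
	syncW.Close() // only the child holds the write end now

	buf := make([]byte, 1)
	for {
		if err := syncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil {
			return err
		}
		if _, err := syncR.Read(buf); err == nil {
			break
		} else if !os.IsTimeout(err) {
			return err
		}
		// On a timeout the patch additionally checks, via Wait4, that the child
		// is still alive before retrying; that step is omitted here.
	}
	fmt.Println("child signalled readiness")
	return cmd.Wait()
}

func main() {
	if err := waitForChildReady(); err != nil {
		log.Fatal(err)
	}
}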
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go
index fbe623660..2a970b8d6 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/unshare/unshare.go
@@ -55,6 +55,10 @@ func (c *Cmd) Start() error {
}
c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags))
+ // Set the environment variables that the libpod "rootless" package expects to find.
+ c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done")
+ c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid()))
+
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
if err != nil {
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 09aa7e1eb..66a4e535a 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -175,11 +175,11 @@ func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) {
// isRegistryInsecure checks if the named registry is marked as not secure
func isRegistryInsecure(registry string, sc *types.SystemContext) (bool, error) {
- registries, err := sysregistriesv2.GetRegistries(sc)
+ reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
if err != nil {
return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistries.RegistriesConfPath(sc))
}
- if reginfo := sysregistriesv2.FindRegistry(registry, registries); reginfo != nil {
+ if reginfo != nil {
if reginfo.Insecure {
logrus.Debugf("registry %q is marked insecure in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc))
} else {
@@ -193,11 +193,11 @@ func isRegistryInsecure(registry string, sc *types.SystemContext) (bool, error)
// isRegistryBlocked checks if the named registry is marked as blocked
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
- registries, err := sysregistriesv2.GetRegistries(sc)
+ reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
if err != nil {
return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistries.RegistriesConfPath(sc))
}
- if reginfo := sysregistriesv2.FindRegistry(registry, registries); reginfo != nil {
+ if reginfo != nil {
if reginfo.Blocked {
logrus.Debugf("registry %q is marked as blocked in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc))
} else {
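The two hunks above swap the GetRegistries()-then-search pattern for a single FindRegistry(sc, name) lookup. Below is a sketch of the new call as it appears in this diff; the package path and the Insecure/Blocked fields are taken from the patch, while the registry name is an example and the exact shape of the returned struct is assumed.

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	sys := &types.SystemContext{} // consult the default registries.conf
	reginfo, err := sysregistriesv2.FindRegistry(sys, "registry.example.com/myimage")
	if err != nil {
		log.Fatalf("unable to parse the registries configuration: %v", err)
	}
	if reginfo == nil {
		fmt.Println("registry not mentioned in the configuration; treating it as secure and unblocked")
		return
	}
	fmt.Printf("insecure=%v blocked=%v\n", reginfo.Insecure, reginfo.Blocked)
}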
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 3a415a7f3..427c8db28 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -7,10 +7,8 @@ import (
"net/url"
"os"
"path"
- "path/filepath"
"strconv"
"strings"
- "syscall"
"github.com/containers/image/directory"
dockerarchive "github.com/containers/image/docker/archive"
@@ -31,6 +29,10 @@ import (
const (
minimumTruncatedIDLength = 3
+ // DefaultTransport is a prefix that we apply to an image name if we
+ // can't find one in the local Store, in order to generate a source
+ // reference for the image that we can then copy to the local Store.
+ DefaultTransport = "docker://"
)
var (
@@ -89,6 +91,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
}
}
+ name = strings.TrimPrefix(name, DefaultTransport)
// If the image name already included a domain component, we're done.
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
@@ -119,12 +122,11 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
// Figure out the list of registries.
var registries []string
- allRegistries, err := sysregistriesv2.GetRegistries(sc)
+ searchRegistries, err := sysregistriesv2.FindUnqualifiedSearchRegistries(sc)
if err != nil {
logrus.Debugf("unable to read configured registries to complete %q: %v", name, err)
- registries = []string{}
}
- for _, registry := range sysregistriesv2.FindUnqualifiedSearchRegistries(allRegistries) {
+ for _, registry := range searchRegistries {
if !registry.Blocked {
registries = append(registries, registry.URL)
}
@@ -450,60 +452,6 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap,
return uid, gid, nil
}
-// UnsharedRootPath returns a location under ($XDG_DATA_HOME/containers/storage,
-// or $HOME/.local/share/containers/storage, or
-// (the user's home directory)/.local/share/containers/storage, or an error.
-func UnsharedRootPath(homedir string) (string, error) {
- // If $XDG_DATA_HOME is defined...
- if envDataHome, haveDataHome := os.LookupEnv("XDG_DATA_HOME"); haveDataHome {
- return filepath.Join(envDataHome, "containers", "storage"), nil
- }
- // If $XDG_DATA_HOME is not defined, but $HOME is defined...
- if envHomedir, haveHomedir := os.LookupEnv("HOME"); haveHomedir {
- // Default to the user's $HOME/.local/share/containers/storage subdirectory.
- return filepath.Join(envHomedir, ".local", "share", "containers", "storage"), nil
- }
- // If we know where our home directory is...
- if homedir != "" {
- // Default to the user's homedir/.local/share/containers/storage subdirectory.
- return filepath.Join(homedir, ".local", "share", "containers", "storage"), nil
- }
- return "", errors.New("unable to determine a --root location: neither $XDG_DATA_HOME nor $HOME is set")
-}
-
-// UnsharedRunrootPath returns $XDG_RUNTIME_DIR/run, /var/run/user/(the user's UID)/run, or an error.
-func UnsharedRunrootPath(uid string) (string, error) {
- // If $XDG_RUNTIME_DIR is defined...
- if envRuntimeDir, haveRuntimeDir := os.LookupEnv("XDG_RUNTIME_DIR"); haveRuntimeDir {
- return filepath.Join(envRuntimeDir, "run"), nil
- }
- var runtimeDir string
- // If $XDG_RUNTIME_DIR is not defined, but we know our UID...
- if uid != "" {
- tmpDir := filepath.Join("/var/run/user", uid)
- os.MkdirAll(tmpDir, 0700)
- st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Getuid() && st.Mode().Perm() == 0700 {
- runtimeDir = tmpDir
- }
- }
- if runtimeDir == "" {
- home := os.Getenv("HOME")
- if home == "" {
- return "", errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- }
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", errors.Wrapf(err, "cannot resolve %s", home)
- }
- runtimeDir = filepath.Join(resolvedHome, "rundir")
- }
- if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
- return "", errors.New("could not set XDG_RUNTIME_DIR")
- }
- return runtimeDir, nil
-}
-
// GetPolicyContext sets up, initializes and returns a new context for the specified policy
func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
policy, err := signature.DefaultPolicy(ctx)
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index d79412afc..61325114c 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -3,9 +3,10 @@ github.com/blang/semver master
github.com/BurntSushi/toml master
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
-github.com/containers/image 5e5b67d6b1cf43cc349128ec3ed7d5283a6cc0d1
-github.com/containers/libpod 2afadeec6696fefac468a49c8ba24b0bc275aa75
-github.com/containers/storage 41294c85d97bef688e18f710402895dbecde3308
+github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067
+github.com/boltdb/bolt master
+github.com/containers/libpod fe4f09493f41f675d24c969d1b60d1a6a45ddb9e
+github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
@@ -36,9 +37,9 @@ github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc master
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools master
-github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
+github.com/opencontainers/selinux master
github.com/openshift/imagebuilder master
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pborman/uuid master
github.com/pkg/errors master
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index 313d802b3..013080e8d 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -13,6 +13,7 @@ import (
"time"
"github.com/containers/image/image"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/signature"
"github.com/containers/image/transports"
@@ -24,14 +25,16 @@ import (
)
type digestingReader struct {
- source io.Reader
- digester digest.Digester
- expectedDigest digest.Digest
- validationFailed bool
+ source io.Reader
+ digester digest.Digester
+ expectedDigest digest.Digest
+ validationFailed bool
+ validationSucceeded bool
}
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
-// and set validationFailed to true if the source stream does not match expectedDigest.
+// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
+// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
if err := expectedDigest.Validate(); err != nil {
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
@@ -64,6 +67,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
d.validationFailed = true
return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
}
+ d.validationSucceeded = true
}
return n, err
}
@@ -71,21 +75,22 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
- cachedDiffIDs map[digest.Digest]digest.Digest
dest types.ImageDestination
rawSource types.ImageSource
reportWriter io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
+ blobInfoCache types.BlobInfoCache
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
- c *copier
- manifestUpdates *types.ManifestUpdateOptions
- src types.Image
- diffIDsAreNeeded bool
- canModifyManifest bool
+ c *copier
+ manifestUpdates *types.ManifestUpdateOptions
+ src types.Image
+ diffIDsAreNeeded bool
+ canModifyManifest bool
+ canSubstituteBlobs bool
}
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
@@ -141,12 +146,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}()
c := &copier{
- cachedDiffIDs: make(map[digest.Digest]digest.Digest),
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
progressInterval: options.ProgressInterval,
progress: options.Progress,
+ // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
+ // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
+ // we might want to add a separate CommonCtx — or would that be too confusing?
+ blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
}
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
@@ -235,6 +243,13 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
src: src,
// diffIDsAreNeeded is computed later
canModifyManifest: len(sigs) == 0,
+ // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
+ canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
}
if err := ic.updateEmbeddedDockerReference(); err != nil {
@@ -498,32 +513,24 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
- // Check if we already have a blob with this digest
- haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo)
- if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
- }
- // If we already have a cached diffID for this blob, we don't need to compute it
- diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
- // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
- if haveBlob && !diffIDIsNeeded {
- // Check the blob sizes match, if we were given a size this time
- if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
- return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
- }
- srcInfo.Size = extantBlobSize
- // Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
- blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo)
+ cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+
+ // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
+ if !diffIDIsNeeded {
+ reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
+ return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
+ }
+ if reused {
+ ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+ return blobInfo, cachedDiffID, nil
}
- ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
- return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
}
// Fallback: copy the layer, computing the diffID if we need to do so
ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
- srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo)
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
@@ -543,11 +550,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
return blobInfo, diffIDResult.digest, nil
}
} else {
- return blobInfo, ic.c.cachedDiffIDs[srcInfo.Digest], nil
+ return blobInfo, cachedDiffID, nil
}
}
@@ -624,7 +633,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
- // Note that we don't use a stronger "validationSucceeded" indicator, because
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
@@ -660,8 +669,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
+ var compressionOperation types.LayerCompression
if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
+ compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
@@ -674,6 +685,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
logrus.Debugf("Blob will be decompressed")
+ compressionOperation = types.Decompress
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
@@ -684,6 +696,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else {
logrus.Debugf("Using original blob without modification")
+ compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
}
@@ -699,7 +712,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// === Finally, send the layer stream to dest.
- uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig)
+ uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
}
@@ -722,6 +735,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
+ if digestingReader.validationSucceeded {
+ // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+ // (because inputInfo.Digest == "", this must have been computed afresh).
+ switch compressionOperation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ case types.Decompress:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ default:
+ return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+ }
+ }
return uploadedInfo, nil
}
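The copy.go changes above replace the per-copy cachedDiffIDs map with a shared BlobInfoCache that remembers compressed/uncompressed digest pairs. A sketch, not part of this patch, using only the calls visible in this diff (DefaultCache, RecordDigestUncompressedPair, UncompressedDigest); the digest values are fabricated.

package main

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := blobinfocache.DefaultCache(&types.SystemContext{})

	compressed := digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111")
	uncompressed := digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222")

	// Once a layer has been read end to end and both digests are known to be
	// trustworthy, the pair is recorded ...
	cache.RecordDigestUncompressedPair(compressed, uncompressed)

	// ... so that a later copy of the same blob can skip recomputing its DiffID.
	fmt.Println(cache.UncompressedDigest(compressed) == uncompressed) // true
}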
diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go
index d888931fe..d75c195b2 100644
--- a/vendor/github.com/containers/image/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/directory/directory_dest.go
@@ -127,10 +127,11 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -169,27 +170,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, -1, nil
+ return false, types.BlobInfo{}, nil
}
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, finfo.Size(), nil
-}
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
-func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
index 5e17c37c0..3625def80 100644
--- a/vendor/github.com/containers/image/directory/directory_src.go
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -49,7 +49,9 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
if err != nil {
return nil, -1, err
diff --git a/vendor/github.com/containers/image/docker/cache.go b/vendor/github.com/containers/image/docker/cache.go
new file mode 100644
index 000000000..64ad57a7c
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/cache.go
@@ -0,0 +1,23 @@
+package docker
+
+import (
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+)
+
+// bicTransportScope returns a BICTransportScope appropriate for ref.
+func bicTransportScope(ref dockerReference) types.BICTransportScope {
+ // Blobs can be reused across the whole registry.
+ return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
+}
+
+// newBICLocationReference returns a BICLocationReference appropriate for ref.
+func newBICLocationReference(ref dockerReference) types.BICLocationReference {
+ // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
+ return types.BICLocationReference{Opaque: ref.ref.Name()}
+}
+
+// parseBICLocationReference returns a repository for encoded lr.
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+ return reference.ParseNormalizedNamed(lr.Opaque)
+}
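// A minimal illustrative sketch, not part of the change above, written as if it lived in this
// package: how the three helpers are meant to key a BlobInfoCache for the docker transport. The
// scope is the registry domain (blobs can be mounted across repositories of one registry), and a
// location is a repository name. recordAndLookup and its arguments are invented.
func recordAndLookup(ref dockerReference, cache types.BlobInfoCache, blobDigest digest.Digest) {
	// After a successful upload or existence check, remember where the blob lives.
	cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), blobDigest, newBICLocationReference(ref))
	// When pushing the same blob to another repository on this registry, ask for known candidates.
	for _, candidate := range cache.CandidateLocations(ref.Transport(), bicTransportScope(ref), blobDigest, true) {
		if repo, err := parseBICLocationReference(candidate.Location); err == nil {
			_ = repo // a reference.Named for a repository known to contain the blob
		}
	}
}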
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index 6d2c5b670..7f55dbe7f 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/pkg/docker/config"
+ "github.com/containers/image/pkg/sysregistriesv2"
"github.com/containers/image/pkg/tlsclientconfig"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/client"
@@ -69,30 +70,33 @@ type extensionSignatureList struct {
}
type bearerToken struct {
- Token string `json:"token"`
- AccessToken string `json:"access_token"`
- ExpiresIn int `json:"expires_in"`
- IssuedAt time.Time `json:"issued_at"`
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ expirationTime time.Time
}
// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards.
- sys *types.SystemContext
- registry string
+ sys *types.SystemContext
+ registry string
+ client *http.Client
+ insecureSkipTLSVerify bool
+ // The following members are not set by newDockerClient and must be set by callers if needed.
username string
password string
- client *http.Client
signatureBase signatureStorageBase
scope authScope
+ extraScope *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo)
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
challenges []challenge
supportsSignatures bool
- // The following members are private state for setupRequestAuth, both are valid if token != nil.
- token *bearerToken
- tokenExpiration time.Time
+ // Private state for setupRequestAuth
+ tokenCache map[string]bearerToken
}
type authScope struct {
@@ -128,6 +132,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
+ token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return token, nil
}
@@ -194,13 +199,26 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
if err != nil {
return nil, err
}
- remoteName := reference.Path(ref.ref)
- return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName)
+ client, err := newDockerClient(sys, registry, ref.ref.Name())
+ if err != nil {
+ return nil, err
+ }
+ client.username = username
+ client.password = password
+ client.signatureBase = sigBase
+ client.scope.actions = actions
+ client.scope.remoteName = reference.Path(ref.ref)
+ return client, nil
}
-// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
-func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) {
+// newDockerClient returns a new dockerClient instance for the given registry
+// and reference. The reference is used to query the registry configuration
+// and can either be a registry (e.g., "registry.com[:5000]") or a repository
+// (e.g., "registry.com[:5000][/some/namespace]/repo").
+// Please note that newDockerClient does not set all members of dockerClient
+// (e.g., username and password); those must be set by callers if necessary.
+func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
registry = dockerRegistry
@@ -221,33 +239,44 @@ func newDockerClientWithDetails(sys *types.SystemContext, registry, username, pa
return nil, err
}
- if sys != nil && sys.DockerInsecureSkipTLSVerify {
- tr.TLSClientConfig.InsecureSkipVerify = true
+ // Check whether TLS verification should be skipped (default=false); this can
+ // be specified either in the sysregistriesv2 configuration or via the
+ // SystemContext, with the SystemContext taking precedence.
+ skipVerify := false
+ if sys != nil && sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ // Only use the SystemContext if the actual value is defined.
+ skipVerify = sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
+ } else {
+ reg, err := sysregistriesv2.FindRegistry(sys, reference)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error loading registries")
+ }
+ if reg != nil {
+ skipVerify = reg.Insecure
+ }
}
+ tr.TLSClientConfig.InsecureSkipVerify = skipVerify
return &dockerClient{
- sys: sys,
- registry: registry,
- username: username,
- password: password,
- client: &http.Client{Transport: tr},
- signatureBase: sigBase,
- scope: authScope{
- actions: actions,
- remoteName: remoteName,
- },
+ sys: sys,
+ registry: registry,
+ client: &http.Client{Transport: tr},
+ insecureSkipTLSVerify: skipVerify,
+ tokenCache: map[string]bearerToken{},
}, nil
}
// CheckAuth validates the credentials by attempting to log into the registry
// returns an error if an error occurred while making the http request or the status code received was 401
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
- newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
+ client, err := newDockerClient(sys, registry, registry)
if err != nil {
return errors.Wrapf(err, "error creating new docker client")
}
+ client.username = username
+ client.password = password
- resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
+ resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
if err != nil {
return err
}
@@ -299,16 +328,21 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
return nil, errors.Wrapf(err, "error getting username and password")
}
- // The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
- // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
+ // The /v2/_catalog endpoint has been disabled for docker.io therefore
+ // the call made to that endpoint will fail. So using the v1 hostname
+ // for docker.io for simplicity of implementation and the fact that it
+ // returns search results.
+ hostname := registry
if registry == dockerHostname {
- registry = dockerV1Hostname
+ hostname = dockerV1Hostname
}
- client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
+ client, err := newDockerClient(sys, hostname, registry)
if err != nil {
return nil, errors.Wrapf(err, "error creating new docker client")
}
+ client.username = username
+ client.password = password
// Only try the v1 search endpoint if the search query is not empty. If it is
// empty skip to the v2 endpoint.
@@ -432,24 +466,23 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
- if c.token == nil || time.Now().After(c.tokenExpiration) {
- realm, ok := challenge.Parameters["realm"]
- if !ok {
- return errors.Errorf("missing realm in bearer auth challenge")
- }
- service, _ := challenge.Parameters["service"] // Will be "" if not present
- var scope string
- if c.scope.remoteName != "" && c.scope.actions != "" {
- scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
- }
- token, err := c.getBearerToken(req.Context(), realm, service, scope)
+ cacheKey := ""
+ scopes := []authScope{c.scope}
+ if c.extraScope != nil {
+ // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
+ cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
+ scopes = append(scopes, *c.extraScope)
+ }
+ token, ok := c.tokenCache[cacheKey]
+ if !ok || time.Now().After(token.expirationTime) {
+ t, err := c.getBearerToken(req.Context(), challenge, scopes)
if err != nil {
return err
}
- c.token = token
- c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ token = *t
+ c.tokenCache[cacheKey] = token
}
- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
@@ -459,7 +492,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
return nil
}
-func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
+func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
+ realm, ok := challenge.Parameters["realm"]
+ if !ok {
+ return nil, errors.Errorf("missing realm in bearer auth challenge")
+ }
+
authReq, err := http.NewRequest("GET", realm, nil)
if err != nil {
return nil, err
@@ -469,11 +507,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
if c.username != "" {
getParams.Add("account", c.username)
}
- if service != "" {
+ if service, ok := challenge.Parameters["service"]; ok && service != "" {
getParams.Add("service", service)
}
- if scope != "" {
- getParams.Add("scope", scope)
+ for _, scope := range scopes {
+ if scope.remoteName != "" && scope.actions != "" {
+ getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+ }
}
authReq.URL.RawQuery = getParams.Encode()
if c.username != "" && c.password != "" {
@@ -530,7 +570,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return nil
}
err := ping("https")
- if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
+ if err != nil && c.insecureSkipTLSVerify {
err = ping("http")
}
if err != nil {
@@ -554,7 +594,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return true
}
isV1 := pingV1("https")
- if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
+ if !isV1 && c.insecureSkipTLSVerify {
isV1 = pingV1("http")
}
if isV1 {
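// A minimal illustrative sketch, not part of the change above, written as if it lived in this
// package: the shape of the token request that getBearerToken now builds when an extra scope is
// present (e.g. while mounting a blob from another repository). buildTokenURL is invented; the
// realm/service parameters and the "repository:<name>:<actions>" scope format come from the code
// above.
func buildTokenURL(realm, service string, scopes []authScope) (string, error) {
	u, err := url.Parse(realm)
	if err != nil {
		return "", err
	}
	params := url.Values{}
	if service != "" {
		params.Add("service", service)
	}
	for _, scope := range scopes {
		if scope.remoteName != "" && scope.actions != "" {
			// One "scope" parameter per repository, e.g.
			// scope=repository:library/busybox:pull&scope=repository:other/repo:pull
			params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
		}
	}
	u.RawQuery = params.Encode()
	return u.String(), nil
}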
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index 9bbffef93..2f471f648 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
@@ -113,17 +114,21 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
- haveBlob, size, err := d.HasBlob(ctx, inputInfo)
+ // This should not really be necessary; at least the copy code calls TryReusingBlob automatically.
+ // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
+ // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
+ haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
- return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ return reusedInfo, nil
}
}
@@ -160,7 +165,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
- // FIXME: DELETE uploadLocation on failure
+ // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
@@ -177,19 +182,15 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
+// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
- }
- checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
-
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) {
+ checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
@@ -202,7 +203,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
- return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
+ return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -211,8 +212,134 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
}
}
-func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error {
+ u := url.URL{
+ Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+ RawQuery: url.Values{
+ "mount": {srcDigest.String()},
+ "from": {reference.Path(srcRepo)},
+ }.Encode(),
+ }
+ mountPath := u.String()
+ logrus.Debugf("Trying to mount %s", mountPath)
+ res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusCreated:
+ logrus.Debugf("... mount OK")
+ return nil
+ case http.StatusAccepted:
+ // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+ // Abort, and let the ultimate caller do an upload when it's ready, instead.
+ // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+ }
+ logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+ res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth)
+ if err != nil {
+ logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+ } else {
+ defer res2.Body.Close()
+ if res2.StatusCode != http.StatusNoContent {
+ logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
+ }
+ }
+ // Anyway, if canceling the upload fails, ignore it and return the more important error:
+ return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ default:
+ logrus.Debugf("Error mounting, response %#v", *res)
+ return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ }
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ if info.Digest == "" {
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+ }
+
+ // First, check whether the blob happens to already exist at the destination.
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ if exists {
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+ }
+
+ // Then try reusing blobs from other locations.
+
+ // Checking candidateRepo, and mounting from it, requires an expanded token scope.
+ // We still want to reuse the ping information and other aspects of the client, so rather than make a fresh copy, we use this somewhat ugly extraScope hack.
+ if d.c.extraScope != nil {
+ return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob")
+ }
+ defer func() {
+ d.c.extraScope = nil
+ }()
+ for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+ candidateRepo, err := parseBICLocationReference(candidate.Location)
+ if err != nil {
+ logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+ continue
+ }
+ logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
+
+ // Sanity checks:
+ if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+ logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+ continue
+ }
+ if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ logrus.Debug("... Already tried the primary destination")
+ continue
+ }
+
+ // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+ d.c.extraScope = &authScope{
+ remoteName: reference.Path(candidateRepo),
+ actions: "pull",
+ }
+ // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
+ // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
+ // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
+ // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
+ // Even worse, docker/distribution does not actually reasonably implement canceling uploads
+ // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
+ // so, be a nice client and don't create unnecessary upload sessions on the server.
+ exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest)
+ if err != nil {
+ logrus.Debugf("... Failed: %v", err)
+ continue
+ }
+ if !exists {
+ // FIXME? Should we drop the blob from cache here (and elsewhere?)?
+ continue // logrus.Debug() already happened in blobExists
+ }
+ if candidateRepo.Name() != d.ref.ref.Name() {
+ if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil {
+ logrus.Debugf("... Mount failed: %v", err)
+ continue
+ }
+ }
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+ }
+
+ return false, types.BlobInfo{}, nil
}
// PutManifest writes manifest to the destination.
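// A minimal illustrative sketch, not part of the change above, written as if it lived in this
// package: the URL that mountBlob constructs for a cross-repository mount in the registry v2 API.
// mountPathFor is invented, and the "/v2/%s/blobs/uploads/" template is assumed from how
// blobUploadPath is used above.
func mountPathFor(targetRepo, sourceRepo string, blobDigest digest.Digest) string {
	u := url.URL{
		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", targetRepo),
		RawQuery: url.Values{
			"mount": {blobDigest.String()},
			"from":  {sourceRepo},
		}.Encode(),
	}
	// e.g. "/v2/target/repo/blobs/uploads/?from=source%2Frepo&mount=sha256%3A..."
	// A 201 Created response means the blob was mounted; 202 Accepted means the registry started
	// an ordinary upload session instead, which mountBlob then tries to cancel.
	return u.String()
}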
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index aedab9731..fbed6297f 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -162,7 +162,9 @@ func getBlobSize(resp *http.Response) int64 {
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
@@ -177,6 +179,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i
// print url also
return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
}
+ cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
return res.Body, getBlobSize(res), nil
}
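// A minimal illustrative sketch, not part of the change above: constructing a persistent cache
// from the new pkg/blobinfocache package (added later in this change) and passing it through the
// new cache parameter. The package name, fetchBlob, and the cache file path are invented;
// blobinfocache.NoCache remains available when no persistence is wanted.
package example

import (
	"context"
	"io"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

func fetchBlob(ctx context.Context, src types.ImageSource, info types.BlobInfo) (io.ReadCloser, int64, error) {
	cache := blobinfocache.NewBoltDBCache("/var/tmp/blob-info-cache.boltdb")
	return src.GetBlob(ctx, info, cache)
}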
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
index d6510ccf1..ad8a48a03 100644
--- a/vendor/github.com/containers/image/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -85,10 +85,11 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on the uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
@@ -120,12 +121,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
}
// Maybe the blob has been already sent
- ok, size, err := d.HasBlob(ctx, inputInfo)
+ ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
- return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ return reusedInfo, nil
}
if isConfig {
@@ -151,29 +152,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with
-// the matching digest which can be reapplied using ReapplyBlob. Unlike
-// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
-// the blob must also be returned. If the destination does not contain the
-// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
-// returns a non-nil error only on an unexpected failure.
-func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+ return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
- return true, blob.Size, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
- return false, -1, nil
-}
-
-// ReapplyBlob informs the image destination that a blob for which HasBlob
-// previously returned true would have been passed to PutBlob if it had
-// returned false. Like HasBlob and unlike PutBlob, the digest can not be
-// empty. If the blob is a filesystem layer, this signifies that the changes
-// it describes need to be applied again when composing a filesystem tree.
-func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return false, types.BlobInfo{}, nil
}
func (d *Destination) createRepositoriesFile(rootLayerID string) error {
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index 942893a81..d94ed9783 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -398,7 +398,9 @@ func (r uncompressedReadCloser) Close() error {
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, 0, err
}
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
index b639ab714..cee60f824 100644
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -95,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
if err != nil {
return nil, err
}
@@ -249,7 +250,9 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
if historyEntry.EmptyLayer {
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
+ // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+ // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer")
}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
index 298db360d..6fe2a9a32 100644
--- a/vendor/github.com/containers/image/image/oci.go
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go
index 3c6b7dffa..3997ac2ee 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go
@@ -77,20 +77,27 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.unpackedDest.IgnoresEmbeddedDockerReference()
}
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
- return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
-func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- return d.unpackedDest.HasBlob(ctx, info)
-}
-
-func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return d.unpackedDest.ReapplyBlob(ctx, info)
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go
index d04773c1f..084d818f7 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_src.go
@@ -76,9 +76,11 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
return s.unpackedSrc.GetManifest(ctx, instanceDigest)
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
- return s.unpackedSrc.GetBlob(ctx, info)
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ return s.unpackedSrc.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go
index 351632750..b5a6e08f8 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go
@@ -107,13 +107,15 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -173,30 +175,29 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *ociImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, -1, nil
+ return false, types.BlobInfo{}, nil
}
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, finfo.Size(), nil
-}
-
-func (d *ociImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
index 33115c00d..086a7040d 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -92,8 +92,10 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
index dbd04f10b..0cce1e6c7 100644
--- a/vendor/github.com/containers/image/openshift/openshift.go
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -212,11 +212,13 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, 0, err
}
- return s.docker.GetBlob(ctx, info)
+ return s.docker.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
@@ -379,23 +381,23 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, isConfig)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *openshiftImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- return d.docker.HasBlob(ctx, info)
-}
-
-func (d *openshiftImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return d.docker.ReapplyBlob(ctx, info)
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index afff7dc1b..064898948 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -132,7 +132,15 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
@@ -322,12 +330,18 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
-func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
-
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
- return false, 0, err
+ return false, types.BlobInfo{}, err
}
d.repo = repo
}
@@ -335,29 +349,25 @@ func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInf
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, size, nil
-}
-
-func (d *ostreeImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
index 1f325b2a7..e73cae198 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -255,8 +255,10 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path)
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
blob := info.Digest.Hex()
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
new file mode 100644
index 000000000..4ee809134
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
@@ -0,0 +1,329 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/boltdb/bolt"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+ // we can simply start over with a different filename; update blobInfoCacheFilename.
+
+ // FIXME: For CRI-O, does this need to hide information between different users?
+
+ // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
+ uncompressedDigestBucket = []byte("uncompressedDigest")
+ // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+ // (as a set of key=digest, value="" pairs)
+ digestByUncompressedBucket = []byte("digestByUncompressed")
+ // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+ // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+ knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+ refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+ mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+ // pathLocks contains a lock for each currently open file.
+ // This must be global so that independently created instances of boltDBCache exclude each other.
+ // The map is protected by pathLocksMutex.
+ // FIXME? Should this be based on device:inode numbers instead of paths?
+ pathLocks = map[string]*pathLock{}
+ pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+ pl := func() *pathLock { // A scope for defer
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if ok {
+ pl.refCount++
+ } else {
+ pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+ pathLocks[path] = pl
+ }
+ return pl
+ }()
+ pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if !ok {
+ // Should this return an error instead? BlobInfoCache ultimately ignores errors…
+ panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+ }
+ pl.mutex.Unlock()
+ pl.refCount--
+ if pl.refCount == 0 {
+ delete(pathLocks, path)
+ }
+}
+
+// boltDBCache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type boltDBCache struct {
+ path string
+}
+
+// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
+// Most users should call DefaultCache instead.
+func NewBoltDBCache(path string) types.BlobInfoCache {
+ return &boltDBCache{path: path}
+}
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+ // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+ // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+ // a read lock, blocking any future writes.
+ // Hence this preliminary check, which is RACY: Another process could remove the file
+ // between the Lstat call and opening the database.
+ if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+ return err
+ }
+
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, nil)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+ if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+ if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+ d, err := digest.Parse(string(uncompressedBytes))
+ if err == nil {
+ return d
+ }
+ // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ }
+ // Presence in digestByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+ if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+ c := b.Cursor()
+ if k, _ := c.First(); k != nil { // The bucket is non-empty
+ return anyDigest
+ }
+ }
+ }
+ return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ var res digest.Digest
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ res = bdc.uncompressedDigest(tx, anyDigest)
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
+ if err != nil {
+ return err
+ }
+ key := []byte(anyDigest.String())
+ if previousBytes := b.Get(key); previousBytes != nil {
+ previous, err := digest.Parse(string(previousBytes))
+ if err != nil {
+ return err
+ }
+ if previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ }
+ if err := b.Put(key, []byte(uncompressed.String())); err != nil {
+ return err
+ }
+
+ b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
+ if err != nil {
+ return err
+ }
+ value, err := time.Now().MarshalBinary()
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
+func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
+ b := scopeBucket.Bucket([]byte(digest.String()))
+ if b == nil {
+ return candidates
+ }
+ _ = b.ForEach(func(k, v []byte) error {
+ t := time.Time{}
+ if err := t.UnmarshalBinary(v); err != nil {
+ return err
+ }
+ candidates = append(candidates, candidateWithTime{
+ candidate: types.BICReplacementCandidate{
+ Digest: digest,
+ Location: types.BICLocationReference{Opaque: string(k)},
+ },
+ lastSeen: t,
+ })
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) scope (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []candidateWithTime{}
+ var uncompressedDigestValue digest.Digest // = ""
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ scopeBucket := tx.Bucket(knownLocationsBucket)
+ if scopeBucket == nil {
+ return nil
+ }
+ scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
+ if scopeBucket == nil {
+ return nil
+ }
+ scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
+ if scopeBucket == nil {
+ return nil
+ }
+
+ res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+ if canSubstitute {
+ if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
+ b := tx.Bucket(digestByUncompressedBucket)
+ if b != nil {
+ b = b.Bucket([]byte(uncompressedDigestValue.String()))
+ if b != nil {
+ if err := b.ForEach(func(k, _ []byte) error {
+ d, err := digest.Parse(string(k))
+ if err != nil {
+ return err
+ }
+ if d != primaryDigest && d != uncompressedDigestValue {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ if uncompressedDigestValue != primaryDigest {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+ }
+ }
+ }
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+
+ return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+}
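A minimal usage sketch of the digest-pair half of the cache API defined above, not part of the patch: the path and digest values are made-up stand-ins, and the location-tracking calls (RecordKnownLocation/CandidateLocations) additionally need an ImageTransport and scope supplied by a transport.

    package main

    import (
        "fmt"

        "github.com/containers/image/pkg/blobinfocache"
        "github.com/opencontainers/go-digest"
    )

    func main() {
        // Made-up path; DefaultCache (see default.go below) is the normal entry point.
        cache := blobinfocache.NewBoltDBCache("/tmp/blob-info-cache-v1.boltdb")

        compressed := digest.Digest("sha256:compressed-stand-in")     // stand-in digests, not real blobs
        uncompressed := digest.Digest("sha256:uncompressed-stand-in")

        // Record a locally verified (compressed, uncompressed) pair …
        cache.RecordDigestUncompressedPair(compressed, uncompressed)
        // … and look it up later, e.g. before deciding to re-download or re-compress a layer.
        fmt.Println(cache.UncompressedDigest(compressed))
    }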
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
new file mode 100644
index 000000000..6da9f2805
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
@@ -0,0 +1,63 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/types"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // blobInfoCacheFilename is the file name used for blob info caches.
+ // If the format changes in an incompatible way, increase the version number.
+ blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+ // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+ systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged user’s home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+ if sys != nil && sys.BlobInfoCacheDir != "" {
+ return sys.BlobInfoCacheDir, nil
+ }
+
+ // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+ // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+ if euid == 0 {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+ }
+ return systemBlobInfoCacheDir, nil
+ }
+
+ // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+ dataDir := os.Getenv("XDG_DATA_HOME")
+ if dataDir == "" {
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ dataDir = filepath.Join(home, ".local", "share")
+ }
+ return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+ dir, err := blobInfoCacheDir(sys, os.Geteuid())
+ if err != nil {
+ logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+ return NewMemoryCache()
+ }
+ path := filepath.Join(dir, blobInfoCacheFilename)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", err)
+ return NewMemoryCache()
+ }
+
+ logrus.Debugf("Using blob info cache at %s", path)
+ return NewBoltDBCache(path)
+}
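DefaultCache therefore resolves to /var/lib/containers/cache for root and $XDG_DATA_HOME/containers/cache (or ~/.local/share/containers/cache) otherwise, unless overridden via the new SystemContext field. A minimal caller sketch, with a made-up directory:

    package main

    import (
        "github.com/containers/image/pkg/blobinfocache"
        "github.com/containers/image/types"
    )

    func main() {
        // "/srv/containers-cache" is a made-up example; leave BlobInfoCacheDir empty to get the defaults above.
        sys := &types.SystemContext{BlobInfoCacheDir: "/srv/containers-cache"}
        cache := blobinfocache.DefaultCache(sys) // silently falls back to a memory-only cache on setup errors
        _ = cache
    }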
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
new file mode 100644
index 000000000..1ce7dee13
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
@@ -0,0 +1,123 @@
+package blobinfocache
+
+import (
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+ transport string
+ scope types.BICTransportScope
+ blobDigest digest.Digest
+}
+
+// memoryCache implements an in-memory-only BlobInfoCache
+type memoryCache struct {
+ uncompressedDigests map[digest.Digest]digest.Digest
+ digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
+ knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+}
+
+// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
+// This is primarily intended for tests, but also used as a fallback if DefaultCache
+// can’t determine, or set up, the location for a persistent cache.
+// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
+func NewMemoryCache() types.BlobInfoCache {
+ return &memoryCache{
+ uncompressedDigests: map[digest.Digest]digest.Digest{},
+ digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
+ knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
+ }
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+ return d
+ }
+ // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
+ return anyDigest
+ }
+ return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ mem.uncompressedDigests[anyDigest] = uncompressed
+
+ anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+ if !ok {
+ anyDigestSet = map[digest.Digest]struct{}{}
+ mem.digestsByUncompressed[uncompressed] = anyDigestSet
+ }
+ anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+ locationScope, ok := mem.knownLocations[key]
+ if !ok {
+ locationScope = map[types.BICLocationReference]time.Time{}
+ mem.knownLocations[key] = locationScope
+ }
+ locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// appendReplacementCandidates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
+ locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+ for l, t := range locations {
+ candidates = append(candidates, candidateWithTime{
+ candidate: types.BICReplacementCandidate{
+ Digest: digest,
+ Location: l,
+ },
+ lastSeen: t,
+ })
+ }
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) scope (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []candidateWithTime{}
+ res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
+ var uncompressedDigest digest.Digest // = ""
+ if canSubstitute {
+ if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
+ otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+ for d := range otherDigests {
+ if d != primaryDigest && d != uncompressedDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, d)
+ }
+ }
+ if uncompressedDigest != primaryDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
+ }
+ }
+ }
+ return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none.go
new file mode 100644
index 000000000..5658d89ff
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/none.go
@@ -0,0 +1,47 @@
+package blobinfocache
+
+import (
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// noCache implements a dummy BlobInfoCache which records no data.
+type noCache struct {
+}
+
+// NoCache implements BlobInfoCache by not recording any data.
+//
+// This exists primarily for implementations of configGetter for Manifest.Inspect,
+// because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
+var NoCache types.BlobInfoCache = noCache{}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) scope (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return nil
+}
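NoCache is what callers pass when there is nothing worth caching; the Commit change in storage_image.go further down uses it the same way. A minimal sketch, assuming src and info are supplied by the caller (they are not defined in this patch):

    package example

    import (
        "context"
        "io"

        "github.com/containers/image/pkg/blobinfocache"
        "github.com/containers/image/types"
    )

    // fetchConfig is a sketch; src and info are assumed inputs from the surrounding copy code.
    func fetchConfig(ctx context.Context, src types.ImageSource, info types.BlobInfo) (io.ReadCloser, int64, error) {
        // Configs have a single representation, so recording or looking up substitutes is pointless.
        return src.GetBlob(ctx, info, blobinfocache.NoCache)
    }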
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
new file mode 100644
index 000000000..02709aa1c
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
@@ -0,0 +1,108 @@
+package blobinfocache
+
+import (
+ "sort"
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type candidateWithTime struct {
+ candidate types.BICReplacementCandidate // The replacement candidate
+ lastSeen time.Time // Time the candidate was last known to exist (either read or written)
+}
+
+// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
+// along with the specially-treated digest values for the implementation of sort.Interface.Less
+type candidateSortState struct {
+ cs []candidateWithTime // The entries to sort
+ primaryDigest digest.Digest // The digest the user actually asked for
+ uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+}
+
+func (css *candidateSortState) Len() int {
+ return len(css.cs)
+}
+
+func (css *candidateSortState) Less(i, j int) bool {
+ xi := css.cs[i]
+ xj := css.cs[j]
+
+ // primaryDigest entries come first, more recent first.
+ // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
+
+ // First, deal with the primaryDigest/uncompressedDigest cases:
+ if xi.candidate.Digest != xj.candidate.Digest {
+ // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
+ if xi.candidate.Digest == css.primaryDigest {
+ return true
+ }
+ if xj.candidate.Digest == css.primaryDigest {
+ return false
+ }
+ if css.uncompressedDigest != "" {
+ if xi.candidate.Digest == css.uncompressedDigest {
+ return false
+ }
+ if xj.candidate.Digest == css.uncompressedDigest {
+ return true
+ }
+ }
+ } else { // xi.candidate.Digest == xj.candidate.Digest
+ // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
+ if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
+ return xi.lastSeen.After(xj.lastSeen)
+ }
+ }
+
+ // Neither of the digests are primaryDigest/uncompressedDigest:
+ if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
+ return xi.lastSeen.After(xj.lastSeen)
+ }
+ // Fall back to digest, if timestamps end up _exactly_ the same (how?!)
+ return xi.candidate.Digest < xj.candidate.Digest
+}
+
+func (css *candidateSortState) Swap(i, j int) {
+ css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
+}
+
+// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
+// number of entries to limit, only to make testing simpler.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+ // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
+ // compare equal.
+ sort.Sort(&candidateSortState{
+ cs: cs,
+ primaryDigest: primaryDigest,
+ uncompressedDigest: uncompressedDigest,
+ })
+
+ resLength := len(cs)
+ if resLength > maxCandidates {
+ resLength = maxCandidates
+ }
+ res := make([]types.BICReplacementCandidate, resLength)
+ for i := range res {
+ res[i] = cs[i].candidate
+ }
+ return res
+}
+
+// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
+// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+ return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+}
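The ordering rule implemented by Less above (primaryDigest first, other digests by recency, uncompressedDigest last) can be pinned down with a small package-internal test. A sketch of such a hypothetical _test.go file, with stand-in digests; it is not part of the patch:

    package blobinfocache

    import (
        "testing"
        "time"

        "github.com/containers/image/types"
        "github.com/opencontainers/go-digest"
    )

    // TestPrioritizeSketch illustrates the expected order of destructivelyPrioritizeReplacementCandidates.
    func TestPrioritizeSketch(t *testing.T) {
        primary := digest.Digest("sha256:primary-stand-in")
        uncompressed := digest.Digest("sha256:uncompressed-stand-in")
        other := digest.Digest("sha256:other-stand-in")
        now := time.Now()
        cs := []candidateWithTime{
            {candidate: types.BICReplacementCandidate{Digest: uncompressed, Location: types.BICLocationReference{Opaque: "u"}}, lastSeen: now},
            {candidate: types.BICReplacementCandidate{Digest: other, Location: types.BICLocationReference{Opaque: "o"}}, lastSeen: now.Add(-time.Minute)},
            {candidate: types.BICReplacementCandidate{Digest: primary, Location: types.BICLocationReference{Opaque: "p"}}, lastSeen: now.Add(-time.Hour)},
        }
        res := destructivelyPrioritizeReplacementCandidates(cs, primary, uncompressed)
        if res[0].Digest != primary || res[len(res)-1].Digest != uncompressed {
            t.Errorf("unexpected order: %#v", res)
        }
    }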
diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
index 067f512ad..afc7312d1 100644
--- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
@@ -3,7 +3,7 @@ package sysregistriesv2
import (
"fmt"
"io/ioutil"
- "net/url"
+ "os"
"path/filepath"
"strings"
"sync"
@@ -82,8 +82,8 @@ func (e *InvalidRegistries) Error() string {
}
// parseURL parses the input string, performs some sanity checks and returns
-// the sanitized input string. An error is returned in case parsing fails or
-// or if URI scheme or user is set.
+// the sanitized input string. An error is returned if the input string is
+// empty or if it contains an "http{s,}://" prefix.
func parseURL(input string) (string, error) {
trimmed := strings.TrimRight(input, "/")
@@ -91,49 +91,11 @@ func parseURL(input string) (string, error) {
return "", &InvalidRegistries{s: "invalid URL: cannot be empty"}
}
- // Ultimately, we expect input of the form example.com[/namespace/…], a prefix
- // of a fully-expended reference (containers/image/docker/Reference.String()).
- // c/image/docker/Reference does not currently provide such a parser.
- // So, we use url.Parse("http://"+trimmed) below to ~verify the format, possibly
- // letting some invalid input in, trading that off for a simpler parser.
- //
- // url.Parse("http://"+trimmed) is, sadly, too permissive, notably for
- // trimmed == "http://example.com/…", url.Parse("http://http://example.com/…")
- // is accepted and parsed as
- // {Scheme: "http", Host: "http:", Path: "//example.com/…"}.
- //
- // So, first we do an explicit check for an unwanted scheme prefix:
-
- // This will parse trimmed=="http://example.com/…" with Scheme: "http". Perhaps surprisingly,
- // it also succeeds for the input we want to accept, in different ways:
- // "example.com" -> {Scheme:"", Opaque:"", Path:"example.com"}
- // "example.com/repo" -> {Scheme:"", Opaque:"", Path:"example.com/repo"}
- // "example.com:5000" -> {Scheme:"example.com", Opaque:"5000"}
- // "example.com:5000/repo" -> {Scheme:"example.com", Opaque:"5000/repo"}
- uri, err := url.Parse(trimmed)
- if err != nil {
- return "", &InvalidRegistries{s: fmt.Sprintf("invalid URL '%s': %v", input, err)}
- }
-
- // Check if a URI Scheme is set.
- // Note that URLs that do not start with a slash after the scheme are
- // interpreted as `scheme:opaque[?query][#fragment]`; see above for examples.
- if uri.Scheme != "" && uri.Opaque == "" {
+ if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
msg := fmt.Sprintf("invalid URL '%s': URI schemes are not supported", input)
return "", &InvalidRegistries{s: msg}
}
- uri, err = url.Parse("http://" + trimmed)
- if err != nil {
- msg := fmt.Sprintf("invalid URL '%s': sanitized URL did not parse: %v", input, err)
- return "", &InvalidRegistries{s: msg}
- }
-
- if uri.User != nil {
- msg := fmt.Sprintf("invalid URL '%s': user/password are not supported", trimmed)
- return "", &InvalidRegistries{s: msg}
- }
-
return trimmed, nil
}
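After this simplification, parseURL only rejects empty input and an explicit http(s) scheme, and otherwise returns the trimmed string. A package-internal sketch of that behavior (a hypothetical _test.go file, not part of the patch):

    package sysregistriesv2

    import "testing"

    // TestParseURLSketch illustrates the simplified parseURL behavior.
    func TestParseURLSketch(t *testing.T) {
        if _, err := parseURL("https://example.com"); err == nil {
            t.Error("expected an error for an explicit URI scheme")
        }
        if got, err := parseURL("example.com/ns/repo/"); err != nil || got != "example.com/ns/repo" {
            t.Errorf("unexpected result %q, %v", got, err)
        }
    }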
@@ -279,7 +241,18 @@ var configMutex = sync.Mutex{}
// are synchronized via configMutex.
var configCache = make(map[string][]Registry)
+// InvalidateCache invalidates the registry cache. This function is meant to be
+// used for long-running processes that need to reload potential changes made to
+// the cached registry config files.
+func InvalidateCache() {
+ configMutex.Lock()
+ defer configMutex.Unlock()
+ configCache = make(map[string][]Registry)
+}
+
// GetRegistries loads and returns the registries specified in the config.
+// Note the parsed content of registry config files is cached. For reloading,
+// use `InvalidateCache` and re-call `GetRegistries`.
func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
configPath := getConfigPath(ctx)
@@ -293,6 +266,13 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
// load the config
config, err := loadRegistryConf(configPath)
if err != nil {
+ // Return an empty []Registry if we use the default config,
+ // which implies that the config path of the SystemContext
+ // isn't set. Note: if ctx.SystemRegistriesConfPath points to
+ // the default config, we will still return an error.
+ if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
+ return []Registry{}, nil
+ }
return nil, err
}
@@ -323,23 +303,33 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
// FindUnqualifiedSearchRegistries returns all registries that are configured
// for unqualified image search (i.e., with Registry.Search == true).
-func FindUnqualifiedSearchRegistries(registries []Registry) []Registry {
+func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, error) {
+ registries, err := GetRegistries(ctx)
+ if err != nil {
+ return nil, err
+ }
+
unqualified := []Registry{}
for _, reg := range registries {
if reg.Search {
unqualified = append(unqualified, reg)
}
}
- return unqualified
+ return unqualified, nil
}
// FindRegistry returns the Registry with the longest prefix for ref. If no
// Registry prefixes the image, nil is returned.
-func FindRegistry(ref string, registries []Registry) *Registry {
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+ registries, err := GetRegistries(ctx)
+ if err != nil {
+ return nil, err
+ }
+
reg := Registry{}
prefixLen := 0
for _, r := range registries {
- if strings.HasPrefix(ref, r.Prefix) {
+ if strings.HasPrefix(ref, r.Prefix+"/") || ref == r.Prefix {
length := len(r.Prefix)
if length > prefixLen {
reg = r
@@ -348,9 +338,9 @@ func FindRegistry(ref string, registries []Registry) *Registry {
}
}
if prefixLen != 0 {
- return &reg
+ return &reg, nil
}
- return nil
+ return nil, nil
}
// Reads the global registry file from the filesystem. Returns a byte array.
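With these changes, FindRegistry and FindUnqualifiedSearchRegistries load the (cached) configuration themselves, and the prefix match now requires a full path component, so "example.com" no longer matches "example.company.com/…" merely by string prefix. A minimal caller sketch; the reference string and registries.conf entry are made-up examples:

    package main

    import (
        "fmt"

        "github.com/containers/image/pkg/sysregistriesv2"
        "github.com/containers/image/types"
    )

    func main() {
        // With a registries.conf entry whose prefix is "example.com", this reference matches it;
        // "example.company.com/ns/image" would not.
        reg, err := sysregistriesv2.FindRegistry(&types.SystemContext{}, "example.com/ns/image:latest")
        if err != nil {
            panic(err)
        }
        if reg != nil {
            fmt.Println(reg.Prefix)
        }
    }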
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index d1b010a76..bd6813119 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -16,6 +16,7 @@ import (
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
@@ -99,8 +100,10 @@ func (s storageImageSource) Close() error {
return nil
}
-// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
if info.Digest == image.GzippedEmptyLayerDigest {
return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}
@@ -317,9 +320,17 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}
-// PutBlob stores a layer or data blob in our temporary directory, checking that any information
-// in the blobinfo matches the incoming data.
-func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ // Stores a layer or data blob in our temporary directory, checking that any information
+ // in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
@@ -370,6 +381,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
if blobSize < 0 {
blobSize = counter.Count
}
+ // This is safe because we have just computed both values ourselves.
+ cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return types.BlobInfo{
Digest: blobDigest,
Size: blobSize,
@@ -377,59 +390,82 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
-// reapplied using ReapplyBlob.
-//
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (s *storageImageDestination) HasBlob(ctx context.Context, blobinfo types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if blobinfo.Digest == "" {
- return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
if err := blobinfo.Digest.Validate(); err != nil {
- return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
}
+
// Check if we've already cached it in a file.
if size, ok := s.fileSizes[blobinfo.Digest]; ok {
- return true, size, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: size,
+ MediaType: blobinfo.MediaType,
+ }, nil
}
+
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].UncompressedSize, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
}
+
// Check if we have a was-compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].CompressedSize, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].CompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Does the blob correspond to a known DiffID which we already have available?
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+ // uncompressed layer, and that can happen only if canSubstitute.
+ if canSubstitute {
+ if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
+ }
+ if len(layers) > 0 {
+ s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: uncompressedDigest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
}
- // Nope, we don't have it.
- return false, -1, nil
-}
-// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
-// same one when it walks the list in the manifest.
-func (s *storageImageDestination) ReapplyBlob(ctx context.Context, blobinfo types.BlobInfo) (types.BlobInfo, error) {
- present, size, err := s.HasBlob(ctx, blobinfo)
- if !present {
- return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
- }
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
- }
- blobinfo.Size = size
- return blobinfo, nil
+ // Nope, we don't have it.
+ return false, types.BlobInfo{}, nil
}
// computeID computes a recommended image ID based on information we have so far. If
@@ -514,8 +550,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
+ // Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+ // that relies on using a blob digest that has never been seen by the store had better call
+ // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+ // so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- has, _, err := s.HasBlob(ctx, blob.BlobInfo)
+ has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}
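The HasBlob/ReapplyBlob pair is thus replaced by a single TryReusingBlob call that both checks for and applies the blob. A minimal sketch of the new calling convention, assuming dest, cache, and info come from the surrounding copy code (they are not defined in this patch):

    package example

    import (
        "context"

        "github.com/containers/image/types"
    )

    // maybeReuseLayer is a sketch; dest, cache and info are assumed inputs.
    func maybeReuseLayer(ctx context.Context, dest types.ImageDestination, cache types.BlobInfoCache, info types.BlobInfo) (bool, types.BlobInfo, error) {
        reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true /* canSubstitute */)
        if err != nil || !reused {
            return false, types.BlobInfo{}, err // the caller would fall back to GetBlob + PutBlob
        }
        return true, reusedInfo, nil // reusedInfo may describe a substituted (e.g. uncompressed) blob
    }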
diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go
index 17af60b30..ee963b8d8 100644
--- a/vendor/github.com/containers/image/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/tarball/tarball_src.go
@@ -207,7 +207,10 @@ func (is *tarballImageSource) Close() error {
return nil
}
-func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
// We should only be asked about things in the manifest. Maybe the configuration blob.
if blobinfo.Digest == is.configID {
return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index 5d05b711a..dda332776 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -100,6 +100,82 @@ type BlobInfo struct {
MediaType string
}
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+ Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+ Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+ Digest digest.Digest
+ Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors when reading from, or writing to, the cache should not be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+type BlobInfoCache interface {
+ // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+ // May return anyDigest if it is known to be uncompressed.
+ // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+ UncompressedDigest(anyDigest digest.Digest) digest.Digest
+ // RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+ // It’s allowed for anyDigest == uncompressed.
+ // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+ // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+ // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+ RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+ // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+ // and can be reused given the opaque location data.
+ RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+ // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+ // within the specified (transport, scope) scope (if they still exist, which is not guaranteed).
+ //
+ // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+ // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+ // uncompressed digest.
+ CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
@@ -120,7 +196,8 @@ type ImageSource interface {
GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
- GetBlob(context.Context, BlobInfo) (io.ReadCloser, int64, error)
+ // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+ GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
@@ -148,8 +225,7 @@ const (
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
-// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
-// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
//
@@ -183,17 +259,19 @@ type ImageDestination interface {
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
+ // May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
- PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error)
- // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
- // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
- // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
- // it returns a non-nil error only on an unexpected failure.
- HasBlob(ctx context.Context, info BlobInfo) (bool, int64, error)
- // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
- ReapplyBlob(ctx context.Context, info BlobInfo) (BlobInfo, error)
+ PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+ // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ // May use and/or update cache.
+ TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
@@ -324,6 +402,30 @@ type DockerAuthConfig struct {
Password string
}
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+ // OptionalBoolUndefined indicates that the OptionalBool hasn't been set.
+ OptionalBoolUndefined OptionalBool = iota
+ // OptionalBoolTrue represents the boolean true.
+ OptionalBoolTrue
+ // OptionalBoolFalse represents the boolean false.
+ OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to avoid boilerplate code for callers.
+func NewOptionalBool(b bool) OptionalBool {
+ o := OptionalBoolFalse
+ if b {
+ o = OptionalBoolTrue
+ }
+ return o
+}
+
// SystemContext allows parameterizing access to implicitly-accessed resources,
// like configuration files in /etc and users' login state in their home directory.
// Various components can share the same field only if their semantics is exactly
@@ -351,6 +453,8 @@ type SystemContext struct {
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
OSChoice string
+ // If not "", overrides the system's default directory containing a blob info cache.
+ BlobInfoCacheDir string
// Additional tags when creating or copying a docker-archive.
DockerArchiveAdditionalTags []reference.NamedTagged
@@ -376,7 +480,7 @@ type SystemContext struct {
// Ignored if DockerCertPath is non-empty.
DockerPerHostCertDirPath string
// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
- DockerInsecureSkipTLSVerify bool
+ DockerInsecureSkipTLSVerify OptionalBool
// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
DockerAuthConfig *DockerAuthConfig
// if not "", a User-Agent header is added to each request when contacting a registry.
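A caller-side sketch of how the tri-state is meant to be consumed (the --tls-verify flag and the helper below are hypothetical, not part of this change): only an explicitly set flag moves DockerInsecureSkipTLSVerify away from OptionalBoolUndefined, so the library can still fall back to its per-registry default.

    import "github.com/containers/image/types"

    // systemContextFor is a hypothetical helper wiring a --tls-verify flag.
    func systemContextFor(tlsVerify *bool) *types.SystemContext {
        ctx := &types.SystemContext{}
        if tlsVerify != nil {
            // The flag means "verify", the field means "skip", hence the negation.
            ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!*tlsVerify)
        }
        // When tlsVerify is nil the field stays OptionalBoolUndefined.
        return ctx
    }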
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index 246c0096a..88537981a 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -34,7 +34,7 @@ github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonpointer master
github.com/tchap/go-patricia v2.2.6
-github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
+github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
@@ -43,3 +43,4 @@ github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
+github.com/boltdb/bolt master
diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md
index f68cc55c3..fef46a689 100644
--- a/vendor/github.com/containers/storage/README.md
+++ b/vendor/github.com/containers/storage/README.md
@@ -2,7 +2,7 @@
layers, container images, and containers. A `containers-storage` CLI wrapper
is also included for manual and scripting use.
-To build the CLI wrapper, use 'make build-binary'.
+To build the CLI wrapper, use 'make binary'.
Operations which use VMs expect to launch them using 'vagrant', defaulting to
using its 'libvirt' provider. The boxes used are also available for the
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 0a125331d..beaf41f39 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -147,6 +147,13 @@ func (c *Container) ProcessLabel() string {
return ""
}
+func (c *Container) MountOpts() []string {
+ if mountOpts, ok := c.Flags["MountOpts"].([]string); ok {
+ return mountOpts
+ }
+ return nil
+}
+
func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers))
for i := range r.containers {
@@ -293,6 +300,9 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
}
+ if options.MountOpts != nil {
+ options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
+ }
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
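A caller-side sketch (store, imageID and the option values are hypothetical; assumes storage "github.com/containers/storage"): the new MountOpts field is copied into the container's Flags under "MountOpts" at create time and read back through Container.MountOpts() when mounting.

    ctrOpts := storage.ContainerOptions{
        Flags:     map[string]interface{}{},
        MountOpts: []string{"nodev", "noexec"},
    }
    ctr, err := store.CreateContainer("", []string{"demo"}, imageID, "", "", &ctrOpts)
    if err != nil {
        return err
    }
    mountOpts := ctr.MountOpts() // []string{"nodev", "noexec"}, consumed by store.Mount later in this change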
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
index 6e83808d4..aef6becfe 100644
--- a/vendor/github.com/containers/storage/containers_ffjson.go
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -1,6 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: containers.go
-//
package storage
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index f14ba24b9..ca69816be 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -405,7 +405,7 @@ func atomicRemove(source string) error {
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
- return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
+ return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
}
default:
return errors.Wrapf(err, "error preparing atomic delete")
@@ -441,7 +441,7 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
// If a dir does not have a parent (no layers) do not try to mount
// just return the diff path to the data
if len(parents) > 0 {
- if err := a.mount(id, m, options.MountLabel, parents); err != nil {
+ if err := a.mount(id, m, parents, options); err != nil {
return "", err
}
}
@@ -585,7 +585,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
return layers, nil
}
-func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
+func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error {
a.Lock()
defer a.Unlock()
@@ -596,7 +596,7 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str
rw := a.getDiffPath(id)
- if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
+ if err := a.aufsMount(layers, rw, target, options); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
}
return nil
@@ -643,7 +643,7 @@ func (a *Driver) Cleanup() error {
return mountpk.Unmount(a.root)
}
-func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
+func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
defer func() {
if err != nil {
Unmount(target)
@@ -657,7 +657,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
if useDirperm() {
offset += len(",dirperm1")
}
- b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
+ b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel
bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
index := 0
@@ -670,21 +670,25 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
}
opts := "dio,xino=/dev/shm/aufs.xino"
- if a.mountOptions != "" {
- opts += fmt.Sprintf(",%s", a.mountOptions)
+ mountOptions := a.mountOptions
+ if len(options.Options) > 0 {
+ mountOptions = strings.Join(options.Options, ",")
+ }
+ if mountOptions != "" {
+ opts += fmt.Sprintf(",%s", mountOptions)
}
if useDirperm() {
opts += ",dirperm1"
}
- data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
+ data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel)
if err = mount("none", target, "aufs", 0, data); err != nil {
return
}
for ; index < len(ro); index++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[index])
- data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+ data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel)
if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
return
}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index adc34d209..567cda9d3 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -640,6 +640,9 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if err != nil {
return "", err
}
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("btrfs driver does not support mount options")
+ }
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy.go b/vendor/github.com/containers/storage/drivers/copy/copy.go
new file mode 100644
index 000000000..2617824c5
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/copy/copy.go
@@ -0,0 +1,277 @@
+// +build linux
+
+package copy
+
+/*
+#include <linux/fs.h>
+
+#ifndef FICLONE
+#define FICLONE _IOW(0x94, 9, int)
+#endif
+*/
+import "C"
+import (
+ "container/list"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "github.com/containers/storage/pkg/pools"
+ "github.com/containers/storage/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "golang.org/x/sys/unix"
+)
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+ // Content creates a new file, and copies the content of the file
+ Content Mode = iota
+ // Hardlink creates a new hardlink to the existing file
+ Hardlink
+)
+
+func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+ srcFile, err := os.Open(srcPath)
+ if err != nil {
+ return err
+ }
+ defer srcFile.Close()
+
+ // If the destination file already exists, we shouldn't blow it away
+ dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
+ if err != nil {
+ return err
+ }
+ defer dstFile.Close()
+
+ if *copyWithFileClone {
+ // unix.Syscall returns a syscall.Errno, not a nil-able error; compare
+ // against 0 so a successful FICLONE is actually detected.
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
+ if errno == 0 {
+ return nil
+ }
+
+ *copyWithFileClone = false
+ if errno == unix.EXDEV {
+ *copyWithFileRange = false
+ }
+ }
+ if *copyWithFileRange {
+ err = doCopyWithFileRange(srcFile, dstFile, fileinfo)
+ // Trying the file_clone may not have caught the exdev case
+ // as the ioctl may not have been available (therefore EINVAL)
+ if err == unix.EXDEV || err == unix.ENOSYS {
+ *copyWithFileRange = false
+ } else {
+ return err
+ }
+ }
+ return legacyCopy(srcFile, dstFile)
+}
+
+func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error {
+ amountLeftToCopy := fileinfo.Size()
+
+ for amountLeftToCopy > 0 {
+ n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0)
+ if err != nil {
+ return err
+ }
+
+ amountLeftToCopy = amountLeftToCopy - int64(n)
+ }
+
+ return nil
+}
+
+func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
+ _, err := pools.Copy(dstFile, srcFile)
+
+ return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+ data, err := system.Lgetxattr(srcPath, attr)
+ if err != nil {
+ return err
+ }
+ if data != nil {
+ if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type fileID struct {
+ dev uint64
+ ino uint64
+}
+
+type dirMtimeInfo struct {
+ dstPath *string
+ stat *syscall.Stat_t
+}
+
+// DirCopy copies or hardlinks the contents of one directory to another,
+// properly handling xattrs, and soft links
+//
+// Copying xattrs can be opted out of by passing false for copyXattrs.
+func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
+ copyWithFileRange := true
+ copyWithFileClone := true
+
+ // This is a map of source file inodes to dst file paths
+ copiedFiles := make(map[fileID]string)
+
+ dirsToSetMtimes := list.New()
+ err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(srcDir, srcPath)
+ if err != nil {
+ return err
+ }
+
+ dstPath := filepath.Join(dstDir, relPath)
+ if err != nil {
+ return err
+ }
+
+ stat, ok := f.Sys().(*syscall.Stat_t)
+ if !ok {
+ return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+ }
+
+ isHardlink := false
+
+ switch f.Mode() & os.ModeType {
+ case 0: // Regular file
+ id := fileID{dev: stat.Dev, ino: stat.Ino}
+ if copyMode == Hardlink {
+ isHardlink = true
+ if err2 := os.Link(srcPath, dstPath); err2 != nil {
+ return err2
+ }
+ } else if hardLinkDstPath, ok := copiedFiles[id]; ok {
+ if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil {
+ return err2
+ }
+ } else {
+ if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
+ return err2
+ }
+ copiedFiles[id] = dstPath
+ }
+
+ case os.ModeDir:
+ if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ case os.ModeSymlink:
+ link, err := os.Readlink(srcPath)
+ if err != nil {
+ return err
+ }
+
+ if err := os.Symlink(link, dstPath); err != nil {
+ return err
+ }
+
+ case os.ModeNamedPipe:
+ fallthrough
+ case os.ModeSocket:
+ if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
+ return err
+ }
+
+ case os.ModeDevice:
+ if rsystem.RunningInUserNS() {
+ // cannot create a device if running in user namespace
+ return nil
+ }
+ if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("unknown file type for %s", srcPath)
+ }
+
+ // Everything below is copying metadata from src to dst. All this metadata
+ // already shares an inode for hardlinks.
+ if isHardlink {
+ return nil
+ }
+
+ if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+ return err
+ }
+
+ if copyXattrs {
+ if err := doCopyXattrs(srcPath, dstPath); err != nil {
+ return err
+ }
+ }
+
+ isSymlink := f.Mode()&os.ModeSymlink != 0
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if !isSymlink {
+ if err := os.Chmod(dstPath, f.Mode()); err != nil {
+ return err
+ }
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ // nolint: unconvert
+ if f.IsDir() {
+ dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
+ } else if !isSymlink {
+ aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+ mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
+ if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{stat.Atim, stat.Mtim}
+ if err := system.LUtimesNano(dstPath, ts); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() {
+ mtimeInfo := e.Value.(*dirMtimeInfo)
+ ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim}
+ if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func doCopyXattrs(srcPath, dstPath string) error {
+ if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+ return err
+ }
+
+ // We need to copy this attribute if it appears in an overlay upper layer, as
+ // this function is used to copy those. It is set by overlay if a directory
+ // is removed and then re-created and should not inherit anything from the
+ // same dir in the lower dir.
+ return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
+}
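Usage sketch for the new copy package (paths hypothetical): DirCopy walks the source tree once, trying FICLONE and copy_file_range for regular files before falling back to a plain read/write copy, while preserving ownership, modes, timestamps and, optionally, xattrs.

    import "github.com/containers/storage/drivers/copy"

    // copy.Content copies file data; copy.Hardlink would hardlink regular files instead.
    if err := copy.DirCopy("/var/lib/demo/src", "/var/lib/demo/dst", copy.Content, true); err != nil {
        return err
    }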
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index cbf67b3eb..b6f22e90a 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -2364,7 +2364,7 @@ func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
}
// MountDevice mounts the device if not already mounted.
-func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
+func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {
info, err := devices.lookupDeviceWithLock(hash)
if err != nil {
return err
@@ -2396,8 +2396,17 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
options = joinMountOptions(options, "nouuid")
}
- options = joinMountOptions(options, devices.mountOptions)
- options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
+ mountOptions := devices.mountOptions
+ if len(moptions.Options) > 0 {
+ addNouuid := strings.Contains(mountOptions, "nouuid")
+ mountOptions = strings.Join(moptions.Options, ",")
+ if addNouuid {
+ mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
+ }
+ }
+
+ options = joinMountOptions(options, mountOptions)
+ options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index 9fc082d7d..39a4fbe2c 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -9,8 +9,6 @@ import (
"path"
"strconv"
- "github.com/sirupsen/logrus"
-
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/idtools"
@@ -18,6 +16,7 @@ import (
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
+ "github.com/sirupsen/logrus"
)
func init() {
@@ -189,7 +188,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
// Mount the device
- if err := d.DeviceSet.MountDevice(id, mp, options.MountLabel); err != nil {
+ if err := d.DeviceSet.MountDevice(id, mp, options); err != nil {
d.ctr.Decrement(mp)
return "", err
}
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 4569c7b59..476b55160 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -49,6 +49,7 @@ type MountOpts struct {
// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
UidMaps []idtools.IDMap
GidMaps []idtools.IDMap
+ Options []string
}
// InitFunc initializes the storage driver.
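Sketch of how the widened MountOpts reaches a driver (layerID, mountLabel and the option values are hypothetical; assumes graphdriver "github.com/containers/storage/drivers"): Get() now receives the mount label plus any per-mount options, which the aufs, devmapper, overlay and zfs drivers splice into their mount data, while btrfs, vfs and windows reject them.

    mountpoint, err := driver.Get(layerID, graphdriver.MountOpts{
        MountLabel: mountLabel,
        Options:    []string{"nodev", "ro"},
    })
    if err != nil {
        return err
    }
    // mountpoint now reflects the per-mount options rather than only the driver-wide defaults.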
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index 19da7d101..ab2c41e58 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)
@@ -167,7 +168,9 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin
}
defer driver.Put(id)
- options := &archive.TarOptions{}
+ options := &archive.TarOptions{
+ InUserNS: rsystem.RunningInUserNS(),
+ }
if applyMappings != nil {
options.UIDMaps = applyMappings.UIDs()
options.GIDMaps = applyMappings.GIDs()
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index d2cc65bca..df736c0a9 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -29,6 +29,7 @@ import (
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -340,6 +341,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
func (d *Driver) useNaiveDiff() bool {
useNaiveDiffLock.Do(func() {
+ if d.options.mountProgram != "" {
+ useNaiveDiffOnly = true
+ return
+ }
if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil {
logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
useNaiveDiffOnly = true
@@ -739,7 +744,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
workDir := path.Join(dir, "work")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
- if d.options.mountOptions != "" {
+ if len(options.Options) > 0 {
+ opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts)
+ } else if d.options.mountOptions != "" {
opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts)
}
mountData := label.FormatMountLabel(opts, options.MountLabel)
@@ -841,6 +848,17 @@ func (d *Driver) isParent(id, parent string) bool {
return ld == parentDir
}
+func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
+ whiteoutFormat := archive.OverlayWhiteoutFormat
+ if d.options.mountProgram != "" {
+ // If we are using a mount program, we are most likely running
+ // as an unprivileged user that cannot use mknod, so fallback to the
+ // AUFS whiteout format.
+ whiteoutFormat = archive.AUFSWhiteoutFormat
+ }
+ return whiteoutFormat
+}
+
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent string, mountLabel string, diff io.Reader) (size int64, err error) {
if !d.isParent(id, parent) {
@@ -858,7 +876,8 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str
if err := untar(diff, applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
- WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ WhiteoutFormat: d.getWhiteoutFormat(),
+ InUserNS: rsystem.RunningInUserNS(),
}); err != nil {
return 0, err
}
@@ -911,7 +930,7 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string,
Compression: archive.Uncompressed,
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
- WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ WhiteoutFormat: d.getWhiteoutFormat(),
WhiteoutData: lowerDirs,
})
}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
new file mode 100644
index 000000000..8137fcf67
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
@@ -0,0 +1,7 @@
+package vfs
+
+import "github.com/containers/storage/drivers/copy"
+
+func dirCopy(srcDir, dstDir string) error {
+ return copy.DirCopy(srcDir, dstDir, copy.Content, false)
+}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
new file mode 100644
index 000000000..8ac80ee1d
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package vfs // import "github.com/containers/storage/drivers/vfs"
+
+import "github.com/containers/storage/pkg/chrootarchive"
+
+func dirCopy(srcDir, dstDir string) error {
+ return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
+}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index d10fb2607..f7f3c75ba 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -7,7 +7,6 @@ import (
"strings"
"github.com/containers/storage/drivers"
- "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ostree"
"github.com/containers/storage/pkg/system"
@@ -15,8 +14,8 @@ import (
)
var (
- // CopyWithTar defines the copy method to use.
- CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
+ // CopyDir defines the copy method to use.
+ CopyDir = dirCopy
)
func init() {
@@ -141,7 +140,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
- if err := CopyWithTar(parentDir, dir); err != nil {
+ if err := dirCopy(parentDir, dir); err != nil {
return err
}
}
@@ -181,6 +180,9 @@ func (d *Driver) Remove(id string) error {
// Get returns the directory for the given id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("vfs driver does not support mount options")
+ }
if st, err := os.Stat(dir); err != nil {
return "", err
} else if !st.IsDir() {
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 4ccf657dc..c6d86a4ab 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -367,6 +367,9 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel)
var dir string
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("windows driver does not support mount options")
+ }
rID, err := d.resolveID(id)
if err != nil {
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index cb4424f2d..c3ce6e869 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -52,7 +52,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
}
- file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600)
+ file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
if err != nil {
logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
@@ -366,8 +366,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return mountpoint, nil
}
+ mountOptions := d.options.mountOptions
+ if len(options.Options) > 0 {
+ mountOptions = strings.Join(options.Options, ",")
+ }
+
filesystem := d.zfsPath(id)
- opts := label.FormatMountLabel(d.options.mountOptions, options.MountLabel)
+ opts := label.FormatMountLabel(mountOptions, options.MountLabel)
logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 1275ab47c..0b532eb77 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -28,9 +28,8 @@ import (
)
const (
- tarSplitSuffix = ".tar-split.gz"
- incompleteFlag = "incomplete"
- compressionFlag = "diff-compression"
+ tarSplitSuffix = ".tar-split.gz"
+ incompleteFlag = "incomplete"
)
// A Layer is a record of a copy-on-write layer that's stored by the lower
@@ -542,8 +541,8 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
_, idInUse = r.byid[id]
}
}
- if _, idInUse := r.byid[id]; idInUse {
- return nil, -1, ErrDuplicateID
+ if duplicateLayer, idInUse := r.byid[id]; idInUse {
+ return duplicateLayer, -1, ErrDuplicateID
}
names = dedupeNames(names)
for _, name := range names {
@@ -841,8 +840,12 @@ func (r *layerStore) Delete(id string) error {
return ErrLayerUnknown
}
id = layer.ID
- if _, err := r.Unmount(id, true); err != nil {
- return err
+ // This check is needed for idempotency of delete where the layer could have been
+ // already unmounted (since c/storage gives you that API directly)
+ for layer.MountCount > 0 {
+ if _, err := r.Unmount(id, false); err != nil {
+ return err
+ }
}
err := r.driver.Remove(id)
if err == nil {
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
index 09b5d0f33..125b5d8c9 100644
--- a/vendor/github.com/containers/storage/layers_ffjson.go
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./layers.go
+// source: layers.go
package storage
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 4c4382625..8d6eaacf3 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -22,6 +22,7 @@ import (
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/promise"
"github.com/containers/storage/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)
@@ -1054,6 +1055,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
CopyPass: true,
+ InUserNS: rsystem.RunningInUserNS(),
}
archive, err := TarWithOptions(src, options)
if err != nil {
@@ -1068,6 +1070,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1087,6 +1090,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1186,6 +1190,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
UIDMaps: archiver.UntarIDMappings.UIDs(),
GIDMaps: archiver.UntarIDMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
err = archiver.Untar(r, filepath.Dir(dst), options)
if err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index b9fa228e6..dde8d44d3 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
)
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
@@ -52,6 +53,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
if options == nil {
options = &archive.TarOptions{}
+ options.InUserNS = rsystem.RunningInUserNS()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go
new file mode 100644
index 000000000..c56aa86a2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go
@@ -0,0 +1,56 @@
+package idtools
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func nonDigitsToWhitespace(r rune) rune {
+ if !strings.ContainsRune("0123456789", r) {
+ return ' '
+ }
+ return r
+}
+
+func parseTriple(spec []string) (container, host, size uint32, err error) {
+ cid, err := strconv.ParseUint(spec[0], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
+ }
+ hid, err := strconv.ParseUint(spec[1], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
+ }
+ sz, err := strconv.ParseUint(spec[2], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
+ }
+ return uint32(cid), uint32(hid), uint32(sz), nil
+}
+
+// ParseIDMap parses idmap triples from string.
+func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
+ for _, idMapSpec := range mapSpec {
+ idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
+ if len(idSpec)%3 != 0 {
+ return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+ }
+ for i := range idSpec {
+ if i%3 != 0 {
+ continue
+ }
+ cid, hid, size, err := parseTriple(idSpec[i : i+3])
+ if err != nil {
+ return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+ }
+ mapping := IDMap{
+ ContainerID: int(cid),
+ HostID: int(hid),
+ Size: int(size),
+ }
+ idmap = append(idmap, mapping)
+ }
+ }
+ return idmap, nil
+}
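Example of the extracted parser (the mapping value is hypothetical): every non-digit is treated as a separator, so the usual container:host:size triples from storage.conf parse directly.

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/idtools"
    )

    func main() {
        maps, err := idtools.ParseIDMap([]string{"0:100000:65536"}, "remap-uids")
        if err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", maps) // [{ContainerID:0 HostID:100000 Size:65536}]
    }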
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 7eaa82910..e0dd1b92f 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -8,7 +8,6 @@ import (
"os"
"path/filepath"
"reflect"
- "strconv"
"strings"
"sync"
"time"
@@ -22,6 +21,7 @@ import (
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
digest "github.com/opencontainers/go-digest"
@@ -502,6 +502,7 @@ type ContainerOptions struct {
IDMappingOptions
LabelOpts []string
Flags map[string]interface{}
+ MountOpts []string
}
type store struct {
@@ -1069,7 +1070,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, read
}
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc)
if err != nil {
- return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q")
+ return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q", parentLayer.ID)
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
@@ -2145,21 +2146,20 @@ func (s *store) DeleteContainer(id string) error {
if err = rlstore.Delete(container.LayerID); err != nil {
return err
}
- if err = rcstore.Delete(id); err != nil {
- return err
- }
- middleDir := s.graphDriverName + "-containers"
- gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
- if err = os.RemoveAll(gcpath); err != nil {
- return err
- }
- rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
- if err = os.RemoveAll(rcpath); err != nil {
- return err
- }
- return nil
}
- return ErrNotALayer
+ if err = rcstore.Delete(id); err != nil {
+ return err
+ }
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
+ if err = os.RemoveAll(gcpath); err != nil {
+ return err
+ }
+ rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
+ if err = os.RemoveAll(rcpath); err != nil {
+ return err
+ }
+ return nil
}
}
return ErrNotAContainer
@@ -2280,10 +2280,14 @@ func (s *store) Version() ([][2]string, error) {
func (s *store) Mount(id, mountLabel string) (string, error) {
container, err := s.Container(id)
- var uidMap, gidMap []idtools.IDMap
+ var (
+ uidMap, gidMap []idtools.IDMap
+ mountOpts []string
+ )
if err == nil {
uidMap, gidMap = container.UIDMap, container.GIDMap
id = container.LayerID
+ mountOpts = container.MountOpts()
}
rlstore, err := s.LayerStore()
if err != nil {
@@ -2299,6 +2303,7 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
MountLabel: mountLabel,
UidMaps: uidMap,
GidMaps: gidMap,
+ Options: mountOpts,
}
return rlstore.Mount(id, options)
}
@@ -3203,56 +3208,19 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
storeOptions.UIDMap = mappings.UIDs()
storeOptions.GIDMap = mappings.GIDs()
}
- nonDigitsToWhitespace := func(r rune) rune {
- if strings.IndexRune("0123456789", r) == -1 {
- return ' '
- } else {
- return r
- }
- }
- parseTriple := func(spec []string) (container, host, size uint32, err error) {
- cid, err := strconv.ParseUint(spec[0], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
- }
- hid, err := strconv.ParseUint(spec[1], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
- }
- sz, err := strconv.ParseUint(spec[2], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
- }
- return uint32(cid), uint32(hid), uint32(sz), nil
+
+ uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
+ if err != nil {
+ fmt.Print(err)
+ } else {
+ storeOptions.UIDMap = append(storeOptions.UIDMap, uidmap...)
}
- parseIDMap := func(idMapSpec, mapSetting string) (idmap []idtools.IDMap) {
- if len(idMapSpec) > 0 {
- idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
- if len(idSpec)%3 != 0 {
- fmt.Printf("Error initializing ID mappings: %s setting is malformed.\n", mapSetting)
- return nil
- }
- for i := range idSpec {
- if i%3 != 0 {
- continue
- }
- cid, hid, size, err := parseTriple(idSpec[i : i+3])
- if err != nil {
- fmt.Printf("Error initializing ID mappings: %s setting is malformed.\n", mapSetting)
- return nil
- }
- mapping := idtools.IDMap{
- ContainerID: int(cid),
- HostID: int(hid),
- Size: int(size),
- }
- idmap = append(idmap, mapping)
- }
- }
- return idmap
+ gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
+ if err != nil {
+ fmt.Print(err)
+ } else {
+ storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...)
}
- storeOptions.UIDMap = append(storeOptions.UIDMap, parseIDMap(config.Storage.Options.RemapUIDs, "remap-uids")...)
- storeOptions.GIDMap = append(storeOptions.GIDMap, parseIDMap(config.Storage.Options.RemapGIDs, "remap-gids")...)
if os.Getenv("STORAGE_DRIVER") != "" {
storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
}
@@ -3271,3 +3239,23 @@ func init() {
ReloadConfigurationFile(defaultConfigFile, &DefaultStoreOptions)
}
+
+func GetDefaultMountOptions() ([]string, error) {
+ mountOpts := []string{
+ ".mountopt",
+ fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName),
+ }
+ for _, option := range DefaultStoreOptions.GraphDriverOptions {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return nil, err
+ }
+ key = strings.ToLower(key)
+ for _, m := range mountOpts {
+ if m == key {
+ return strings.Split(val, ","), nil
+ }
+ }
+ }
+ return nil, nil
+}
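End-to-end sketch (store, imageID and the configured value are hypothetical; assumes storage "github.com/containers/storage"): GetDefaultMountOptions surfaces a "<driver>.mountopt" setting from storage.conf, a caller can seed it into ContainerOptions.MountOpts, and store.Mount then hands the stored options to the graph driver as MountOpts.Options.

    defaults, err := storage.GetDefaultMountOptions() // e.g. ["nodev"] from "overlay.mountopt=nodev"
    if err != nil {
        return err
    }
    ctrOpts := storage.ContainerOptions{Flags: map[string]interface{}{}, MountOpts: defaults}
    ctr, err := store.CreateContainer("", []string{"demo"}, imageID, "", "", &ctrOpts)
    if err != nil {
        return err
    }
    mountpoint, err := store.Mount(ctr.ID, "") // the stored per-container options reach the driver here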
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index 2276d5531..fa52584d7 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -2,13 +2,14 @@ github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
+github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
+github.com/ostreedev/ostree-go master
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors master
github.com/pmezard/go-difflib v1.0.0
@@ -20,4 +21,5 @@ github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split v0.10.2
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+gotest.tools master
+github.com/google/go-cmp master
diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/shlex/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README
new file mode 100644
index 000000000..c86bcc066
--- /dev/null
+++ b/vendor/github.com/google/shlex/README
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for go that supports shell-style quoting,
+commenting, and escaping.
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 000000000..d98308bce
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input in to tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+ shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+ l := NewLexer(os.Stdin)
+ for token, err := l.Next(); err == nil; token, err = l.Next() {
+ // process token
+ }
+
+To access the raw token stream (which includes tokens for comments):
+
+ t := NewTokenizer(os.Stdin)
+ for token, err := t.Next(); err == nil; token, err = t.Next() {
+ // process token
+ }
+
+*/
+package shlex
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+ tokenType TokenType
+ value string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if a.tokenType != b.tokenType {
+ return false
+ }
+ return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+ spaceRunes = " \t\r\n"
+ escapingQuoteRunes = `"`
+ nonEscapingQuoteRunes = "'"
+ escapeRunes = `\`
+ commentRunes = "#"
+)
+
+// Classes of rune token
+const (
+ unknownRuneClass runeTokenClass = iota
+ spaceRuneClass
+ escapingQuoteRuneClass
+ nonEscapingQuoteRuneClass
+ escapeRuneClass
+ commentRuneClass
+ eofRuneClass
+)
+
+// Classes of lexical token
+const (
+ UnknownToken TokenType = iota
+ WordToken
+ SpaceToken
+ CommentToken
+)
+
+// Lexer state machine states
+const (
+ startState lexerState = iota // no runes have been seen
+ inWordState // processing regular runes in a word
+ escapingState // we have just consumed an escape rune; the next rune is literal
+ escapingQuotedState // we have just consumed an escape rune within a quoted string
+ quotingEscapingState // we are within a quoted string that supports escaping ("...")
+ quotingState // we are within a string that does not support escaping ('...')
+ commentState // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+ for _, runeChar := range runes {
+ typeMap[runeChar] = tokenType
+ }
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+ t := tokenClassifier{}
+ t.addRuneClass(spaceRunes, spaceRuneClass)
+ t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+ t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+ t.addRuneClass(escapeRunes, escapeRuneClass)
+ t.addRuneClass(commentRunes, commentRuneClass)
+ return t
+}
+
+// ClassifyRune classifies a rune.
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+ return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+
+ return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+ for {
+ token, err := (*Tokenizer)(l).Next()
+ if err != nil {
+ return "", err
+ }
+ switch token.tokenType {
+ case WordToken:
+ return token.value, nil
+ case CommentToken:
+ // skip comments
+ default:
+ return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+ }
+ }
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens
+type Tokenizer struct {
+ input bufio.Reader
+ classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ input := bufio.NewReader(r)
+ classifier := newDefaultClassifier()
+ return &Tokenizer{
+ input: *input,
+ classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It will panic if it encounters a rune which it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) {
+ state := startState
+ var tokenType TokenType
+ var value []rune
+ var nextRune rune
+ var nextRuneType runeTokenClass
+ var err error
+
+ for {
+ nextRune, _, err = t.input.ReadRune()
+ nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+ if err == io.EOF {
+ nextRuneType = eofRuneClass
+ err = nil
+ } else if err != nil {
+ return nil, err
+ }
+
+ switch state {
+ case startState: // no runes read yet
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ return nil, io.EOF
+ }
+ case spaceRuneClass:
+ {
+ }
+ case escapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ tokenType = WordToken
+ state = escapingState
+ }
+ case commentRuneClass:
+ {
+ tokenType = CommentToken
+ state = commentState
+ }
+ default:
+ {
+ tokenType = WordToken
+ value = append(value, nextRune)
+ state = inWordState
+ }
+ }
+ }
+ case inWordState: // in a regular word
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingState: // the rune after an escape character
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = inWordState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingQuotedState: // the next rune after an escape character, in double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = quotingEscapingState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingEscapingState: // in escaping double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingQuotedState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingState: // in non-escaping single quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case commentState: // in a comment
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ if nextRune == '\n' {
+ state = startState
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ } else {
+ value = append(value, nextRune)
+ }
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ default:
+ {
+ return nil, fmt.Errorf("Unexpected state: %v", state)
+ }
+ }
+ }
+}
+
+// Next returns the next token in the stream.
+func (t *Tokenizer) Next() (*Token, error) {
+ return t.scanStream()
+}
+
+// Split partitions a string into a slice of strings.
+func Split(s string) ([]string, error) {
+ l := NewLexer(strings.NewReader(s))
+ subStrings := make([]string, 0)
+ for {
+ word, err := l.Next()
+ if err != nil {
+ if err == io.EOF {
+ return subStrings, nil
+ }
+ return subStrings, err
+ }
+ subStrings = append(subStrings, word)
+ }
+}
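The Split helper added above gives shell-like word splitting without invoking a shell: quotes group words, single quotes disable escaping, and anything after an unquoted `#` is dropped as a comment. A minimal usage sketch follows; the import path is an assumption (the code matches the vendored google/shlex package, but the file header is not shown in this hunk), and only Split itself is taken from the diff.

```go
package main

import (
	"fmt"

	shlex "github.com/google/shlex" // assumed import path for the vendored lexer above
)

func main() {
	// Quoting groups words, single quotes are literal, and the trailing
	// comment is skipped entirely by the lexer.
	words, err := shlex.Split(`run --name "my container" FOO='a b' # ignored`)
	if err != nil {
		fmt.Println("split failed:", err)
		return
	}
	fmt.Println(words) // [run --name my container FOO=a b]
}
```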
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
index 2a31cd3c5..bb27ac936 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
@@ -9,7 +9,7 @@ func InitLabels(options []string) (string, string, error) {
return "", "", nil
}
-func GetROMountLabel() string {
+func ROMountLabel() string {
return ""
}
@@ -25,7 +25,19 @@ func SetProcessLabel(processLabel string) error {
return nil
}
-func GetFileLabel(path string) (string, error) {
+func ProcessLabel() (string, error) {
+ return "", nil
+}
+
+func SetSocketLabel(processLabel string) error {
+ return nil
+}
+
+func SocketLabel() (string, error) {
+ return "", nil
+}
+
+func FileLabel(path string) (string, error) {
return "", nil
}
@@ -41,7 +53,7 @@ func Relabel(path string, fileLabel string, shared bool) error {
return nil
}
-func GetPidLabel(pid int) (string, error) {
+func PidLabel(pid int) (string, error) {
return "", nil
}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
index 63c4edd05..de214b2d5 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
@@ -95,6 +95,17 @@ func SetProcessLabel(processLabel string) error {
return selinux.SetExecLabel(processLabel)
}
+// SetSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created
+func SetSocketLabel(processLabel string) error {
+ return selinux.SetSocketLabel(processLabel)
+}
+
+// SocketLabel retrieves the current default socket label setting
+func SocketLabel() (string, error) {
+ return selinux.SocketLabel()
+}
+
// ProcessLabel returns the process label that the kernel will assign
// to the next program executed by the current process. If "" is returned
// this indicates that the default labeling will happen for the process.
@@ -102,7 +113,7 @@ func ProcessLabel() (string, error) {
return selinux.ExecLabel()
}
-// GetFileLabel returns the label for specified path
+// FileLabel returns the label for specified path
func FileLabel(path string) (string, error) {
return selinux.FileLabel(path)
}
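A hedged sketch of the new socket-label helpers follows. Only label.SetSocketLabel and label.SocketLabel come from the diff above; the example SELinux context, the unix listener, and the thread pinning are illustrative assumptions (the sockcreate attribute is per OS thread, so a caller would normally pin the goroutine).

```go
package main

import (
	"fmt"
	"net"
	"runtime"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	// The sockcreate attribute applies per OS thread, so pin the goroutine first.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	const socketLabel = "system_u:system_r:container_t:s0:c1,c2" // assumed example context

	// Ask the kernel to apply this label to the next sockets this thread creates.
	if err := label.SetSocketLabel(socketLabel); err != nil {
		fmt.Println("SetSocketLabel:", err)
		return
	}

	// On non-SELinux builds the stubs in label.go return "" and nil.
	current, err := label.SocketLabel()
	if err != nil {
		fmt.Println("SocketLabel:", err)
		return
	}
	fmt.Println("sockcreate label:", current)

	// Sockets created by this thread from here on receive the label.
	l, err := net.Listen("unix", "/tmp/labeled.sock")
	if err != nil {
		fmt.Println("listen:", err)
		return
	}
	defer l.Close()
}
```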
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index 2cd54eac1..7832f7497 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -385,6 +385,17 @@ func SetExecLabel(label string) error {
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), label)
}
+// SetSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created
+func SetSocketLabel(label string) error {
+ return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid()), label)
+}
+
+// SocketLabel retrieves the current socket label setting
+func SocketLabel() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid()))
+}
+
// Get returns the Context as a string
func (c Context) Get() string {
if c["level"] != "" {
@@ -687,7 +698,11 @@ func Chcon(fpath string, label string, recurse bool) error {
return err
}
callback := func(p string, info os.FileInfo, err error) error {
- return SetFileLabel(p, label)
+ e := SetFileLabel(p, label)
+ if os.IsNotExist(e) {
+ return nil
+ }
+ return e
}
if recurse {
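The Chcon change above makes a recursive relabel tolerate files that disappear mid-walk: SetFileLabel failures that report ENOENT are now ignored instead of aborting the walk. A small sketch of a call site, using only the Chcon signature visible in this hunk; the path and label are placeholders.

```go
package main

import (
	"fmt"

	selinux "github.com/opencontainers/selinux/go-selinux"
)

func main() {
	const fileLabel = "system_u:object_r:container_file_t:s0:c1,c2" // assumed example context

	// Recursively relabel a directory tree; files removed while the walk is
	// in progress no longer cause the whole relabel to fail.
	if err := selinux.Chcon("/var/lib/containers/storage/volumes/data", fileLabel, true); err != nil {
		fmt.Println("relabel failed:", err)
	}
}
```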
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
index 5abf8a362..99efa155a 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
@@ -96,6 +96,19 @@ func SetExecLabel(label string) error {
return nil
}
+/*
+SetSocketLabel sets the SELinux label that the kernel will use for any sockets
+created by the current process thread.
+*/
+func SetSocketLabel(label string) error {
+ return nil
+}
+
+// SocketLabel retrieves the current socket label setting
+func SocketLabel() (string, error) {
+ return "", nil
+}
+
// Get returns the Context as a string
func (c Context) Get() string {
return ""
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
index 1c1afb119..d37965df6 100644
--- a/vendor/github.com/openshift/imagebuilder/builder.go
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -172,8 +172,11 @@ type Stage struct {
Node *parser.Node
}
-func NewStages(node *parser.Node, b *Builder) Stages {
+func NewStages(node *parser.Node, b *Builder) (Stages, error) {
var stages Stages
+ if err := b.extractHeadingArgsFromNode(node); err != nil {
+ return stages, err
+ }
for i, root := range SplitBy(node, command.From) {
name, _ := extractNameFromNode(root.Children[0])
if len(name) == 0 {
@@ -189,7 +192,36 @@ func NewStages(node *parser.Node, b *Builder) Stages {
Node: root,
})
}
- return stages
+ return stages, nil
+}
+
+func (b *Builder) extractHeadingArgsFromNode(node *parser.Node) error {
+ var args []*parser.Node
+ var children []*parser.Node
+ extract := true
+ for _, child := range node.Children {
+ if extract && child.Value == command.Arg {
+ args = append(args, child)
+ } else {
+ if child.Value == command.From {
+ extract = false
+ }
+ children = append(children, child)
+ }
+ }
+
+ for _, c := range args {
+ step := b.Step()
+ if err := step.Resolve(c); err != nil {
+ return err
+ }
+ if err := b.Run(step, NoopExecutor, false); err != nil {
+ return err
+ }
+ }
+
+ node.Children = children
+ return nil
}
func extractNameFromNode(node *parser.Node) (string, bool) {
@@ -345,6 +377,9 @@ var ErrNoFROM = fmt.Errorf("no FROM statement found")
// is set to the first From found, or left unchanged if already
// set.
func (b *Builder) From(node *parser.Node) (string, error) {
+ if err := b.extractHeadingArgsFromNode(node); err != nil {
+ return "", err
+ }
children := SplitChildren(node, command.From)
switch {
case len(children) == 0:
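NewStages now resolves ARG instructions that appear before the first FROM, so it can fail and returns an error alongside the stages. A hedged sketch of a caller handling the new two-value return is below; ParseDockerfile and NewBuilder(args) are assumed helpers from this vendored package (treat their shapes as assumptions), and only the NewStages(node, b) (Stages, error) signature is taken from the diff.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder"
)

func main() {
	// ARG before FROM is now honoured; ${BASE} in FROM is interpolated.
	dockerfile := `ARG BASE=fedora:29
FROM ${BASE}
RUN echo built from ${BASE}
`
	node, err := imagebuilder.ParseDockerfile(strings.NewReader(dockerfile)) // assumed helper
	if err != nil {
		fmt.Println("parse:", err)
		return
	}

	b := imagebuilder.NewBuilder(map[string]string{"BASE": "fedora:29"}) // assumed constructor
	stages, err := imagebuilder.NewStages(node, b)                       // new error return from the diff
	if err != nil {
		fmt.Println("stages:", err)
		return
	}
	fmt.Println("stages:", len(stages))
}
```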
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
index 068d5cc6f..f6510c2fd 100644
--- a/vendor/github.com/openshift/imagebuilder/dispatchers.go
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -27,11 +27,6 @@ var (
obRgex = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`)
)
-// dispatch with no layer / parsing. This is effectively not a command.
-func nullDispatch(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
- return nil
-}
-
// ENV foo bar
//
// Sets the environment variable foo to bar, also makes interpolation
@@ -181,6 +176,17 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri
}
name := args[0]
+
+ // Support ARG before from
+ argStrs := []string{}
+ for n, v := range b.Args {
+ argStrs = append(argStrs, n+"="+v)
+ }
+ var err error
+ if name, err = ProcessWord(name, argStrs); err != nil {
+ return err
+ }
+
// Windows cannot support a container with no base image.
if name == NoBaseImageSpecifier {
if runtime.GOOS == "windows" {
@@ -438,6 +444,7 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs
healthcheck := docker.HealthConfig{}
flags := flag.NewFlagSet("", flag.ContinueOnError)
+ flags.String("start-period", "", "")
flags.String("interval", "", "")
flags.String("timeout", "", "")
flRetries := flags.String("retries", "", "")
@@ -462,6 +469,12 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs
return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
}
+ period, err := parseOptInterval(flags.Lookup("start-period"))
+ if err != nil {
+ return err
+ }
+ healthcheck.StartPeriod = period
+
interval, err := parseOptInterval(flags.Lookup("interval"))
if err != nil {
return err
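The healthcheck dispatcher now accepts --start-period and stores the parsed duration in HealthConfig.StartPeriod. A sketch of what a Dockerfile line such as `HEALTHCHECK --start-period=30s --interval=1m --timeout=3s --retries=3 CMD curl -f http://localhost/ || exit 1` would produce; the StartPeriod assignment mirrors the diff above, while the remaining docker.HealthConfig field names are assumptions about the go-dockerclient type it references.

```go
package main

import (
	"fmt"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	hc := docker.HealthConfig{
		Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		StartPeriod: 30 * time.Second, // newly supported via --start-period
		Interval:    time.Minute,
		Timeout:     3 * time.Second,
		Retries:     3,
	}
	fmt.Printf("%+v\n", hc)
}
```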
diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go
index 83263127e..e1cd5d6d6 100644
--- a/vendor/github.com/openshift/imagebuilder/evaluator.go
+++ b/vendor/github.com/openshift/imagebuilder/evaluator.go
@@ -122,8 +122,7 @@ func (b *Step) Resolve(ast *parser.Node) error {
envs := b.Env
for ast.Next != nil {
ast = ast.Next
- var str string
- str = ast.Value
+ str := ast.Value
if replaceEnvAllowed[cmd] {
var err error
var words []string
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
index d3a8ae5fd..24822b2b7 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
@@ -18,76 +18,102 @@ import (
// #include "builtin.go.h"
import "C"
+// Repo represents a local ostree repository
type Repo struct {
- //*glib.GObject
ptr unsafe.Pointer
}
-// Converts an ostree repo struct to its C equivalent
+// isInitialized checks if the repo has been initialized
+func (r *Repo) isInitialized() bool {
+ if r == nil || r.ptr == nil {
+ return false
+ }
+ return true
+}
+
+// native converts an ostree repo struct to its C equivalent
func (r *Repo) native() *C.OstreeRepo {
- //return (*C.OstreeRepo)(r.Ptr())
+ if !r.isInitialized() {
+ return nil
+ }
return (*C.OstreeRepo)(r.ptr)
}
-// Takes a C ostree repo and converts it to a Go struct
-func repoFromNative(p *C.OstreeRepo) *Repo {
- if p == nil {
+// repoFromNative takes a C ostree repo and converts it to a Go struct
+func repoFromNative(or *C.OstreeRepo) *Repo {
+ if or == nil {
return nil
}
- //o := (*glib.GObject)(unsafe.Pointer(p))
- //r := &Repo{o}
- r := &Repo{unsafe.Pointer(p)}
+ r := &Repo{unsafe.Pointer(or)}
return r
}
-// Checks if the repo has been initialized
-func (r *Repo) isInitialized() bool {
- if r.ptr != nil {
- return true
+// OpenRepo attempts to open the repo at the given path
+func OpenRepo(path string) (*Repo, error) {
+ if path == "" {
+ return nil, errors.New("empty path")
}
- return false
-}
-// Attempts to open the repo at the given path
-func OpenRepo(path string) (*Repo, error) {
- var cerr *C.GError = nil
cpath := C.CString(path)
- pathc := C.g_file_new_for_path(cpath)
- defer C.g_object_unref(C.gpointer(pathc))
- crepo := C.ostree_repo_new(pathc)
+ defer C.free(unsafe.Pointer(cpath))
+ repoPath := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(repoPath))
+ crepo := C.ostree_repo_new(repoPath)
repo := repoFromNative(crepo)
+
+ var cerr *C.GError
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr)))
if !r {
return nil, generateError(cerr)
}
+
return repo, nil
}
-// Enable support for tombstone commits, which allow the repo to distinguish between
-// commits that were intentionally deleted and commits that were removed accidentally
-func enableTombstoneCommits(repo *Repo) error {
- var tombstoneCommits bool
- var config *C.GKeyFile = C.ostree_repo_get_config(repo.native())
- var cerr *C.GError
+// enableTombstoneCommits enables support for tombstone commits.
+//
+// This makes it possible to distinguish between commits that were
+// intentionally deleted and commits that were removed accidentally.
+func (r *Repo) enableTombstoneCommits() error {
+ if !r.isInitialized() {
+ return errors.New("repo not initialized")
+ }
- tombstoneCommits = glib.GoBool(glib.GBoolean(C.g_key_file_get_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), nil)))
+ config := C.ostree_repo_get_config(r.native())
+ groupC := C.CString("core")
+ defer C.free(unsafe.Pointer(groupC))
+ keyC := C.CString("tombstone-commits")
+ defer C.free(unsafe.Pointer(keyC))
+ valueC := C.g_key_file_get_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), nil)
+ tombstoneCommits := glib.GoBool(glib.GBoolean(valueC))
- //tombstoneCommits is false only if it really is false or if it is set to FALSE in the config file
+ // tombstoneCommits is false only if it really is false or if it is set to FALSE in the config file
if !tombstoneCommits {
- C.g_key_file_set_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), C.TRUE)
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(repo.native(), config, &cerr))) {
+ var cerr *C.GError
+ C.g_key_file_set_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), C.TRUE)
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(r.native(), config, &cerr))) {
return generateError(cerr)
}
}
return nil
}
+// generateError wraps a GLib error into a Go one.
func generateError(err *C.GError) error {
+ if err == nil {
+ return errors.New("nil GError")
+ }
+
goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err)))
_, file, line, ok := runtime.Caller(1)
if ok {
- return errors.New(fmt.Sprintf("%s:%d - %s", file, line, goErr))
- } else {
- return goErr
+ return fmt.Errorf("%s:%d - %s", file, line, goErr)
}
+ return goErr
+}
+
+// isOk wraps a return value (gboolean/gint) into a bool.
+// 0 is false/error, everything else is true/ok.
+func isOk(v C.int) bool {
+ return glib.GoBool(glib.GBoolean(v))
}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
index 734de9821..76171554d 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
@@ -33,24 +33,12 @@ _ostree_repo_file(GFile *file)
return OSTREE_REPO_FILE (file);
}
-static guint
-_gpointer_to_uint (gpointer ptr)
-{
- return GPOINTER_TO_UINT (ptr);
-}
-
static gpointer
_guint_to_pointer (guint u)
{
return GUINT_TO_POINTER (u);
}
-static void
-_g_clear_object (volatile GObject **object_ptr)
-{
- g_clear_object(object_ptr);
-}
-
static const GVariantType*
_g_variant_type (char *type)
{
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
index 55b51bfbd..04ada1792 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
@@ -1,7 +1,7 @@
package otbuiltin
import (
- "strings"
+ "errors"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
@@ -14,34 +14,42 @@ import (
// #include "builtin.go.h"
import "C"
-// Global variable for options
-var checkoutOpts checkoutOptions
-
-// Contains all of the options for checking commits out of
-// an ostree repo
+// checkoutOptions defines all of the options for checking commits
+// out of an ostree repo
+//
+// Note: while this is private, fields are public and part of the API.
type checkoutOptions struct {
- UserMode bool // Do not change file ownership or initialize extended attributes
- Union bool // Keep existing directories and unchanged files, overwriting existing filesystem
- AllowNoent bool // Do nothing if the specified filepath does not exist
- DisableCache bool // Do not update or use the internal repository uncompressed object caceh
- Whiteouts bool // Process 'whiteout' (docker style) entries
- RequireHardlinks bool // Do not fall back to full copies if hard linking fails
- Subpath string // Checkout sub-directory path
- FromFile string // Process many checkouts from the given file
+ // UserMode defines whether to check out a repo in `bare-user` mode
+ UserMode bool
+ // Union specifies whether to overwrite existing filesystem entries
+ Union bool
+ // AllowNoEnt defines whether to skip filepaths that do not exist
+ AllowNoent bool
+ // DisableCache defines whether to disable internal repository uncompressed object cache
+ DisableCache bool
+ // Whiteouts defines whether to process 'whiteout' (docker style) entries
+ Whiteouts bool
+ // RequireHardlinks defines whether to fall back to full copies if hard linking fails
+ RequireHardlinks bool
+ // Subpath specifies a sub-directory to use for checkout
+ Subpath string
+ // FromFile specifies an optional file containing many checkouts to process
+ FromFile string
}
-// Instantiates and returns a checkoutOptions struct with default values set
+// NewCheckoutOptions instantiates and returns a checkoutOptions struct with default values set
func NewCheckoutOptions() checkoutOptions {
return checkoutOptions{}
}
-// Checks out a commit with the given ref from a repository at the location of repo path to to the destination. Returns an error if the checkout could not be processed
-func Checkout(repoPath, destination, commit string, opts checkoutOptions) error {
- checkoutOpts = opts
-
+// Checkout checks out commit `commitRef` from a repository at `repoPath`,
+// writing it to `destination`. Returns an error if the checkout could not be processed.
+func Checkout(repoPath, destination, commitRef string, opts checkoutOptions) error {
var cancellable *glib.GCancellable
- ccommit := C.CString(commit)
+
+ ccommit := C.CString(commitRef)
defer C.free(unsafe.Pointer(ccommit))
+
var gerr = glib.NewGError()
cerr := (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
@@ -53,50 +61,48 @@ func Checkout(repoPath, destination, commit string, opts checkoutOptions) error
return generateError(cerr)
}
- if strings.Compare(checkoutOpts.FromFile, "") != 0 {
- err := processManyCheckouts(crepo, destination, cancellable)
- if err != nil {
- return err
- }
- } else {
- var resolvedCommit *C.char
- defer C.free(unsafe.Pointer(resolvedCommit))
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
- return generateError(cerr)
- }
- err := processOneCheckout(crepo, resolvedCommit, checkoutOpts.Subpath, destination, cancellable)
- if err != nil {
- return err
- }
+ // Multiple checkouts to process
+ if opts.FromFile != "" {
+ return processManyCheckouts(crepo, destination, cancellable)
}
- return nil
+
+ // Simple single checkout
+ var resolvedCommit *C.char
+ defer C.free(unsafe.Pointer(resolvedCommit))
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
+ return generateError(cerr)
+ }
+
+ return processOneCheckout(crepo, resolvedCommit, destination, opts, cancellable)
}
-// Processes one checkout from the repo
-func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, subpath, destination string, cancellable *glib.GCancellable) error {
+// processOneCheckout processes one checkout from the repo
+func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, destination string, opts checkoutOptions, cancellable *glib.GCancellable) error {
cdest := C.CString(destination)
defer C.free(unsafe.Pointer(cdest))
+
var gerr = glib.NewGError()
cerr := (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
- if checkoutOpts.UserMode {
+ // Process options into bitflags
+ var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
+ if opts.UserMode {
repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER
}
- if checkoutOpts.Union {
+ if opts.Union {
repoCheckoutAtOptions.overwrite_mode = C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES
}
- checkedOut := glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr)))
- if !checkedOut {
+ // Checkout commit to destination
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr))) {
return generateError(cerr)
}
return nil
}
-// process many checkouts
+// processManyCheckouts processes many checkouts in a single batch
func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error {
- return nil
+ return errors.New("batch checkouts processing: not implemented")
}
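After this refactor, Checkout takes its options by value instead of reading a package-level variable. A minimal sketch using only NewCheckoutOptions and Checkout(repoPath, destination, commitRef, opts) as declared above; the paths and the ref name are placeholders.

```go
package main

import (
	"fmt"

	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
)

func main() {
	opts := otbuiltin.NewCheckoutOptions()
	opts.UserMode = true // bare-user checkout, no ownership changes
	opts.Union = true    // keep existing directories, overwrite existing files

	if err := otbuiltin.Checkout("/var/lib/repo", "/tmp/rootfs", "exampleos/x86_64/stable", opts); err != nil {
		fmt.Println("checkout failed:", err)
	}
}
```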
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
index 9550f802c..ccaff7a10 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
@@ -59,11 +59,11 @@ func NewCommitOptions() commitOptions {
}
type OstreeRepoTransactionStats struct {
- metadata_objects_total int32
+ metadata_objects_total int32
metadata_objects_written int32
- content_objects_total int32
- content_objects_written int32
- content_bytes_written uint64
+ content_objects_total int32
+ content_objects_written int32
+ content_bytes_written uint64
}
func (repo *Repo) PrepareTransaction() (bool, error) {
@@ -125,6 +125,7 @@ func (repo *Repo) RegenerateSummary() error {
// Commits a directory, specified by commitPath, to an ostree repo as a given branch
func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) {
+ // TODO(lucab): `options` is global un-synchronized mutable state, get rid of it.
options = opts
var err error
@@ -140,7 +141,7 @@ func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string,
var cerr *C.GError
defer C.free(unsafe.Pointer(cerr))
var metadata *C.GVariant = nil
- defer func(){
+ defer func() {
if metadata != nil {
defer C.g_variant_unref(metadata)
}
@@ -196,7 +197,7 @@ func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string,
}
if options.AddDetachedMetadataString != nil {
- _, err := parseKeyValueStrings(options.AddDetachedMetadataString)
+ _, err = parseKeyValueStrings(options.AddDetachedMetadataString)
if err != nil {
goto out
}
@@ -476,7 +477,7 @@ func handleStatOverrideLine(line string, table *glib.GHashTable) error {
// Handle an individual line from a Skiplist file
func handleSkipListline(line string, table *glib.GHashTable) error {
- C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer( C.g_strdup((*C.gchar)(C.CString(line)))))
+ C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line)))))
return nil
}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
index c1ca2dc7e..6ee6671b4 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
@@ -1,11 +1,8 @@
package otbuiltin
import (
- "errors"
"strings"
"unsafe"
-
- glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
@@ -15,43 +12,37 @@ import (
// #include "builtin.go.h"
import "C"
-// Declare variables for options
-var initOpts initOptions
-
-// Contains all of the options for initializing an ostree repo
+// initOptions contains all of the options for initializing an ostree repo
+//
+// Note: while this is private, exported fields are public and part of the API.
type initOptions struct {
- Mode string // either bare, archive-z2, or bare-user
-
- repoMode C.OstreeRepoMode
+ // Mode defines repository mode: either bare, archive-z2, or bare-user
+ Mode string
}
-// Instantiates and returns an initOptions struct with default values set
+// NewInitOptions instantiates and returns an initOptions struct with default values set
func NewInitOptions() initOptions {
- io := initOptions{}
- io.Mode = "bare"
- io.repoMode = C.OSTREE_REPO_MODE_BARE
- return io
+ return initOptions{
+ Mode: "bare",
+ }
}
-// Initializes a new ostree repository at the given path. Returns true
+// Init initializes a new ostree repository at the given path. Returns true
// if the repo exists at the location, regardless of whether it was initialized
// by the function or if it already existed. Returns an error if the repo could
// not be initialized
func Init(path string, options initOptions) (bool, error) {
- initOpts = options
- err := parseMode()
+ repoMode, err := parseRepoMode(options.Mode)
if err != nil {
return false, err
}
// Create a repo struct from the path
- var cerr *C.GError
- defer C.free(unsafe.Pointer(cerr))
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
- crepo := C.ostree_repo_new(pathc)
+ repo := C.ostree_repo_new(pathc)
// If the repo exists in the filesystem, return an error but set exists to true
/* var exists C.gboolean = 0
@@ -63,28 +54,31 @@ func Init(path string, options initOptions) (bool, error) {
return false, generateError(cerr)
}*/
- cerr = nil
- created := glib.GoBool(glib.GBoolean(C.ostree_repo_create(crepo, initOpts.repoMode, nil, &cerr)))
- if !created {
- errString := generateError(cerr).Error()
- if strings.Contains(errString, "File exists") {
- return true, generateError(cerr)
+ var cErr *C.GError
+ defer C.free(unsafe.Pointer(cErr))
+ if r := C.ostree_repo_create(repo, repoMode, nil, &cErr); !isOk(r) {
+ err := generateError(cErr)
+ if strings.Contains(err.Error(), "File exists") {
+ return true, err
}
- return false, generateError(cerr)
+ return false, err
}
return true, nil
}
-// Converts the mode string to a C.OSTREE_REPO_MODE enum value
-func parseMode() error {
- if strings.EqualFold(initOpts.Mode, "bare") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_BARE
- } else if strings.EqualFold(initOpts.Mode, "bare-user") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_BARE_USER
- } else if strings.EqualFold(initOpts.Mode, "archive-z2") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_ARCHIVE_Z2
- } else {
- return errors.New("Invalid option for mode")
+// parseRepoMode converts a mode string to a C.OSTREE_REPO_MODE enum value
+func parseRepoMode(modeLabel string) (C.OstreeRepoMode, error) {
+ var cErr *C.GError
+ defer C.free(unsafe.Pointer(cErr))
+
+ cModeLabel := C.CString(modeLabel)
+ defer C.free(unsafe.Pointer(cModeLabel))
+
+ var retMode C.OstreeRepoMode
+ if r := C.ostree_repo_mode_from_string(cModeLabel, &retMode, &cErr); !isOk(r) {
+ // NOTE(lucab): zero-value for this C enum has no special/invalid meaning.
+ return C.OSTREE_REPO_MODE_BARE, generateError(cErr)
}
- return nil
+
+ return retMode, nil
}
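Repository mode parsing is now delegated to libostree via ostree_repo_mode_from_string, so any mode label the C library understands is accepted. A short sketch of Init with a non-default mode, using only NewInitOptions and Init(path, options) from the diff; the path is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
)

func main() {
	opts := otbuiltin.NewInitOptions()
	opts.Mode = "archive-z2" // parsed by ostree_repo_mode_from_string

	exists, err := otbuiltin.Init("/var/lib/repo", opts)
	if err != nil {
		// A pre-existing repo still reports exists == true alongside the error.
		fmt.Println("init:", err)
	}
	fmt.Println("repo present:", exists)
}
```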
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
index 2ceea0925..d57498215 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
@@ -2,7 +2,6 @@ package otbuiltin
import (
"fmt"
- "strings"
"time"
"unsafe"
@@ -16,13 +15,7 @@ import (
// #include "builtin.go.h"
import "C"
-// Declare variables for options
-var logOpts logOptions
-
-// Set the format of the strings in the log
-const formatString = "2006-01-02 03:04;05 -0700"
-
-// Struct for the various pieces of data in a log entry
+// LogEntry is a struct for the various pieces of data in a log entry
type LogEntry struct {
Checksum []byte
Variant []byte
@@ -39,24 +32,25 @@ func (l LogEntry) String() string {
return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant)
}
-type OstreeDumpFlags uint
+type ostreeDumpFlags uint
const (
- OSTREE_DUMP_NONE OstreeDumpFlags = 0
- OSTREE_DUMP_RAW OstreeDumpFlags = 1 << iota
+ ostreeDumpNone ostreeDumpFlags = 0
+ ostreeDumpRaw ostreeDumpFlags = 1 << iota
)
-// Contains all of the options for initializing an ostree repo
+// logOptions contains all of the options for reading the log of an ostree repo
type logOptions struct {
- Raw bool // Show raw variant data
+ // Raw determines whether to show raw variant data
+ Raw bool
}
-//Instantiates and returns a logOptions struct with default values set
+// NewLogOptions instantiates and returns a logOptions struct with default values set
func NewLogOptions() logOptions {
return logOptions{}
}
-// Show the logs of a branch starting with a given commit or ref. Returns a
+// Log shows the logs of a branch starting with a given commit or ref. Returns a
// slice of log entries on success and an error otherwise
func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
// attempt to open the repository
@@ -69,12 +63,12 @@ func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
defer C.free(unsafe.Pointer(cbranch))
var checksum *C.char
defer C.free(unsafe.Pointer(checksum))
- var flags OstreeDumpFlags = OSTREE_DUMP_NONE
var cerr *C.GError
defer C.free(unsafe.Pointer(cerr))
- if logOpts.Raw {
- flags |= OSTREE_DUMP_RAW
+ flags := ostreeDumpNone
+ if options.Raw {
+ flags |= ostreeDumpRaw
}
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) {
@@ -84,84 +78,86 @@ func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
return logCommit(repo, checksum, false, flags)
}
-func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags OstreeDumpFlags) ([]LogEntry, error) {
+func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags ostreeDumpFlags) ([]LogEntry, error) {
var variant *C.GVariant
- var parent *C.char
- defer C.free(unsafe.Pointer(parent))
var gerr = glib.NewGError()
var cerr = (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- entries := make([]LogEntry, 0, 1)
- var err error
if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) {
if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) {
return nil, nil
}
- return entries, generateError(cerr)
+ return nil, generateError(cerr)
}
- nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags)
-
- // get the parent of this commit
- parent = (*C.char)(C.ostree_commit_get_parent(variant))
+ // Get the parent of this commit
+ parent := (*C.char)(C.ostree_commit_get_parent(variant))
defer C.free(unsafe.Pointer(parent))
+
+ entries := make([]LogEntry, 0, 1)
if parent != nil {
+ var err error
entries, err = logCommit(repo, parent, true, flags)
if err != nil {
return nil, err
}
}
- entries = append(entries, *nextLogEntry)
+
+ nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags)
+ entries = append(entries, nextLogEntry)
+
return entries, nil
}
-func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags OstreeDumpFlags) *LogEntry {
- objLog := new(LogEntry)
- objLog.Checksum = []byte(C.GoString(checksum))
+func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags ostreeDumpFlags) LogEntry {
+ csum := []byte(C.GoString(checksum))
- if (flags & OSTREE_DUMP_RAW) != 0 {
- dumpVariant(objLog, variant)
- return objLog
+ if (flags & ostreeDumpRaw) != 0 {
+ return dumpVariant(variant, csum)
}
switch objectType {
case C.OSTREE_OBJECT_TYPE_COMMIT:
- dumpCommit(objLog, variant, flags)
- return objLog
+ return dumpCommit(variant, flags, csum)
default:
- return objLog
+ return LogEntry{
+ Checksum: csum,
+ }
}
}
-func dumpVariant(log *LogEntry, variant *C.GVariant) {
- var byteswappedVariant *C.GVariant
-
+func dumpVariant(variant *C.GVariant, csum []byte) LogEntry {
+ var logVariant []byte
if C.G_BYTE_ORDER != C.G_BIG_ENDIAN {
- byteswappedVariant = C.g_variant_byteswap(variant)
- log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
+ byteswappedVariant := C.g_variant_byteswap(variant)
+ logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
} else {
- log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
+ logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(variant, C.TRUE))))
+ }
+
+ return LogEntry{
+ Checksum: csum,
+ Variant: logVariant,
}
}
-func dumpCommit(log *LogEntry, variant *C.GVariant, flags OstreeDumpFlags) {
- var subject, body *C.char
+func dumpCommit(variant *C.GVariant, flags ostreeDumpFlags, csum []byte) LogEntry {
+ var subject *C.char
defer C.free(unsafe.Pointer(subject))
+ var body *C.char
defer C.free(unsafe.Pointer(body))
- var timestamp C.guint64
+ var timeBigE C.guint64
- C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timestamp)
+ C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timeBigE)
- // Timestamp is now a Unix formatted timestamp as a guint64
- timestamp = C._guint64_from_be(timestamp)
- log.Timestamp = time.Unix((int64)(timestamp), 0)
-
- if strings.Compare(C.GoString(subject), "") != 0 {
- log.Subject = C.GoString(subject)
- }
+ // Translate to a host-endian epoch and convert to Go timestamp
+ timeHostE := C._guint64_from_be(timeBigE)
+ timestamp := time.Unix((int64)(timeHostE), 0)
- if strings.Compare(C.GoString(body), "") != 0 {
- log.Body = C.GoString(body)
+ return LogEntry{
+ Timestamp: timestamp,
+ Subject: C.GoString(subject),
+ Body: C.GoString(body),
}
}
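With the global logOpts variable removed, the Raw flag is now passed through the options argument. A minimal sketch of reading a branch's history, using only NewLogOptions, Log(repoPath, branch, options), and the LogEntry fields shown above; the repo path and branch name are placeholders.

```go
package main

import (
	"fmt"

	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
)

func main() {
	opts := otbuiltin.NewLogOptions()
	opts.Raw = false // set to true to dump raw variant data instead

	entries, err := otbuiltin.Log("/var/lib/repo", "exampleos/x86_64/stable", opts)
	if err != nil {
		fmt.Println("log failed:", err)
		return
	}
	for _, e := range entries {
		fmt.Printf("%s %s %s\n", e.Checksum, e.Timestamp, e.Subject)
	}
}
```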
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
index 8dfa40a55..532522fc5 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
@@ -145,7 +145,7 @@ func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancella
}
}
- if err := enableTombstoneCommits(repo); err != nil {
+ if err := repo.enableTombstoneCommits(); err != nil {
return err
}
@@ -169,7 +169,7 @@ func pruneCommitsKeepYoungerThanDate(repo *Repo, date time.Time, cancellable *gl
var cerr = (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- if err := enableTombstoneCommits(repo); err != nil {
+ if err := repo.enableTombstoneCommits(); err != nil {
return err
}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go
+++ /dev/null
diff --git a/version/version.go b/version/version.go
index 0fd4e5aeb..45dc93d91 100644
--- a/version/version.go
+++ b/version/version.go
@@ -4,4 +4,4 @@ package version
// NOTE: remember to bump the version at the top
// of the top-level README.md file when this is
// bumped.
-const Version = "0.10.2-dev"
+const Version = "0.12.2-dev"