summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.cirrus.yml2
-rw-r--r--.github/ISSUE_TEMPLATE.md2
-rw-r--r--.gitignore1
-rwxr-xr-xAPI.md524
-rw-r--r--Makefile15
-rw-r--r--README.md48
-rw-r--r--cmd/podman/attach.go69
-rw-r--r--cmd/podman/build.go236
-rw-r--r--cmd/podman/checkpoint.go71
-rw-r--r--cmd/podman/cleanup.go79
-rw-r--r--cmd/podman/cliconfig/commands.go109
-rw-r--r--cmd/podman/cliconfig/config.go554
-rw-r--r--cmd/podman/cliconfig/create.go26
-rw-r--r--cmd/podman/commands.go245
-rw-r--r--cmd/podman/commands_remoteclient.go51
-rw-r--r--cmd/podman/commit.go94
-rw-r--r--cmd/podman/common.go834
-rw-r--r--cmd/podman/common_test.go26
-rw-r--r--cmd/podman/container.go44
-rw-r--r--cmd/podman/containers_prune.go41
-rw-r--r--cmd/podman/cp.go291
-rw-r--r--cmd/podman/create.go165
-rw-r--r--cmd/podman/create_cli.go11
-rw-r--r--cmd/podman/diff.go62
-rw-r--r--cmd/podman/docker/types.go272
-rw-r--r--cmd/podman/errors.go23
-rw-r--r--cmd/podman/exec.go86
-rw-r--r--cmd/podman/exists.go104
-rw-r--r--cmd/podman/export.go54
-rw-r--r--cmd/podman/formats/formats.go5
-rw-r--r--cmd/podman/generate.go27
-rw-r--r--cmd/podman/generate_kube.go47
-rw-r--r--cmd/podman/history.go70
-rw-r--r--cmd/podman/image.go55
-rw-r--r--cmd/podman/imagefilters/filters.go2
-rw-r--r--cmd/podman/images.go148
-rw-r--r--cmd/podman/images_prune.go39
-rw-r--r--cmd/podman/import.go61
-rw-r--r--cmd/podman/info.go58
-rw-r--r--cmd/podman/inspect.go77
-rw-r--r--cmd/podman/kill.go71
-rw-r--r--cmd/podman/libpodruntime/runtime.go95
-rw-r--r--cmd/podman/load.go110
-rw-r--r--cmd/podman/login.go104
-rw-r--r--cmd/podman/logout.go53
-rw-r--r--cmd/podman/logs.go94
-rw-r--r--cmd/podman/main.go357
-rw-r--r--cmd/podman/mount.go67
-rw-r--r--cmd/podman/pause.go52
-rw-r--r--cmd/podman/play.go27
-rw-r--r--cmd/podman/play_kube.go80
-rw-r--r--cmd/podman/pod.go57
-rw-r--r--cmd/podman/pod_create.go192
-rw-r--r--cmd/podman/pod_inspect.go53
-rw-r--r--cmd/podman/pod_kill.go98
-rw-r--r--cmd/podman/pod_pause.go92
-rw-r--r--cmd/podman/pod_ps.go171
-rw-r--r--cmd/podman/pod_restart.go90
-rw-r--r--cmd/podman/pod_rm.go90
-rw-r--r--cmd/podman/pod_start.go91
-rw-r--r--cmd/podman/pod_stats.go205
-rw-r--r--cmd/podman/pod_stop.go100
-rw-r--r--cmd/podman/pod_top.go58
-rw-r--r--cmd/podman/pod_unpause.go91
-rw-r--r--cmd/podman/port.go80
-rw-r--r--cmd/podman/ps.go158
-rw-r--r--cmd/podman/pull.go168
-rw-r--r--cmd/podman/push.go124
-rw-r--r--cmd/podman/refresh.go39
-rw-r--r--cmd/podman/restart.go81
-rw-r--r--cmd/podman/restore.go67
-rw-r--r--cmd/podman/rm.go103
-rw-r--r--cmd/podman/rmi.go116
-rw-r--r--cmd/podman/run.go94
-rw-r--r--cmd/podman/run_test.go57
-rw-r--r--cmd/podman/runlabel.go150
-rw-r--r--cmd/podman/save.go183
-rw-r--r--cmd/podman/search.go314
-rw-r--r--cmd/podman/shared/container.go9
-rw-r--r--cmd/podman/shared/pod.go4
-rw-r--r--cmd/podman/sign.go55
-rw-r--r--cmd/podman/start.go126
-rw-r--r--cmd/podman/stats.go81
-rw-r--r--cmd/podman/stop.go71
-rw-r--r--cmd/podman/system.go35
-rw-r--r--cmd/podman/system_prune.go56
-rw-r--r--cmd/podman/system_renumber.go49
-rw-r--r--cmd/podman/tag.go37
-rw-r--r--cmd/podman/top.go57
-rw-r--r--cmd/podman/trust.go369
-rw-r--r--cmd/podman/trust_set_show.go344
-rw-r--r--cmd/podman/umount.go66
-rw-r--r--cmd/podman/unpause.go51
-rw-r--r--cmd/podman/utils.go35
-rw-r--r--cmd/podman/varlink.go49
-rw-r--r--cmd/podman/varlink/io.podman.varlink285
-rw-r--r--cmd/podman/varlink_dummy.go10
-rw-r--r--cmd/podman/version.go43
-rw-r--r--cmd/podman/volume.go41
-rw-r--r--cmd/podman/volume_create.go100
-rw-r--r--cmd/podman/volume_inspect.go73
-rw-r--r--cmd/podman/volume_ls.go115
-rw-r--r--cmd/podman/volume_prune.go79
-rw-r--r--cmd/podman/volume_rm.go87
-rw-r--r--cmd/podman/wait.go54
-rw-r--r--commands.md3
-rw-r--r--completions/bash/podman2
-rwxr-xr-xcontrib/cirrus/integration_test.sh2
-rw-r--r--contrib/cirrus/lib.sh2
-rw-r--r--contrib/cirrus/packer/Makefile8
-rw-r--r--contrib/cirrus/packer/fedora_setup.sh1
-rw-r--r--contrib/cirrus/packer/ubuntu_setup.sh9
-rwxr-xr-xcontrib/cirrus/setup_environment.sh5
-rw-r--r--contrib/perftest/main.go4
-rw-r--r--docs/libpod.conf.5.md12
-rw-r--r--docs/podman-attach.1.md2
-rw-r--r--docs/podman-build.1.md6
-rw-r--r--docs/podman-commit.1.md2
-rw-r--r--docs/podman-container-checkpoint.1.md2
-rw-r--r--docs/podman-container-cleanup.1.md5
-rw-r--r--docs/podman-container-restore.1.md2
-rw-r--r--docs/podman-cp.1.md80
-rw-r--r--docs/podman-create.1.md51
-rw-r--r--docs/podman-diff.1.md6
-rw-r--r--docs/podman-exec.1.md10
-rw-r--r--docs/podman-generate-kube.1.md4
-rw-r--r--docs/podman-image-trust.1.md30
-rw-r--r--docs/podman-inspect.1.md2
-rw-r--r--docs/podman-kill.1.md2
-rw-r--r--docs/podman-load.1.md4
-rw-r--r--docs/podman-login.1.md14
-rw-r--r--docs/podman-logs.1.md11
-rw-r--r--docs/podman-mount.1.md2
-rw-r--r--docs/podman-play-kube.1.md4
-rw-r--r--docs/podman-pod-create.1.md4
-rw-r--r--docs/podman-pod-inspect.1.md1
-rw-r--r--docs/podman-pod-kill.1.md2
-rw-r--r--docs/podman-pod-pause.1.md2
-rw-r--r--docs/podman-pod-ps.1.md2
-rw-r--r--docs/podman-pod-restart.1.md6
-rw-r--r--docs/podman-pod-rm.1.md2
-rw-r--r--docs/podman-pod-start.1.md2
-rw-r--r--docs/podman-pod-stop.1.md2
-rw-r--r--docs/podman-pod-top.1.md2
-rw-r--r--docs/podman-pod-unpause.1.md2
-rw-r--r--docs/podman-port.1.md10
-rw-r--r--docs/podman-ps.1.md2
-rw-r--r--docs/podman-pull.1.md4
-rw-r--r--docs/podman-restart.1.md2
-rw-r--r--docs/podman-rm.1.md19
-rw-r--r--docs/podman-rmi.1.md10
-rw-r--r--docs/podman-run.1.md53
-rw-r--r--docs/podman-start.1.md8
-rw-r--r--docs/podman-stats.1.md11
-rw-r--r--docs/podman-stop.1.md8
-rw-r--r--docs/podman-system-renumber.1.md29
-rw-r--r--docs/podman-top.1.md2
-rw-r--r--docs/podman-umount.1.md2
-rw-r--r--docs/podman-wait.1.md4
-rw-r--r--docs/podman.1.md1
-rwxr-xr-xhack/get_ci_vm.sh33
-rwxr-xr-xhack/tree_status.sh2
-rw-r--r--libpod.conf8
-rw-r--r--libpod/adapter/runtime.go157
-rw-r--r--libpod/adapter/runtime_remote.go434
-rw-r--r--libpod/boltdb_state.go92
-rw-r--r--libpod/boltdb_state_internal.go7
-rw-r--r--libpod/container.go15
-rw-r--r--libpod/container_api.go176
-rw-r--r--libpod/container_attach_linux.go4
-rw-r--r--libpod/container_commit.go2
-rw-r--r--libpod/container_graph.go97
-rw-r--r--libpod/container_internal.go337
-rw-r--r--libpod/container_internal_linux.go69
-rw-r--r--libpod/errors.go16
-rw-r--r--libpod/image/image.go76
-rw-r--r--libpod/image/pull.go13
-rw-r--r--libpod/image/search.go277
-rw-r--r--libpod/image/utils.go43
-rw-r--r--libpod/in_memory_state.go64
-rw-r--r--libpod/kube.go16
-rw-r--r--libpod/lock/in_memory_locks.go11
-rw-r--r--libpod/lock/lock.go14
-rw-r--r--libpod/lock/shm/shm_lock.c36
-rw-r--r--libpod/lock/shm/shm_lock.go16
-rw-r--r--libpod/lock/shm/shm_lock.h1
-rw-r--r--libpod/lock/shm/shm_lock_test.go28
-rw-r--r--libpod/lock/shm_lock_manager_linux.go7
-rw-r--r--libpod/lock/shm_lock_manager_unsupported.go5
-rw-r--r--libpod/oci.go1
-rw-r--r--libpod/options.go24
-rw-r--r--libpod/pod_internal.go96
-rw-r--r--libpod/runtime.go79
-rw-r--r--libpod/runtime_ctr.go53
-rw-r--r--libpod/runtime_img.go46
-rw-r--r--libpod/runtime_pod_linux.go9
-rw-r--r--libpod/runtime_renumber.go57
-rw-r--r--libpod/runtime_volume.go80
-rw-r--r--libpod/runtime_volume_linux.go31
-rw-r--r--libpod/state.go24
-rw-r--r--libpod/state_test.go72
-rw-r--r--libpod/storage.go5
-rw-r--r--libpod/volume.go5
-rw-r--r--libpod/volume_internal.go7
-rw-r--r--pkg/adapter/client.go (renamed from libpod/adapter/client.go)0
-rw-r--r--pkg/adapter/containers_remote.go (renamed from libpod/adapter/containers_remote.go)31
-rw-r--r--pkg/adapter/images_remote.go (renamed from libpod/adapter/images_remote.go)0
-rw-r--r--pkg/adapter/info_remote.go (renamed from libpod/adapter/info_remote.go)0
-rw-r--r--pkg/adapter/pods.go323
-rw-r--r--pkg/adapter/pods_remote.go401
-rw-r--r--pkg/adapter/runtime.go335
-rw-r--r--pkg/adapter/runtime_remote.go798
-rw-r--r--pkg/adapter/shortcuts/shortcuts.go27
-rw-r--r--pkg/adapter/volumes_remote.go33
-rw-r--r--pkg/registries/registries.go16
-rw-r--r--pkg/rootless/rootless_linux.c63
-rw-r--r--pkg/spec/createconfig.go72
-rw-r--r--pkg/spec/spec.go58
-rw-r--r--pkg/tracing/tracing.go28
-rw-r--r--pkg/util/utils.go10
-rw-r--r--pkg/varlinkapi/config.go9
-rw-r--r--pkg/varlinkapi/containers.go77
-rw-r--r--pkg/varlinkapi/containers_create.go7
-rw-r--r--pkg/varlinkapi/images.go635
-rw-r--r--pkg/varlinkapi/pods.go56
-rw-r--r--pkg/varlinkapi/system.go25
-rw-r--r--pkg/varlinkapi/transfers.go2
-rw-r--r--pkg/varlinkapi/util.go35
-rw-r--r--pkg/varlinkapi/volumes.go90
-rw-r--r--test/README.md2
-rw-r--r--test/bin2img/bin2img.go229
-rw-r--r--test/copyimg/copyimg.go204
-rw-r--r--test/e2e/cp_test.go115
-rw-r--r--test/e2e/create_test.go8
-rw-r--r--test/e2e/e2e.coverprofile11
-rw-r--r--test/e2e/libpod_suite_test.go4
-rw-r--r--test/e2e/load_test.go6
-rw-r--r--test/e2e/pod_infra_container_test.go51
-rw-r--r--test/e2e/pod_start_test.go1
-rw-r--r--test/e2e/pod_stats_test.go23
-rw-r--r--test/e2e/pod_stop_test.go1
-rw-r--r--test/e2e/pull_test.go16
-rw-r--r--test/e2e/restart_test.go2
-rw-r--r--test/e2e/rm_test.go5
-rw-r--r--test/e2e/rmi_test.go2
-rw-r--r--test/e2e/rootless_test.go45
-rw-r--r--test/e2e/run_networking_test.go39
-rw-r--r--test/e2e/run_test.go2
-rw-r--r--test/e2e/save_test.go2
-rw-r--r--test/e2e/search_test.go4
-rw-r--r--test/e2e/volume_create_test.go2
-rw-r--r--test/e2e/volume_inspect_test.go2
-rw-r--r--test/e2e/volume_ls_test.go2
-rw-r--r--test/e2e/volume_rm_test.go3
-rw-r--r--test/trust_set_test.json8
-rw-r--r--test/utils/podmantest_test.go2
-rw-r--r--test/utils/utils.go14
-rw-r--r--transfer.md4
-rw-r--r--troubleshooting.md63
-rw-r--r--utils/utils.go42
-rw-r--r--vendor.conf22
-rw-r--r--vendor/github.com/VividCortex/ewma/LICENSE (renamed from vendor/github.com/urfave/cli/LICENSE)12
-rw-r--r--vendor/github.com/VividCortex/ewma/README.md140
-rw-r--r--vendor/github.com/VividCortex/ewma/ewma.go126
-rw-r--r--vendor/github.com/containers/buildah/buildah.go31
-rw-r--r--vendor/github.com/containers/buildah/chroot/run.go23
-rw-r--r--vendor/github.com/containers/buildah/commit.go13
-rw-r--r--vendor/github.com/containers/buildah/config.go47
-rw-r--r--vendor/github.com/containers/buildah/docker/types.go18
-rw-r--r--vendor/github.com/containers/buildah/image.go40
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/build.go36
-rw-r--r--vendor/github.com/containers/buildah/new.go68
-rw-r--r--vendor/github.com/containers/buildah/pkg/cli/common.go433
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse.go185
-rw-r--r--vendor/github.com/containers/buildah/pull.go96
-rw-r--r--vendor/github.com/containers/buildah/run.go29
-rw-r--r--vendor/github.com/containers/buildah/unshare/unshare.c140
-rw-r--r--vendor/github.com/containers/buildah/unshare/unshare.go6
-rw-r--r--vendor/github.com/containers/buildah/util.go91
-rw-r--r--vendor/github.com/containers/buildah/util/util.go86
-rw-r--r--vendor/github.com/containers/buildah/vendor.conf107
-rw-r--r--vendor/github.com/containers/image/copy/copy.go187
-rw-r--r--vendor/github.com/containers/image/docker/docker_image_dest.go20
-rw-r--r--vendor/github.com/containers/image/docker/docker_image_src.go6
-rw-r--r--vendor/github.com/containers/image/docker/tarfile/src.go76
-rw-r--r--vendor/github.com/containers/image/ostree/ostree_src.go13
-rw-r--r--vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go23
-rw-r--r--vendor/github.com/containers/image/storage/storage_image.go73
-rw-r--r--vendor/github.com/containers/image/storage/storage_reference.go22
-rw-r--r--vendor/github.com/containers/image/storage/storage_transport.go5
-rw-r--r--vendor/github.com/containers/image/vendor.conf4
-rw-r--r--vendor/github.com/containers/storage/containers.go3
-rw-r--r--vendor/github.com/containers/storage/containers_ffjson.go2
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/overlay.go2
-rw-r--r--vendor/github.com/containers/storage/errors.go2
-rw-r--r--vendor/github.com/containers/storage/images.go197
-rw-r--r--vendor/github.com/containers/storage/images_ffjson.go2
-rw-r--r--vendor/github.com/containers/storage/layers.go4
-rw-r--r--vendor/github.com/containers/storage/layers_ffjson.go2
-rw-r--r--vendor/github.com/containers/storage/lockfile.go52
-rw-r--r--vendor/github.com/containers/storage/lockfile_unix.go117
-rw-r--r--vendor/github.com/containers/storage/lockfile_windows.go5
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive.go107
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/archive.go92
-rw-r--r--vendor/github.com/containers/storage/pkg/config/config.go96
-rw-r--r--vendor/github.com/containers/storage/store.go536
-rw-r--r--vendor/github.com/containers/storage/vendor.conf9
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/LICENSE13
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/README.md23
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/trap_others.go15
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/trap_windows.go98
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go46
-rw-r--r--vendor/github.com/mattn/go-isatty/LICENSE9
-rw-r--r--vendor/github.com/mattn/go-isatty/README.md50
-rw-r--r--vendor/github.com/mattn/go-isatty/doc.go2
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_appengine.go15
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_bsd.go18
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_linux.go18
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go19
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_others.go10
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_solaris.go16
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_windows.go94
-rw-r--r--vendor/github.com/mattn/go-runewidth/LICENSE21
-rw-r--r--vendor/github.com/mattn/go-runewidth/README.mkd27
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth.go977
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_appengine.go8
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_js.go9
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_posix.go79
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_windows.go28
-rw-r--r--vendor/github.com/openshift/imagebuilder/README.md2
-rw-r--r--vendor/github.com/openshift/imagebuilder/builder.go31
-rw-r--r--vendor/github.com/openshift/imagebuilder/dispatchers.go13
-rw-r--r--vendor/github.com/opentracing/opentracing-go/LICENSE201
-rw-r--r--vendor/github.com/opentracing/opentracing-go/README.md171
-rw-r--r--vendor/github.com/opentracing/opentracing-go/ext/tags.go210
-rw-r--r--vendor/github.com/opentracing/opentracing-go/globaltracer.go42
-rw-r--r--vendor/github.com/opentracing/opentracing-go/gocontext.go54
-rw-r--r--vendor/github.com/opentracing/opentracing-go/log/field.go269
-rw-r--r--vendor/github.com/opentracing/opentracing-go/log/util.go54
-rw-r--r--vendor/github.com/opentracing/opentracing-go/noop.go64
-rw-r--r--vendor/github.com/opentracing/opentracing-go/propagation.go176
-rw-r--r--vendor/github.com/opentracing/opentracing-go/span.go189
-rw-r--r--vendor/github.com/opentracing/opentracing-go/tracer.go305
-rw-r--r--vendor/github.com/spf13/cobra/LICENSE.txt174
-rw-r--r--vendor/github.com/spf13/cobra/README.md736
-rw-r--r--vendor/github.com/spf13/cobra/args.go89
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.go584
-rw-r--r--vendor/github.com/spf13/cobra/cobra.go200
-rw-r--r--vendor/github.com/spf13/cobra/command.go1517
-rw-r--r--vendor/github.com/spf13/cobra/command_notwin.go5
-rw-r--r--vendor/github.com/spf13/cobra/command_win.go20
-rw-r--r--vendor/github.com/spf13/cobra/zsh_completions.go126
-rw-r--r--vendor/github.com/uber/jaeger-client-go/LICENSE201
-rw-r--r--vendor/github.com/uber/jaeger-client-go/README.md270
-rw-r--r--vendor/github.com/uber/jaeger-client-go/baggage_setter.go77
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/config.go395
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/config_env.go221
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/options.go148
-rw-r--r--vendor/github.com/uber/jaeger-client-go/constants.go88
-rw-r--r--vendor/github.com/uber/jaeger-client-go/context.go258
-rw-r--r--vendor/github.com/uber/jaeger-client-go/contrib_observer.go56
-rw-r--r--vendor/github.com/uber/jaeger-client-go/doc.go24
-rw-r--r--vendor/github.com/uber/jaeger-client-go/header.go64
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go101
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go157
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go71
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go81
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go99
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go216
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go32
-rw-r--r--vendor/github.com/uber/jaeger-client-go/interop.go55
-rw-r--r--vendor/github.com/uber/jaeger-client-go/jaeger_tag.go84
-rw-r--r--vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go179
-rw-r--r--vendor/github.com/uber/jaeger-client-go/log/logger.go90
-rw-r--r--vendor/github.com/uber/jaeger-client-go/logger.go53
-rw-r--r--vendor/github.com/uber/jaeger-client-go/metrics.go107
-rw-r--r--vendor/github.com/uber/jaeger-client-go/observer.go88
-rw-r--r--vendor/github.com/uber/jaeger-client-go/process.go29
-rw-r--r--vendor/github.com/uber/jaeger-client-go/propagation.go300
-rw-r--r--vendor/github.com/uber/jaeger-client-go/reference.go23
-rw-r--r--vendor/github.com/uber/jaeger-client-go/reporter.go289
-rw-r--r--vendor/github.com/uber/jaeger-client-go/reporter_options.go69
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md5
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go16
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go63
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go124
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go101
-rw-r--r--vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go171
-rw-r--r--vendor/github.com/uber/jaeger-client-go/sampler.go557
-rw-r--r--vendor/github.com/uber/jaeger-client-go/sampler_options.go81
-rw-r--r--vendor/github.com/uber/jaeger-client-go/span.go249
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go411
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go23
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go21
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go435
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go18
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go154
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go242
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go18
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go1838
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go18
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go410
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go873
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go32
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go1247
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go446
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/README.md7
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go142
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go514
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go815
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/exception.go44
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go79
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go31
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/numeric.go164
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/processor.go30
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/protocol.go175
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go78
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go25
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go69
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/serializer.go75
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go1337
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/transport.go68
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go90
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go39
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/type.go69
-rw-r--r--vendor/github.com/uber/jaeger-client-go/tracer.go441
-rw-r--r--vendor/github.com/uber/jaeger-client-go/tracer_options.go159
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport.go38
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport/doc.go23
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport/http.go163
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport_udp.go131
-rw-r--r--vendor/github.com/uber/jaeger-client-go/utils/http_json.go54
-rw-r--r--vendor/github.com/uber/jaeger-client-go/utils/localip.go84
-rw-r--r--vendor/github.com/uber/jaeger-client-go/utils/rand.go46
-rw-r--r--vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go77
-rw-r--r--vendor/github.com/uber/jaeger-client-go/utils/udp_client.go98
-rw-r--r--vendor/github.com/uber/jaeger-client-go/utils/utils.go87
-rw-r--r--vendor/github.com/uber/jaeger-client-go/zipkin.go76
-rw-r--r--vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go322
-rw-r--r--vendor/github.com/uber/jaeger-lib/LICENSE201
-rw-r--r--vendor/github.com/uber/jaeger-lib/README.md27
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/counter.go28
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/factory.go78
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/gauge.go28
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/histogram.go28
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/keys.go35
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/metrics.go137
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go43
-rw-r--r--vendor/github.com/uber/jaeger-lib/metrics/timer.go33
-rw-r--r--vendor/github.com/urfave/cli/README.md1526
-rw-r--r--vendor/github.com/urfave/cli/app.go508
-rw-r--r--vendor/github.com/urfave/cli/category.go44
-rw-r--r--vendor/github.com/urfave/cli/cli.go22
-rw-r--r--vendor/github.com/urfave/cli/command.go383
-rw-r--r--vendor/github.com/urfave/cli/context.go287
-rw-r--r--vendor/github.com/urfave/cli/errors.go115
-rw-r--r--vendor/github.com/urfave/cli/flag.go786
-rw-r--r--vendor/github.com/urfave/cli/flag_generated.go640
-rw-r--r--vendor/github.com/urfave/cli/funcs.go44
-rw-r--r--vendor/github.com/urfave/cli/help.go345
-rw-r--r--vendor/github.com/urfave/cli/sort.go29
-rw-r--r--vendor/github.com/vbauerster/mpb/LICENSE29
-rw-r--r--vendor/github.com/vbauerster/mpb/README.md117
-rw-r--r--vendor/github.com/vbauerster/mpb/bar.go455
-rw-r--r--vendor/github.com/vbauerster/mpb/bar_option.go124
-rw-r--r--vendor/github.com/vbauerster/mpb/cwriter/writer.go78
-rw-r--r--vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go13
-rw-r--r--vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go77
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/counters.go206
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/decorator.go144
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/doc.go25
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/elapsed.go68
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/eta.go207
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/moving-average.go66
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/name.go45
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/percentage.go39
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/speed.go270
-rw-r--r--vendor/github.com/vbauerster/mpb/doc.go6
-rw-r--r--vendor/github.com/vbauerster/mpb/internal/percentage.go10
-rw-r--r--vendor/github.com/vbauerster/mpb/internal/round.go49
-rw-r--r--vendor/github.com/vbauerster/mpb/options.go95
-rw-r--r--vendor/github.com/vbauerster/mpb/options_go1.7.go15
-rw-r--r--vendor/github.com/vbauerster/mpb/priority_queue.go40
-rw-r--r--vendor/github.com/vbauerster/mpb/progress.go251
-rw-r--r--vendor/github.com/vbauerster/mpb/progress_posix.go70
-rw-r--r--vendor/github.com/vbauerster/mpb/progress_windows.go43
-rw-r--r--vendor/github.com/vbauerster/mpb/proxyreader.go22
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/LICENSE12
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/README.md177
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/format.go125
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/pb.go503
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go11
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go143
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go118
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/pool.go104
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go45
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go29
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/reader.go25
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/runecount.go17
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go9
-rw-r--r--vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go13
-rw-r--r--vendor/gopkg.in/fsnotify.v1/.editorconfig5
-rw-r--r--vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md11
-rw-r--r--vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md8
-rw-r--r--vendor/gopkg.in/fsnotify.v1/.gitignore6
-rw-r--r--vendor/gopkg.in/fsnotify.v1/.travis.yml30
-rw-r--r--vendor/gopkg.in/fsnotify.v1/AUTHORS52
-rw-r--r--vendor/gopkg.in/fsnotify.v1/CHANGELOG.md317
-rw-r--r--vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md77
-rw-r--r--vendor/gopkg.in/fsnotify.v1/README.md37
-rw-r--r--vendor/gopkg.in/fsnotify.v1/example_test.go42
-rw-r--r--vendor/gopkg.in/fsnotify.v1/fsnotify.go4
-rw-r--r--vendor/gopkg.in/fsnotify.v1/fsnotify_test.go70
-rw-r--r--vendor/gopkg.in/fsnotify.v1/inotify.go66
-rw-r--r--vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go229
-rw-r--r--vendor/gopkg.in/fsnotify.v1/inotify_test.go449
-rw-r--r--vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go147
-rw-r--r--vendor/gopkg.in/fsnotify.v1/integration_test.go1237
-rw-r--r--vendor/gopkg.in/fsnotify.v1/kqueue.go62
520 files changed, 42278 insertions, 15706 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index cb1279331..ad9edd404 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -29,7 +29,7 @@ env:
###
FEDORA_CACHE_IMAGE_NAME: "fedora-29-libpod-7f4cd1f7"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-28-libpod-7f4cd1f7"
- UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-libpod-7f4cd1f7"
+ UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-libpod-84514d8b"
# RHEL_CACHE_IMAGE_NAME: "rhel-8-notready"
PRIOR_RHEL_CACHE_IMAGE_NAME: "rhel-7-libpod-7f4cd1f7"
# CENTOS_CACHE_IMAGE_NAME: "centos-7-notready"
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index a7663f3e3..2946f0b91 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -52,7 +52,7 @@ Briefly describe the problem you are having in a few paragraphs.
(paste your output here)
```
-**Output of `podman info`:**
+**Output of `podman info --debug`:**
```
(paste your output here)
diff --git a/.gitignore b/.gitignore
index c35cd3383..020621558 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,3 +17,4 @@ __pycache__
/cmd/podman/varlink/ioprojectatomicpodman.go
/cmd/podman/varlink/iopodman.go
.gopathok
+test/e2e/e2e.coverprofile
diff --git a/API.md b/API.md
index f32325d37..c4f23c9bd 100755
--- a/API.md
+++ b/API.md
@@ -3,9 +3,7 @@ Podman Service Interface and API description. The master version of this docume
in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in the upstream libpod repository.
## Index
-[func AttachToContainer() NotImplemented](#AttachToContainer)
-
-[func BuildImage(build: BuildInfo) BuildResponse](#BuildImage)
+[func BuildImage(build: BuildInfo) MoreResponse](#BuildImage)
[func Commit(name: string, image_name: string, changes: []string, author: string, message: string, pause: bool, manifestType: string) string](#Commit)
@@ -27,8 +25,6 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func CreateContainer(create: Create) string](#CreateContainer)
-[func CreateImage() NotImplemented](#CreateImage)
-
[func CreatePod(create: PodCreate) string](#CreatePod)
[func DeleteStoppedContainers() []string](#DeleteStoppedContainers)
@@ -39,19 +35,15 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func ExportImage(name: string, destination: string, compress: bool, tags: []string) string](#ExportImage)
-[func GenerateKube() NotImplemented](#GenerateKube)
-
-[func GenerateKubeService() NotImplemented](#GenerateKubeService)
-
[func GetAttachSockets(name: string) Sockets](#GetAttachSockets)
-[func GetContainer(name: string) ListContainerData](#GetContainer)
+[func GetContainer(id: string) Container](#GetContainer)
[func GetContainerLogs(name: string) []string](#GetContainerLogs)
[func GetContainerStats(name: string) ContainerStats](#GetContainerStats)
-[func GetImage(name: string) ImageInList](#GetImage)
+[func GetImage(id: string) Image](#GetImage)
[func GetInfo() PodmanInfo](#GetInfo)
@@ -59,15 +51,21 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func GetPodStats(name: string) string, ContainerStats](#GetPodStats)
-[func GetVersion() Version](#GetVersion)
+[func GetPodsByContext(all: bool, latest: bool, args: []string) []string](#GetPodsByContext)
+
+[func GetVersion() string, string, string, string, string, int](#GetVersion)
+
+[func GetVolumes(args: []string, all: bool) Volume](#GetVolumes)
[func HistoryImage(name: string) ImageHistory](#HistoryImage)
[func ImageExists(name: string) int](#ImageExists)
+[func ImageSave(options: ImageSaveOptions) MoreResponse](#ImageSave)
+
[func ImagesPrune(all: bool) []string](#ImagesPrune)
-[func ImportImage(source: string, reference: string, message: string, changes: []string) string](#ImportImage)
+[func ImportImage(source: string, reference: string, message: string, changes: []string, delete: bool) string](#ImportImage)
[func InspectContainer(name: string) string](#InspectContainer)
@@ -83,45 +81,43 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func ListContainerMounts() map[string]](#ListContainerMounts)
-[func ListContainerPorts(name: string) NotImplemented](#ListContainerPorts)
-
[func ListContainerProcesses(name: string, opts: []string) []string](#ListContainerProcesses)
-[func ListContainers() ListContainerData](#ListContainers)
+[func ListContainers() Container](#ListContainers)
-[func ListImages() ImageInList](#ListImages)
+[func ListImages() Image](#ListImages)
[func ListPods() ListPodData](#ListPods)
+[func LoadImage(name: string, inputFile: string, quiet: bool, deleteFile: bool) MoreResponse](#LoadImage)
+
[func MountContainer(name: string) string](#MountContainer)
[func PauseContainer(name: string) string](#PauseContainer)
[func PausePod(name: string) string](#PausePod)
-[func Ping() StringResponse](#Ping)
+[func PodStateData(name: string) string](#PodStateData)
+
+[func PullImage(name: string, certDir: string, creds: string, signaturePolicy: string, tlsVerify: ) MoreResponse](#PullImage)
-[func PullImage(name: string, certDir: string, creds: string, signaturePolicy: string, tlsVerify: bool) string](#PullImage)
+[func PushImage(name: string, tag: string, tlsverify: , signaturePolicy: string, creds: string, certDir: string, compress: bool, format: string, removeSignatures: bool, signBy: string) MoreResponse](#PushImage)
-[func PushImage(name: string, tag: string, tlsverify: bool, signaturePolicy: string, creds: string, certDir: string, compress: bool, format: string, removeSignatures: bool, signBy: string) string](#PushImage)
+[func ReceiveFile(path: string, delete: bool) int](#ReceiveFile)
-[func RemoveContainer(name: string, force: bool) string](#RemoveContainer)
+[func RemoveContainer(name: string, force: bool, removeVolumes: bool) string](#RemoveContainer)
[func RemoveImage(name: string, force: bool) string](#RemoveImage)
[func RemovePod(name: string, force: bool) string](#RemovePod)
-[func RenameContainer() NotImplemented](#RenameContainer)
-
-[func ReplayKube() NotImplemented](#ReplayKube)
-
-[func ResizeContainerTty() NotImplemented](#ResizeContainerTty)
-
[func RestartContainer(name: string, timeout: int) string](#RestartContainer)
[func RestartPod(name: string) string](#RestartPod)
-[func SearchImage(name: string, limit: int) ImageSearch](#SearchImage)
+[func SearchImages(query: string, limit: , tlsVerify: , filter: ImageSearchFilter) ImageSearchResult](#SearchImages)
+
+[func SendFile(type: string, length: int) string](#SendFile)
[func StartContainer(name: string) string](#StartContainer)
@@ -133,23 +129,25 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func TagImage(name: string, tagged: string) string](#TagImage)
-[func TopPod() NotImplemented](#TopPod)
-
[func UnmountContainer(name: string, force: bool) ](#UnmountContainer)
[func UnpauseContainer(name: string) string](#UnpauseContainer)
[func UnpausePod(name: string) string](#UnpausePod)
-[func UpdateContainer() NotImplemented](#UpdateContainer)
+[func VolumeCreate(options: VolumeCreateOpts) string](#VolumeCreate)
-[func WaitContainer(name: string) int](#WaitContainer)
+[func VolumeRemove(options: VolumeRemoveOpts) []string](#VolumeRemove)
+
+[func VolumesPrune() []string, []string](#VolumesPrune)
-[func WaitPod() NotImplemented](#WaitPod)
+[func WaitContainer(name: string) int](#WaitContainer)
[type BuildInfo](#BuildInfo)
-[type BuildResponse](#BuildResponse)
+[type BuildOptions](#BuildOptions)
+
+[type Container](#Container)
[type ContainerChanges](#ContainerChanges)
@@ -169,11 +167,15 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[type IDMappingOptions](#IDMappingOptions)
+[type Image](#Image)
+
[type ImageHistory](#ImageHistory)
-[type ImageInList](#ImageInList)
+[type ImageSaveOptions](#ImageSaveOptions)
+
+[type ImageSearchFilter](#ImageSearchFilter)
-[type ImageSearch](#ImageSearch)
+[type ImageSearchResult](#ImageSearchResult)
[type InfoDistribution](#InfoDistribution)
@@ -185,12 +187,12 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[type InfoStore](#InfoStore)
-[type ListContainerData](#ListContainerData)
-
[type ListPodContainerInfo](#ListPodContainerInfo)
[type ListPodData](#ListPodData)
+[type MoreResponse](#MoreResponse)
+
[type NotImplemented](#NotImplemented)
[type PodContainerErrorData](#PodContainerErrorData)
@@ -205,7 +207,11 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[type StringResponse](#StringResponse)
-[type Version](#Version)
+[type Volume](#Volume)
+
+[type VolumeCreateOpts](#VolumeCreateOpts)
+
+[type VolumeRemoveOpts](#VolumeRemoveOpts)
[error ContainerNotFound](#ContainerNotFound)
@@ -223,18 +229,15 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[error RuntimeError](#RuntimeError)
-## Methods
-### <a name="AttachToContainer"></a>func AttachToContainer
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+[error VolumeNotFound](#VolumeNotFound)
-method AttachToContainer() [NotImplemented](#NotImplemented)</div>
-This method has not be implemented yet.
+## Methods
### <a name="BuildImage"></a>func BuildImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method BuildImage(build: [BuildInfo](#BuildInfo)) [BuildResponse](#BuildResponse)</div>
+method BuildImage(build: [BuildInfo](#BuildInfo)) [MoreResponse](#MoreResponse)</div>
BuildImage takes a [BuildInfo](#BuildInfo) structure and builds an image. At a minimum, you must provide the
-'dockerfile' and 'tags' options in the BuildInfo structure. It will return a [BuildResponse](#BuildResponse) structure
+'dockerfile' and 'tags' options in the BuildInfo structure. It will return a [MoreResponse](#MoreResponse) structure
that contains the build logs and resulting image ID.
### <a name="Commit"></a>func Commit
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -316,11 +319,6 @@ $ varlink call unix:/run/podman/io.podman/io.podman.CreateContainer '{"create":
"container": "8759dafbc0a4dc3bcfb57eeb72e4331eb73c5cc09ab968e65ce45b9ad5c4b6bb"
}
~~~
-### <a name="CreateImage"></a>func CreateImage
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method CreateImage() [NotImplemented](#NotImplemented)</div>
-This function is not implemented yet.
### <a name="CreatePod"></a>func CreatePod
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -398,18 +396,6 @@ a booleon option to force compression. It also takes in a string array of tags
tags of the same image to a tarball (each tag should be of the form <image>:<tag>). Upon completion, the ID
of the image is returned. If the image cannot be found in local storage, an [ImageNotFound](#ImageNotFound)
error will be returned. See also [ImportImage](ImportImage).
-### <a name="GenerateKube"></a>func GenerateKube
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method GenerateKube() [NotImplemented](#NotImplemented)</div>
-GenerateKube generates a Kubernetes v1 Pod description of a Podman container or pod
-and its containers. The description is in YAML. See also [ReplayKube](ReplayKube).
-### <a name="GenerateKubeService"></a>func GenerateKubeService
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method GenerateKubeService() [NotImplemented](#NotImplemented)</div>
-GenerateKubeService generates a Kubernetes v1 Service description of a Podman container or pod
-and its containers. The description is in YAML. See also [GenerateKube](GenerateKube).
### <a name="GetAttachSockets"></a>func GetAttachSockets
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -432,10 +418,11 @@ $ varlink call -m unix:/run/io.podman/io.podman.GetAttachSockets '{"name": "b762
### <a name="GetContainer"></a>func GetContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method GetContainer(name: [string](https://godoc.org/builtin#string)) [ListContainerData](#ListContainerData)</div>
-GetContainer takes a name or ID of a container and returns single ListContainerData
-structure. A [ContainerNotFound](#ContainerNotFound) error will be returned if the container cannot be found.
-See also [ListContainers](ListContainers) and [InspectContainer](#InspectContainer).
+method GetContainer(id: [string](https://godoc.org/builtin#string)) [Container](#Container)</div>
+GetContainer returns information about a single container. If a container
+with the given id doesn't exist, a [ContainerNotFound](#ContainerNotFound)
+error will be returned. See also [ListContainers](ListContainers) and
+[InspectContainer](#InspectContainer).
### <a name="GetContainerLogs"></a>func GetContainerLogs
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -476,9 +463,9 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.GetContainerStats '{"name
### <a name="GetImage"></a>func GetImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method GetImage(name: [string](https://godoc.org/builtin#string)) [ImageInList](#ImageInList)</div>
-GetImage returns a single image in an [ImageInList](#ImageInList) struct. You must supply an image name as a string.
-If the image cannot be found, an [ImageNotFound](#ImageNotFound) error will be returned.
+method GetImage(id: [string](https://godoc.org/builtin#string)) [Image](#Image)</div>
+GetImage returns information about a single image in storage.
+If the image caGetImage returns be found, [ImageNotFound](#ImageNotFound) will be returned.
### <a name="GetInfo"></a>func GetInfo
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -550,12 +537,23 @@ $ varlink call unix:/run/podman/io.podman/io.podman.GetPodStats '{"name": "7f62b
"pod": "7f62b508b6f12b11d8fe02e0db4de6b9e43a7d7699b33a4fc0d574f6e82b4ebd"
}
~~~
+### <a name="GetPodsByContext"></a>func GetPodsByContext
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method GetPodsByContext(all: [bool](https://godoc.org/builtin#bool), latest: [bool](https://godoc.org/builtin#bool), args: [[]string](#[]string)) [[]string](#[]string)</div>
+GetPodsByContext allows you to get a list pod ids depending on all, latest, or a list of
+pod names. The definition of latest pod means the latest by creation date. In a multi-
+user environment, results might differ from what you expect.
### <a name="GetVersion"></a>func GetVersion
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method GetVersion() [Version](#Version)</div>
-GetVersion returns a Version structure describing the libpod setup on their
-system.
+method GetVersion() [string](https://godoc.org/builtin#string), [string](https://godoc.org/builtin#string), [string](https://godoc.org/builtin#string), [string](https://godoc.org/builtin#string), [string](https://godoc.org/builtin#string), [int](https://godoc.org/builtin#int)</div>
+GetVersion returns version and build information of the podman service
+### <a name="GetVolumes"></a>func GetVolumes
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method GetVolumes(args: [[]string](#[]string), all: [bool](https://godoc.org/builtin#bool)) [Volume](#Volume)</div>
+GetVolumes gets slice of the volumes on a remote host
### <a name="HistoryImage"></a>func HistoryImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -577,6 +575,11 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.ImageExists '{"name": "im
"exists": 1
}
~~~
+### <a name="ImageSave"></a>func ImageSave
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ImageSave(options: [ImageSaveOptions](#ImageSaveOptions)) [MoreResponse](#MoreResponse)</div>
+ImageSave allows you to save an image from the local image storage to a tarball
### <a name="ImagesPrune"></a>func ImagesPrune
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -586,7 +589,7 @@ the IDs of the removed images are returned.
### <a name="ImportImage"></a>func ImportImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method ImportImage(source: [string](https://godoc.org/builtin#string), reference: [string](https://godoc.org/builtin#string), message: [string](https://godoc.org/builtin#string), changes: [[]string](#[]string)) [string](https://godoc.org/builtin#string)</div>
+method ImportImage(source: [string](https://godoc.org/builtin#string), reference: [string](https://godoc.org/builtin#string), message: [string](https://godoc.org/builtin#string), changes: [[]string](#[]string), delete: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
ImportImage imports an image from a source (like tarball) into local storage. The image can have additional
descriptions added to it using the message and changes options. See also [ExportImage](ExportImage).
### <a name="InspectContainer"></a>func InspectContainer
@@ -656,11 +659,6 @@ $ varlink call unix:/run/podman/io.podman/io.podman.ListContainerMounts
}
}
~~~
-### <a name="ListContainerPorts"></a>func ListContainerPorts
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method ListContainerPorts(name: [string](https://godoc.org/builtin#string)) [NotImplemented](#NotImplemented)</div>
-This function is not implemented yet.
### <a name="ListContainerProcesses"></a>func ListContainerProcesses
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -684,15 +682,15 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.ListContainerProcesses '{
### <a name="ListContainers"></a>func ListContainers
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method ListContainers() [ListContainerData](#ListContainerData)</div>
-ListContainers returns a list of containers in no particular order. There are
-returned as an array of ListContainerData structs. See also [GetContainer](#GetContainer).
+method ListContainers() [Container](#Container)</div>
+ListContainers returns information about all containers.
+See also [GetContainer](#GetContainer).
### <a name="ListImages"></a>func ListImages
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method ListImages() [ImageInList](#ImageInList)</div>
-ListImages returns an array of ImageInList structures which provide basic information about
-an image currently in storage. See also [InspectImage](InspectImage).
+method ListImages() [Image](#Image)</div>
+ListImages returns information about the images that are currently in storage.
+See also [InspectImage](InspectImage).
### <a name="ListPods"></a>func ListPods
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -742,6 +740,11 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.ListPods
]
}
~~~
+### <a name="LoadImage"></a>func LoadImage
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method LoadImage(name: [string](https://godoc.org/builtin#string), inputFile: [string](https://godoc.org/builtin#string), quiet: [bool](https://godoc.org/builtin#bool), deleteFile: [bool](https://godoc.org/builtin#bool)) [MoreResponse](#MoreResponse)</div>
+LoadImage allows you to load an image into local storage from a tarball.
### <a name="MountContainer"></a>func MountContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -778,47 +781,39 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.PausePod '{"name": "fooba
"pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
}
~~~
-### <a name="Ping"></a>func Ping
+### <a name="PodStateData"></a>func PodStateData
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method Ping() [StringResponse](#StringResponse)</div>
-Ping provides a response for developers to ensure their varlink setup is working.
-#### Example
-~~~
-$ varlink call -m unix:/run/podman/io.podman/io.podman.Ping
-{
- "ping": {
- "message": "OK"
- }
-}
-~~~
+method PodStateData(name: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
+PodStateData returns inspectr level information of a given pod in string form. This call is for
+development of Podman only and generally should not be used.
### <a name="PullImage"></a>func PullImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method PullImage(name: [string](https://godoc.org/builtin#string), certDir: [string](https://godoc.org/builtin#string), creds: [string](https://godoc.org/builtin#string), signaturePolicy: [string](https://godoc.org/builtin#string), tlsVerify: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
-PullImage pulls an image from a repository to local storage. After the pull is successful, the ID of the image
-is returned.
-#### Example
-~~~
-$ varlink call -m unix:/run/podman/io.podman/io.podman.PullImage '{"name": "registry.fedoraproject.org/fedora"}'
-{
- "id": "426866d6fa419873f97e5cbd320eeb22778244c1dfffa01c944db3114f55772e"
-}
-~~~
+method PullImage(name: [string](https://godoc.org/builtin#string), certDir: [string](https://godoc.org/builtin#string), creds: [string](https://godoc.org/builtin#string), signaturePolicy: [string](https://godoc.org/builtin#string), tlsVerify: [](#)) [MoreResponse](#MoreResponse)</div>
+PullImage pulls an image from a repository to local storage. After a successful pull, the image id and logs
+are returned as a [MoreResponse](#MoreResponse). This connection also will handle a WantsMores request to send
+status as it occurs.
### <a name="PushImage"></a>func PushImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method PushImage(name: [string](https://godoc.org/builtin#string), tag: [string](https://godoc.org/builtin#string), tlsverify: [bool](https://godoc.org/builtin#bool), signaturePolicy: [string](https://godoc.org/builtin#string), creds: [string](https://godoc.org/builtin#string), certDir: [string](https://godoc.org/builtin#string), compress: [bool](https://godoc.org/builtin#bool), format: [string](https://godoc.org/builtin#string), removeSignatures: [bool](https://godoc.org/builtin#bool), signBy: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
+method PushImage(name: [string](https://godoc.org/builtin#string), tag: [string](https://godoc.org/builtin#string), tlsverify: [](#), signaturePolicy: [string](https://godoc.org/builtin#string), creds: [string](https://godoc.org/builtin#string), certDir: [string](https://godoc.org/builtin#string), compress: [bool](https://godoc.org/builtin#bool), format: [string](https://godoc.org/builtin#string), removeSignatures: [bool](https://godoc.org/builtin#bool), signBy: [string](https://godoc.org/builtin#string)) [MoreResponse](#MoreResponse)</div>
PushImage takes three input arguments: the name or ID of an image, the fully-qualified destination name of the image,
and a boolean as to whether tls-verify should be used (with false disabling TLS, not affecting the default behavior).
It will return an [ImageNotFound](#ImageNotFound) error if
-the image cannot be found in local storage; otherwise the ID of the image will be returned on success.
+the image cannot be found in local storage; otherwise it will return a [MoreResponse](#MoreResponse)
+### <a name="ReceiveFile"></a>func ReceiveFile
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method ReceiveFile(path: [string](https://godoc.org/builtin#string), delete: [bool](https://godoc.org/builtin#bool)) [int](https://godoc.org/builtin#int)</div>
+ReceiveFile allows the host to send a remote client a file
### <a name="RemoveContainer"></a>func RemoveContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method RemoveContainer(name: [string](https://godoc.org/builtin#string), force: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
-RemoveContainer takes requires the name or ID of container as well a boolean representing whether a running
-container can be stopped and removed. Upon successful removal of the container, its ID is returned. If the
+method RemoveContainer(name: [string](https://godoc.org/builtin#string), force: [bool](https://godoc.org/builtin#bool), removeVolumes: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
+RemoveContainer requires the name or ID of container as well a boolean representing whether a running container can be stopped and removed, and a boolean
+indicating whether to remove builtin volumes. Upon successful removal of the
+container, its ID is returned. If the
container cannot be found by name or ID, a [ContainerNotFound](#ContainerNotFound) error will be returned.
#### Example
~~~
@@ -859,22 +854,6 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.RemovePod '{"name": "62f4
"pod": "62f4fd98cb57f529831e8f90610e54bba74bd6f02920ffb485e15376ed365c20"
}
~~~
-### <a name="RenameContainer"></a>func RenameContainer
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method RenameContainer() [NotImplemented](#NotImplemented)</div>
-This method has not be implemented yet.
-### <a name="ReplayKube"></a>func ReplayKube
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method ReplayKube() [NotImplemented](#NotImplemented)</div>
-ReplayKube recreates a pod and its containers based on a Kubernetes v1 Pod description (in YAML)
-like that created by GenerateKube. See also [GenerateKube](GenerateKube).
-### <a name="ResizeContainerTty"></a>func ResizeContainerTty
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method ResizeContainerTty() [NotImplemented](#NotImplemented)</div>
-This method has not be implemented yet.
### <a name="RestartContainer"></a>func RestartContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -900,13 +879,18 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.RestartPod '{"name": "135
"pod": "135d71b9495f7c3967f536edad57750bfdb569336cd107d8aabab45565ffcfb6"
}
~~~
-### <a name="SearchImage"></a>func SearchImage
+### <a name="SearchImages"></a>func SearchImages
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method SearchImages(query: [string](https://godoc.org/builtin#string), limit: [](#), tlsVerify: [](#), filter: [ImageSearchFilter](#ImageSearchFilter)) [ImageSearchResult](#ImageSearchResult)</div>
+SearchImages searches available registries for images that contain the
+contents of "query" in their name. If "limit" is given, limits the amount of
+search results per registry.
+### <a name="SendFile"></a>func SendFile
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method SearchImage(name: [string](https://godoc.org/builtin#string), limit: [int](https://godoc.org/builtin#int)) [ImageSearch](#ImageSearch)</div>
-SearchImage takes the string of an image name and a limit of searches from each registries to be returned. SearchImage
-will then use a glob-like match to find the image you are searching for. The images are returned in an array of
-ImageSearch structures which contain information about the image as well as its fully-qualified name.
+method SendFile(type: [string](https://godoc.org/builtin#string), length: [int](https://godoc.org/builtin#int)) [string](https://godoc.org/builtin#string)</div>
+Sendfile allows a remote client to send a file to the host
### <a name="StartContainer"></a>func StartContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -968,11 +952,6 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.StopPod '{"name": "135d71
method TagImage(name: [string](https://godoc.org/builtin#string), tagged: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
TagImage takes the name or ID of an image in local storage as well as the desired tag name. If the image cannot
be found, an [ImageNotFound](#ImageNotFound) error will be returned; otherwise, the ID of the image is returned on success.
-### <a name="TopPod"></a>func TopPod
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method TopPod() [NotImplemented](#NotImplemented)</div>
-This method has not been implemented yet.
### <a name="UnmountContainer"></a>func UnmountContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -1007,11 +986,21 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.UnpausePod '{"name": "foo
"pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
}
~~~
-### <a name="UpdateContainer"></a>func UpdateContainer
+### <a name="VolumeCreate"></a>func VolumeCreate
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method VolumeCreate(options: [VolumeCreateOpts](#VolumeCreateOpts)) [string](https://godoc.org/builtin#string)</div>
+VolumeCreate creates a volume on a remote host
+### <a name="VolumeRemove"></a>func VolumeRemove
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method UpdateContainer() [NotImplemented](#NotImplemented)</div>
-This method has not be implemented yet.
+method VolumeRemove(options: [VolumeRemoveOpts](#VolumeRemoveOpts)) [[]string](#[]string)</div>
+VolumeRemove removes a volume on a remote host
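A sketch of a removal call, assuming the VolumeRemoveOpts fields listed below:
~~~
$ varlink call -m unix:/run/podman/io.podman/io.podman.VolumeRemove '{"options": {"volumes": ["myvol"], "force": false}}'
~~~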
+### <a name="VolumesPrune"></a>func VolumesPrune
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method VolumesPrune() [[]string](#[]string), [[]string](#[]string)</div>
+VolumesPrune removes unused volumes on the host
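Since VolumesPrune takes no arguments, an illustrative call would simply be:
~~~
$ varlink call -m unix:/run/podman/io.podman/io.podman.VolumesPrune '{}'
~~~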
### <a name="WaitContainer"></a>func WaitContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -1019,70 +1008,122 @@ method WaitContainer(name: [string](https://godoc.org/builtin#string)) [int](htt
WaitContainer takes the name or ID of a container and waits until the container stops. Upon stopping, the return
code of the container is returned. If the container cannot be found by ID or name,
a [ContainerNotFound](#ContainerNotFound) error is returned.
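For example (the container name is a placeholder), a blocking wait could look like:
~~~
$ varlink call -m unix:/run/podman/io.podman/io.podman.WaitContainer '{"name": "mycontainer"}'
~~~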
-### <a name="WaitPod"></a>func WaitPod
-<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-
-method WaitPod() [NotImplemented](#NotImplemented)</div>
-This method has not be implemented yet.
## Types
### <a name="BuildInfo"></a>type BuildInfo
BuildInfo is used to describe user input for building images
-dockerfile [[]string](#[]string)
+additionalTags [[]string](#[]string)
-tags [[]string](#[]string)
+annotations [[]string](#[]string)
-add_hosts [[]string](#[]string)
+buildArgs [map[string]](#map[string])
-cgroup_parent [string](https://godoc.org/builtin#string)
+buildOptions [BuildOptions](#BuildOptions)
-cpu_period [int](https://godoc.org/builtin#int)
+cniConfigDir [string](https://godoc.org/builtin#string)
-cpu_quota [int](https://godoc.org/builtin#int)
+cniPluginDir [string](https://godoc.org/builtin#string)
-cpu_shares [int](https://godoc.org/builtin#int)
+compression [string](https://godoc.org/builtin#string)
-cpuset_cpus [string](https://godoc.org/builtin#string)
+contextDir [string](https://godoc.org/builtin#string)
-cpuset_mems [string](https://godoc.org/builtin#string)
+defaultsMountFilePath [string](https://godoc.org/builtin#string)
-memory [string](https://godoc.org/builtin#string)
+dockerfiles [[]string](#[]string)
-memory_swap [string](https://godoc.org/builtin#string)
+err [string](https://godoc.org/builtin#string)
-security_opts [[]string](#[]string)
+forceRmIntermediateCtrs [bool](https://godoc.org/builtin#bool)
-shm_size [string](https://godoc.org/builtin#string)
+iidfile [string](https://godoc.org/builtin#string)
-ulimit [[]string](#[]string)
+label [[]string](#[]string)
-volume [[]string](#[]string)
+layers [bool](https://godoc.org/builtin#bool)
+
+nocache [bool](https://godoc.org/builtin#bool)
+
+out [string](https://godoc.org/builtin#string)
+
+output [string](https://godoc.org/builtin#string)
+
+outputFormat [string](https://godoc.org/builtin#string)
+
+pullPolicy [string](https://godoc.org/builtin#string)
+
+quiet [bool](https://godoc.org/builtin#bool)
+
+remoteIntermediateCtrs [bool](https://godoc.org/builtin#bool)
+
+reportWriter [string](https://godoc.org/builtin#string)
+
+runtimeArgs [[]string](#[]string)
+
+signaturePolicyPath [string](https://godoc.org/builtin#string)
squash [bool](https://godoc.org/builtin#bool)
+### <a name="BuildOptions"></a>type BuildOptions
-pull [bool](https://godoc.org/builtin#bool)
+BuildOptions are used to describe physical attributes of the build
-pull_always [bool](https://godoc.org/builtin#bool)
+addHosts [[]string](#[]string)
-force_rm [bool](https://godoc.org/builtin#bool)
+cgroupParent [string](https://godoc.org/builtin#string)
-rm [bool](https://godoc.org/builtin#bool)
+cpuPeriod [int](https://godoc.org/builtin#int)
-label [[]string](#[]string)
+cpuQuota [int](https://godoc.org/builtin#int)
-annotations [[]string](#[]string)
+cpuShares [int](https://godoc.org/builtin#int)
-build_args [map[string]](#map[string])
+cpusetCpus [string](https://godoc.org/builtin#string)
-image_format [string](https://godoc.org/builtin#string)
-### <a name="BuildResponse"></a>type BuildResponse
+cpusetMems [string](https://godoc.org/builtin#string)
+
+memory [int](https://godoc.org/builtin#int)
+
+memorySwap [int](https://godoc.org/builtin#int)
+
+shmSize [string](https://godoc.org/builtin#string)
+
+ulimit [[]string](#[]string)
+
+volume [[]string](#[]string)
+### <a name="Container"></a>type Container
-BuildResponse is used to describe the responses for building images
-logs [[]string](#[]string)
id [string](https://godoc.org/builtin#string)
+
+image [string](https://godoc.org/builtin#string)
+
+imageid [string](https://godoc.org/builtin#string)
+
+command [[]string](#[]string)
+
+createdat [string](https://godoc.org/builtin#string)
+
+runningfor [string](https://godoc.org/builtin#string)
+
+status [string](https://godoc.org/builtin#string)
+
+ports [ContainerPortMappings](#ContainerPortMappings)
+
+rootfssize [int](https://godoc.org/builtin#int)
+
+rwsize [int](https://godoc.org/builtin#int)
+
+names [string](https://godoc.org/builtin#string)
+
+labels [map[string]](#map[string])
+
+mounts [ContainerMount](#ContainerMount)
+
+containerrunning [bool](https://godoc.org/builtin#bool)
+
+namespaces [ContainerNameSpace](#ContainerNameSpace)
### <a name="ContainerChanges"></a>type ContainerChanges
ContainerChanges describes the return struct for ListContainerChanges
@@ -1366,6 +1407,29 @@ host_gid_mapping [bool](https://godoc.org/builtin#bool)
uid_map [IDMap](#IDMap)
gid_map [IDMap](#IDMap)
+### <a name="Image"></a>type Image
+
+
+
+id [string](https://godoc.org/builtin#string)
+
+parentId [string](https://godoc.org/builtin#string)
+
+repoTags [[]string](#[]string)
+
+repoDigests [[]string](#[]string)
+
+created [string](https://godoc.org/builtin#string)
+
+size [int](https://godoc.org/builtin#int)
+
+virtualSize [int](https://godoc.org/builtin#int)
+
+containers [int](https://godoc.org/builtin#int)
+
+labels [map[string]](#map[string])
+
+isParent [bool](https://godoc.org/builtin#bool)
### <a name="ImageHistory"></a>type ImageHistory
ImageHistory describes the returned structure from ImageHistory.
@@ -1381,34 +1445,35 @@ tags [[]string](#[]string)
size [int](https://godoc.org/builtin#int)
comment [string](https://godoc.org/builtin#string)
-### <a name="ImageInList"></a>type ImageInList
+### <a name="ImageSaveOptions"></a>type ImageSaveOptions
-ImageInList describes the structure that is returned in
-ListImages.
-id [string](https://godoc.org/builtin#string)
-parentId [string](https://godoc.org/builtin#string)
+name [string](https://godoc.org/builtin#string)
-repoTags [[]string](#[]string)
+format [string](https://godoc.org/builtin#string)
-repoDigests [[]string](#[]string)
+output [string](https://godoc.org/builtin#string)
-created [string](https://godoc.org/builtin#string)
+outputType [string](https://godoc.org/builtin#string)
-size [int](https://godoc.org/builtin#int)
+moreTags [[]string](#[]string)
-virtualSize [int](https://godoc.org/builtin#int)
+quiet [bool](https://godoc.org/builtin#bool)
-containers [int](https://godoc.org/builtin#int)
+compress [bool](https://godoc.org/builtin#bool)
+### <a name="ImageSearchFilter"></a>type ImageSearchFilter
-labels [map[string]](#map[string])
-isParent [bool](https://godoc.org/builtin#bool)
-### <a name="ImageSearch"></a>type ImageSearch
-ImageSearch is the returned structure for SearchImage. It is returned
-in array form.
+is_official [](#)
+
+is_automated [](#)
+
+star_count [int](https://godoc.org/builtin#int)
+### <a name="ImageSearchResult"></a>type ImageSearchResult
+
+Represents a single search result from SearchImages
description [string](https://godoc.org/builtin#string)
@@ -1416,6 +1481,8 @@ is_official [bool](https://godoc.org/builtin#bool)
is_automated [bool](https://godoc.org/builtin#bool)
+registry [string](https://godoc.org/builtin#string)
+
name [string](https://godoc.org/builtin#string)
star_count [int](https://godoc.org/builtin#int)
@@ -1490,39 +1557,6 @@ graph_root [string](https://godoc.org/builtin#string)
graph_status [InfoGraphStatus](#InfoGraphStatus)
run_root [string](https://godoc.org/builtin#string)
-### <a name="ListContainerData"></a>type ListContainerData
-
-ListContainerData is the returned struct for an individual container
-
-id [string](https://godoc.org/builtin#string)
-
-image [string](https://godoc.org/builtin#string)
-
-imageid [string](https://godoc.org/builtin#string)
-
-command [[]string](#[]string)
-
-createdat [string](https://godoc.org/builtin#string)
-
-runningfor [string](https://godoc.org/builtin#string)
-
-status [string](https://godoc.org/builtin#string)
-
-ports [ContainerPortMappings](#ContainerPortMappings)
-
-rootfssize [int](https://godoc.org/builtin#int)
-
-rwsize [int](https://godoc.org/builtin#int)
-
-names [string](https://godoc.org/builtin#string)
-
-labels [map[string]](#map[string])
-
-mounts [ContainerMount](#ContainerMount)
-
-containerrunning [bool](https://godoc.org/builtin#bool)
-
-namespaces [ContainerNameSpace](#ContainerNameSpace)
### <a name="ListPodContainerInfo"></a>type ListPodContainerInfo
ListPodContainerInfo is a returned struct for describing containers
@@ -1552,6 +1586,13 @@ labels [map[string]](#map[string])
numberofcontainers [string](https://godoc.org/builtin#string)
containersinfo [ListPodContainerInfo](#ListPodContainerInfo)
+### <a name="MoreResponse"></a>type MoreResponse
+
+MoreResponse is a struct for when responses from varlink require longer output
+
+logs [[]string](#[]string)
+
+id [string](https://godoc.org/builtin#string)
### <a name="NotImplemented"></a>type NotImplemented
@@ -1618,7 +1659,7 @@ pull [bool](https://godoc.org/builtin#bool)
signaturePolicyPath [string](https://godoc.org/builtin#string)
-tlsVerify [bool](https://godoc.org/builtin#bool)
+tlsVerify [](#)
label [string](https://godoc.org/builtin#string)
@@ -1639,21 +1680,41 @@ control_socket [string](https://godoc.org/builtin#string)
message [string](https://godoc.org/builtin#string)
-### <a name="Version"></a>type Version
+### <a name="Volume"></a>type Volume
-Version is the structure returned by GetVersion
-version [string](https://godoc.org/builtin#string)
-go_version [string](https://godoc.org/builtin#string)
+name [string](https://godoc.org/builtin#string)
-git_commit [string](https://godoc.org/builtin#string)
+labels [map[string]](#map[string])
+
+mountPoint [string](https://godoc.org/builtin#string)
-built [int](https://godoc.org/builtin#int)
+driver [string](https://godoc.org/builtin#string)
-os_arch [string](https://godoc.org/builtin#string)
+options [map[string]](#map[string])
-remote_api_version [int](https://godoc.org/builtin#int)
+scope [string](https://godoc.org/builtin#string)
+### <a name="VolumeCreateOpts"></a>type VolumeCreateOpts
+
+
+
+volumeName [string](https://godoc.org/builtin#string)
+
+driver [string](https://godoc.org/builtin#string)
+
+labels [map[string]](#map[string])
+
+options [map[string]](#map[string])
+### <a name="VolumeRemoveOpts"></a>type VolumeRemoveOpts
+
+
+
+volumes [[]string](#[]string)
+
+all [bool](https://godoc.org/builtin#bool)
+
+force [bool](https://godoc.org/builtin#bool)
## Errors
### <a name="ContainerNotFound"></a>type ContainerNotFound
@@ -1682,3 +1743,6 @@ PodNotFound means the pod could not be found by the provided name or ID in local
### <a name="RuntimeError"></a>type RuntimeError
RuntimeErrors generally means a runtime could not be found or gotten.
+### <a name="VolumeNotFound"></a>type VolumeNotFound
+
+VolumeNotFound means the volume could not be found by the name or ID in local storage.
diff --git a/Makefile b/Makefile
index 8e94f1970..f634fcc81 100644
--- a/Makefile
+++ b/Makefile
@@ -34,7 +34,7 @@ SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo
PACKAGES ?= $(shell $(GO) list -tags "${BUILDTAGS}" ./... | grep -v github.com/containers/libpod/vendor | grep -v e2e | grep -v system )
COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
-GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}")
+GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
BUILD_INFO ?= $(shell date +%s)
LIBPOD := ${PROJECT}/libpod
LDFLAGS_PODMAN ?= $(LDFLAGS) -X $(LIBPOD).gitCommit=$(GIT_COMMIT) -X $(LIBPOD).buildInfo=$(BUILD_INFO)
@@ -94,6 +94,7 @@ help:
ifeq ("$(wildcard $(GOPKGDIR))","")
mkdir -p "$(GOPKGBASEDIR)"
ln -sf "$(CURDIR)" "$(GOPKGBASEDIR)"
+ ln -sf "$(CURDIR)/vendor/github.com/varlink" "$(FIRST_GOPATH)/src/github.com/varlink"
endif
touch $@
@@ -105,11 +106,6 @@ gofmt: ## Verify the source code gofmt
find . -name '*.go' ! -path './vendor/*' -exec gofmt -s -w {} \+
git diff --exit-code
-test/bin2img/bin2img: .gopathok $(wildcard test/bin2img/*.go)
- $(GO) build -ldflags '$(LDFLAGS)' -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/bin2img
-
-test/copyimg/copyimg: .gopathok $(wildcard test/copyimg/*.go)
- $(GO) build -ldflags '$(LDFLAGS)' -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/copyimg
test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go)
$(GO) build -ldflags '$(LDFLAGS)' -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/checkseccomp
@@ -140,9 +136,7 @@ clean: ## Clean artifacts
_output \
bin \
build \
- test/bin2img/bin2img \
test/checkseccomp/checkseccomp \
- test/copyimg/copyimg \
test/goecho/goecho \
test/testdata/redis-image \
cmd/podman/varlink/iopodman.go \
@@ -210,7 +204,7 @@ binaries: varlink_generate podman podman-remote ## Build podman
install.catatonit:
./hack/install_catatonit.sh
-test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp test/goecho/goecho install.catatonit
+test-binaries: test/checkseccomp/checkseccomp test/goecho/goecho install.catatonit
MANPAGES_MD ?= $(wildcard docs/*.md pkg/*/docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)
@@ -352,7 +346,8 @@ vendor: .install.vndr
$(GOPATH)/bin/vndr \
-whitelist "github.com/varlink/go" \
-whitelist "github.com/onsi/ginkgo" \
- -whitelist "github.com/onsi/gomega"
+ -whitelist "github.com/onsi/gomega" \
+ -whitelist "gopkg.in/fsnotify.v1"
.PHONY: \
.gopathok \
diff --git a/README.md b/README.md
index 52324c860..97c548ddd 100644
--- a/README.md
+++ b/README.md
@@ -31,10 +31,19 @@ This project tests all builds against each supported version of Fedora, the late
## Out of scope
-* Signing and pushing images to various image storages.
- See [Skopeo](https://github.com/containers/skopeo/).
-* Container Runtimes daemons for working with the Kubernetes CRI interface.
- See [CRI-O](https://github.com/kubernetes-sigs/cri-o).
+* Specializing in signing and pushing images to various storage backends.
+ See [Skopeo](https://github.com/containers/skopeo/) for those tasks.
+* Container runtimes daemons for working with the Kubernetes CRI interface.
+ [CRI-O](https://github.com/kubernetes-sigs/cri-o) specializes in that.
+* Supporting `docker-compose`. We believe that Kubernetes is the de facto
+  standard for composing Pods and for orchestrating containers, making
+  Kubernetes YAML a de facto standard file format. Hence, Podman allows the
+ creation and execution of Pods from a Kubernetes YAML file (see
+ [podman-play-kube](https://github.com/containers/libpod/blob/master/docs/podman-play-kube.1.md)).
+ Podman can also generate Kubernetes YAML based on a container or Pod (see
+ [podman-generate-kube](https://github.com/containers/libpod/blob/master/docs/podman-generate-kube.1.md)),
+ which allows for an easy transition from a local development environment
+  to a production Kubernetes cluster (see the sketch below).
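  A minimal round trip (the container name `web` is a placeholder, not part of this change) might look like:
  ~~~
  podman generate kube web > web.yaml
  podman play kube web.yaml
  ~~~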
## OCI Projects Plans
@@ -83,18 +92,23 @@ Information about contributing to this project.
## Buildah and Podman relationship
-Buildah and Podman are two complementary Open-source projects that are available on
-most Linux platforms and both projects reside at [GitHub.com](https://github.com)
-with [Buildah](https://buildah.io) [(GitHub)](https://github.com/containers/buildah) and
-[Podman](https://podman.io) [(GitHub)](https://github.com/containers/libpod). Both Buildah and Podman are
-command line tools that work on OCI images and containers. The two projects
-differentiate in their specialization.
+Buildah and Podman are two complementary open-source projects that are
+available on most Linux platforms and both projects reside at
+[GitHub.com](https://github.com) with Buildah
+[here](https://github.com/containers/buildah) and Podman
+[here](https://github.com/containers/libpod). Both Buildah and Podman are
+command line tools that work on Open Container Initiative (OCI) images and
+containers. The two projects differentiate in their specialization.
Buildah specializes in building OCI images. Buildah's commands replicate all
-of the commands that are found in a Dockerfile. Buildah’s goal is also to
-provide a lower level coreutils interface to build images, allowing people to build
-containers without requiring a Dockerfile. The intent with Buildah is to allow other
-scripting languages to build container images, without requiring a daemon.
+of the commands that are found in a Dockerfile. This allows building images
+with and without Dockerfiles while not requiring any root privileges.
+Buildah’s ultimate goal is to provide a lower-level coreutils interface to
+build images. The flexibility of building images without Dockerfiles allows
+for the integration of other scripting languages into the build process.
+Buildah follows a simple fork-exec model and does not run as a daemon
+but it is based on a comprehensive API in golang, which can be vendored
+into other tools.
Podman specializes in all of the commands and functions that help you to maintain and modify
OCI images, such as pulling and tagging. It also allows you to create, run, and maintain those containers
@@ -103,12 +117,12 @@ created from those images.
A major difference between Podman and Buildah is their concept of a container. Podman
allows users to create "traditional containers" where the intent of these containers is
to be long lived. While Buildah containers are really just created to allow content
-to be added back to the container image. An easy way to think of it is the
+to be added back to the container image. An easy way to think of it is the
`buildah run` command emulates the RUN command in a Dockerfile while the `podman run`
command emulates the `docker run` command in functionality. Because of this and their underlying
-storage differences, you cannot see Podman containers from within Buildah or vice versa.
+storage differences, you can not see Podman containers from within Buildah or vice versa.
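A short illustration of the two models (container and package names are placeholders):
~~~
ctr=$(buildah from fedora)                    # a Buildah working container, meant for building
buildah run "$ctr" -- dnf -y install nginx    # emulates a Dockerfile RUN step
podman run -d --name web fedora sleep 1000    # a Podman container, meant to be long lived
~~~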
-In short Buildah is an efficient way to create OCI images while Podman allows
+In short, Buildah is an efficient way to create OCI images while Podman allows
you to manage and maintain those images and containers in a production environment using
familiar container cli commands. For more details, see the
[Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools).
diff --git a/cmd/podman/attach.go b/cmd/podman/attach.go
index bcfee6891..074675e45 100644
--- a/cmd/podman/attach.go
+++ b/cmd/podman/attach.go
@@ -3,57 +3,57 @@ package main
import (
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- attachFlags = []cli.Flag{
- cli.StringFlag{
- Name: "detach-keys",
- Usage: "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _.",
- },
- cli.BoolFlag{
- Name: "no-stdin",
- Usage: "Do not attach STDIN. The default is false.",
- },
- cli.BoolTFlag{
- Name: "sig-proxy",
- Usage: "Proxy received signals to the process (default true)",
- },
- LatestFlag,
- }
+ attachCommand cliconfig.AttachValues
attachDescription = "The podman attach command allows you to attach to a running container using the container's ID or name, either to view its ongoing output or to control it interactively."
- attachCommand = cli.Command{
- Name: "attach",
- Usage: "Attach to a running container",
- Description: attachDescription,
- Flags: sortFlags(attachFlags),
- Action: attachCmd,
- ArgsUsage: "",
- OnUsageError: usageErrorHandler,
+ _attachCommand = &cobra.Command{
+ Use: "attach",
+ Short: "Attach to a running container",
+ Long: attachDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ attachCommand.InputArgs = args
+ attachCommand.GlobalFlags = MainGlobalOpts
+ return attachCmd(&attachCommand)
+ },
+ Example: `podman attach ctrID
+ podman attach 1234
+ podman attach --no-stdin foobar`,
}
)
-func attachCmd(c *cli.Context) error {
- args := c.Args()
+func init() {
+ attachCommand.Command = _attachCommand
+ attachCommand.SetUsageTemplate(UsageTemplate())
+ flags := attachCommand.Flags()
+ flags.StringVar(&attachCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
+ flags.BoolVar(&attachCommand.NoStdin, "no-stdin", false, "Do not attach STDIN. The default is false")
+ flags.BoolVar(&attachCommand.SigProxy, "sig-proxy", true, "Proxy received signals to the process (default true)")
+ flags.BoolVarP(&attachCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func attachCmd(c *cliconfig.AttachValues) error {
+ args := c.InputArgs
var ctr *libpod.Container
- if err := validateFlags(c, attachFlags); err != nil {
- return err
- }
- if len(c.Args()) > 1 || (len(c.Args()) == 0 && !c.Bool("latest")) {
+
+ if len(c.InputArgs) > 1 || (len(c.InputArgs) == 0 && !c.Latest) {
return errors.Errorf("attach requires the name or id of one running container or the latest flag")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- if c.Bool("latest") {
+ if c.Latest {
ctr, err = runtime.GetLatestContainer()
} else {
ctr, err = runtime.LookupContainer(args[0])
@@ -72,11 +72,12 @@ func attachCmd(c *cli.Context) error {
}
inputStream := os.Stdin
- if c.Bool("no-stdin") {
+ if c.NoStdin {
inputStream = nil
}
- if err := startAttachCtr(ctr, os.Stdout, os.Stderr, inputStream, c.String("detach-keys"), c.BoolT("sig-proxy"), false); err != nil {
+ // If the container is in a pod, also set to recursively start dependencies
+ if err := startAttachCtr(ctr, os.Stdout, os.Stderr, inputStream, c.DetachKeys, c.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != libpod.ErrDetach {
return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
}
diff --git a/cmd/podman/build.go b/cmd/podman/build.go
index 0cd1bfbe3..e40e35cb5 100644
--- a/cmd/podman/build.go
+++ b/cmd/podman/build.go
@@ -1,7 +1,6 @@
package main
import (
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -9,39 +8,69 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/imagebuildah"
buildahcli "github.com/containers/buildah/pkg/cli"
- "github.com/containers/buildah/pkg/parse"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
+ "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- layerFlags = []cli.Flag{
- cli.BoolTFlag{
- Name: "force-rm",
- Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful. (default true)",
- },
- cli.BoolTFlag{
- Name: "layers",
- Usage: "Cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. ",
- },
- }
+ buildCommand cliconfig.BuildValues
buildDescription = "Builds an OCI or Docker image using instructions from one\n" +
"or more Dockerfiles and a specified build context directory."
- buildCommand = cli.Command{
- Name: "build",
- Usage: "Build an image using instructions from Dockerfiles",
- Description: buildDescription,
- Flags: sortFlags(append(append(buildahcli.BudFlags, layerFlags...), buildahcli.FromAndBudFlags...)),
- Action: buildCmd,
- ArgsUsage: "CONTEXT-DIRECTORY | URL",
- SkipArgReorder: true,
- OnUsageError: usageErrorHandler,
+ layerValues buildahcli.LayerResults
+ budFlagsValues buildahcli.BudResults
+ fromAndBudValues buildahcli.FromAndBudResults
+ userNSValues buildahcli.UserNSResults
+ namespaceValues buildahcli.NameSpaceResults
+
+ _buildCommand = &cobra.Command{
+ Use: "build",
+ Short: "Build an image using instructions from Dockerfiles",
+ Long: buildDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ buildCommand.InputArgs = args
+ buildCommand.GlobalFlags = MainGlobalOpts
+ buildCommand.BudResults = &budFlagsValues
+ buildCommand.UserNSResults = &userNSValues
+ buildCommand.FromAndBudResults = &fromAndBudValues
+ buildCommand.LayerResults = &layerValues
+ buildCommand.NameSpaceResults = &namespaceValues
+ return buildCmd(&buildCommand)
+ },
+ Example: `podman build .
+ podman build --cert-dir ~/auth --creds=username:password -t imageName -f Dockerfile.simple .
+ podman build --layers --force-rm --tag imageName .`,
}
)
+func init() {
+ buildCommand.Command = _buildCommand
+ buildCommand.SetUsageTemplate(UsageTemplate())
+ flags := buildCommand.Flags()
+ flags.SetInterspersed(false)
+
+ budFlags := buildahcli.GetBudFlags(&budFlagsValues)
+ flag := budFlags.Lookup("pull-always")
+ flag.Value.Set("true")
+ flag.DefValue = "true"
+ layerFlags := buildahcli.GetLayerFlags(&layerValues)
+ flag = layerFlags.Lookup("layers")
+ flag.Value.Set(useLayers())
+ flag.DefValue = (useLayers())
+ flag = layerFlags.Lookup("force-rm")
+ flag.Value.Set("true")
+ flag.DefValue = "true"
+
+ fromAndBugFlags := buildahcli.GetFromAndBudFlags(&fromAndBudValues, &userNSValues, &namespaceValues)
+
+ flags.AddFlagSet(&budFlags)
+ flags.AddFlagSet(&layerFlags)
+ flags.AddFlagSet(&fromAndBugFlags)
+}
+
func getDockerfiles(files []string) []string {
var dockerfiles []string
for _, f := range files {
@@ -54,30 +83,30 @@ func getDockerfiles(files []string) []string {
return dockerfiles
}
-func buildCmd(c *cli.Context) error {
+func buildCmd(c *cliconfig.BuildValues) error {
// The following was taken directly from containers/buildah/cmd/bud.go
// TODO Find a away to vendor more of this in rather than copy from bud
-
output := ""
tags := []string{}
- if c.IsSet("tag") || c.IsSet("t") {
- tags = c.StringSlice("tag")
+ if c.Flag("tag").Changed {
+ tags = c.Tag
if len(tags) > 0 {
output = tags[0]
tags = tags[1:]
}
}
+
pullPolicy := imagebuildah.PullNever
- if c.BoolT("pull") {
+ if c.Pull {
pullPolicy = imagebuildah.PullIfMissing
}
- if c.Bool("pull-always") {
+ if c.PullAlways {
pullPolicy = imagebuildah.PullAlways
}
args := make(map[string]string)
- if c.IsSet("build-arg") {
- for _, arg := range c.StringSlice("build-arg") {
+ if c.Flag("build-arg").Changed {
+ for _, arg := range c.BuildArg {
av := strings.SplitN(arg, "=", 2)
if len(av) > 1 {
args[av[0]] = av[1]
@@ -87,15 +116,15 @@ func buildCmd(c *cli.Context) error {
}
}
- dockerfiles := getDockerfiles(c.StringSlice("file"))
- format, err := getFormat(c)
+ dockerfiles := getDockerfiles(c.File)
+ format, err := getFormat(&c.PodmanCommand)
if err != nil {
return nil
}
contextDir := ""
- cliArgs := c.Args()
+ cliArgs := c.InputArgs
- layers := c.BoolT("layers") // layers for podman defaults to true
+ layers := c.Layers // layers for podman defaults to true
// Check to see if the BUILDAH_LAYERS environment variable is set and override command-line
if _, ok := os.LookupEnv("BUILDAH_LAYERS"); ok {
layers = buildahcli.UseLayers()
@@ -124,7 +153,7 @@ func buildCmd(c *cli.Context) error {
}
contextDir = absDir
}
- cliArgs = cliArgs.Tail()
+ cliArgs = Tail(cliArgs)
} else {
// No context directory or URL was specified. Try to use the
// home of the first locally-available Dockerfile.
@@ -153,30 +182,28 @@ func buildCmd(c *cli.Context) error {
if len(dockerfiles) == 0 {
dockerfiles = append(dockerfiles, filepath.Join(contextDir, "Dockerfile"))
}
- if err := parse.ValidateFlags(c, buildahcli.BudFlags); err != nil {
- return err
+
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
}
runtimeFlags := []string{}
- for _, arg := range c.StringSlice("runtime-flag") {
+ for _, arg := range c.RuntimeFlags {
runtimeFlags = append(runtimeFlags, "--"+arg)
}
// end from buildah
- runtime, err := libpodruntime.GetRuntime(c)
- if err != nil {
- return errors.Wrapf(err, "could not get runtime")
- }
defer runtime.Shutdown(false)
var stdout, stderr, reporter *os.File
stdout = os.Stdout
stderr = os.Stderr
reporter = os.Stderr
- if c.IsSet("logfile") {
- f, err := os.OpenFile(c.String("logfile"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+ if c.Flag("logfile").Changed {
+ f, err := os.OpenFile(c.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
if err != nil {
- return errors.Errorf("error opening logfile %q: %v", c.String("logfile"), err)
+ return errors.Errorf("error opening logfile %q: %v", c.Logfile, err)
}
defer f.Close()
logrus.SetOutput(f)
@@ -185,70 +212,83 @@ func buildCmd(c *cli.Context) error {
reporter = f
}
- systemContext, err := parse.SystemContextFromOptions(c)
- if err != nil {
- return errors.Wrapf(err, "error building system context")
- }
- systemContext.AuthFilePath = getAuthFile(c.String("authfile"))
- commonOpts, err := parse.CommonBuildOptions(c)
- if err != nil {
- return err
+ var memoryLimit, memorySwap int64
+ if c.Flags().Changed("memory") {
+ memoryLimit, err = units.RAMInBytes(c.Memory)
+ if err != nil {
+ return err
+ }
}
- namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c)
- if err != nil {
- return errors.Wrapf(err, "error parsing namespace-related options")
- }
- usernsOption, idmappingOptions, err := parse.IDMappingOptions(c)
- if err != nil {
- return errors.Wrapf(err, "error parsing ID mapping options")
+ if c.Flags().Changed("memory-swap") {
+ memorySwap, err = units.RAMInBytes(c.MemorySwap)
+ if err != nil {
+ return err
+ }
}
- namespaceOptions.AddOrReplace(usernsOption...)
- ociruntime := runtime.GetOCIRuntimePath()
- if c.IsSet("runtime") {
- ociruntime = c.String("runtime")
+ buildOpts := buildah.CommonBuildOptions{
+ AddHost: c.AddHost,
+ CgroupParent: c.CgroupParent,
+ CPUPeriod: c.CPUPeriod,
+ CPUQuota: c.CPUQuota,
+ CPUShares: c.CPUShares,
+ CPUSetCPUs: c.CPUSetCPUs,
+ CPUSetMems: c.CPUSetMems,
+ Memory: memoryLimit,
+ MemorySwap: memorySwap,
+ ShmSize: c.ShmSize,
+ Ulimit: c.Ulimit,
+ Volumes: c.Volume,
}
+
options := imagebuildah.BuildOptions{
- ContextDirectory: contextDir,
- PullPolicy: pullPolicy,
- Compression: imagebuildah.Gzip,
- Quiet: c.Bool("quiet"),
- SignaturePolicyPath: c.String("signature-policy"),
- Args: args,
- Output: output,
+ CommonBuildOpts: &buildOpts,
AdditionalTags: tags,
- Out: stdout,
+ Annotations: c.Annotation,
+ Args: args,
+ CNIConfigDir: c.CNIConfigDir,
+ CNIPluginPath: c.CNIPlugInPath,
+ Compression: imagebuildah.Gzip,
+ ContextDirectory: contextDir,
+ DefaultMountsFilePath: c.GlobalFlags.DefaultMountsFile,
Err: stderr,
+ ForceRmIntermediateCtrs: c.ForceRm,
+ IIDFile: c.Iidfile,
+ Labels: c.Label,
+ Layers: layers,
+ NoCache: c.NoCache,
+ Out: stdout,
+ Output: output,
+ OutputFormat: format,
+ PullPolicy: pullPolicy,
+ Quiet: c.Quiet,
+ RemoveIntermediateCtrs: c.Rm,
ReportWriter: reporter,
- Runtime: ociruntime,
RuntimeArgs: runtimeFlags,
- OutputFormat: format,
- SystemContext: systemContext,
- NamespaceOptions: namespaceOptions,
- ConfigureNetwork: networkPolicy,
- CNIPluginPath: c.String("cni-plugin-path"),
- CNIConfigDir: c.String("cni-config-dir"),
- IDMappingOptions: idmappingOptions,
- CommonBuildOpts: commonOpts,
- DefaultMountsFilePath: c.GlobalString("default-mounts-file"),
- IIDFile: c.String("iidfile"),
- Squash: c.Bool("squash"),
- Labels: c.StringSlice("label"),
- Annotations: c.StringSlice("annotation"),
- Layers: layers,
- NoCache: c.Bool("no-cache"),
- RemoveIntermediateCtrs: c.BoolT("rm"),
- ForceRmIntermediateCtrs: c.BoolT("force-rm"),
+ SignaturePolicyPath: c.SignaturePolicy,
+ Squash: c.Squash,
+ Target: c.Target,
}
+ return runtime.Build(getContext(), c, options, dockerfiles)
+}
- if c.Bool("quiet") {
- options.ReportWriter = ioutil.Discard
+// Tail returns a string slice after the first element; if there are
+// not enough elements, it returns an empty slice. This is to replace
+// the urfave/cli Tail method for args
+func Tail(a []string) []string {
+ if len(a) >= 2 {
+ return []string(a)[1:]
}
+ return []string{}
+}
- if rootless.IsRootless() {
- options.Isolation = buildah.IsolationOCIRootless
+// useLayers returns "false" if BUILDAH_LAYERS is set to "0" or "false",
+// otherwise it returns "true"
+func useLayers() string {
+ layers := os.Getenv("BUILDAH_LAYERS")
+ if strings.ToLower(layers) == "false" || layers == "0" {
+ return "false"
}
-
- return runtime.Build(getContext(), options, dockerfiles...)
+ return "true"
}
diff --git a/cmd/podman/checkpoint.go b/cmd/podman/checkpoint.go
index 5c3363147..c9de5638b 100644
--- a/cmd/podman/checkpoint.go
+++ b/cmd/podman/checkpoint.go
@@ -5,70 +5,69 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ checkpointCommand cliconfig.CheckpointValues
checkpointDescription = `
podman container checkpoint
Checkpoints one or more running containers. The container name or ID can be used.
`
- checkpointFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "keep, k",
- Usage: "Keep all temporary checkpoint files",
+ _checkpointCommand = &cobra.Command{
+ Use: "checkpoint",
+ Short: "Checkpoints one or more containers",
+ Long: checkpointDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ checkpointCommand.InputArgs = args
+ checkpointCommand.GlobalFlags = MainGlobalOpts
+ return checkpointCmd(&checkpointCommand)
},
- cli.BoolFlag{
- Name: "leave-running, R",
- Usage: "Leave the container running after writing checkpoint to disk",
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
},
- cli.BoolFlag{
- Name: "tcp-established",
- Usage: "Checkpoint a container with established TCP connections",
- },
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Checkpoint all running containers",
- },
- LatestFlag,
- }
- checkpointCommand = cli.Command{
- Name: "checkpoint",
- Usage: "Checkpoints one or more containers",
- Description: checkpointDescription,
- Flags: sortFlags(checkpointFlags),
- Action: checkpointCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
+ Example: `podman checkpoint --keep ctrID
+ podman checkpoint --all
+ podman checkpoint --leave-running --latest`,
}
)
-func checkpointCmd(c *cli.Context) error {
+func init() {
+ checkpointCommand.Command = _checkpointCommand
+ checkpointCommand.SetUsageTemplate(UsageTemplate())
+
+ flags := checkpointCommand.Flags()
+ flags.BoolVarP(&checkpointCommand.Keep, "keep", "k", false, "Keep all temporary checkpoint files")
+ flags.BoolVarP(&checkpointCommand.LeaveRunning, "leave-running", "R", false, "Leave the container running after writing checkpoint to disk")
+ flags.BoolVar(&checkpointCommand.TcpEstablished, "tcp-established", false, "Checkpoint a container with established TCP connections")
+ flags.BoolVarP(&checkpointCommand.All, "all", "a", false, "Checkpoint all running containers")
+ flags.BoolVarP(&checkpointCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func checkpointCmd(c *cliconfig.CheckpointValues) error {
if rootless.IsRootless() {
return errors.New("checkpointing a container requires root")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
options := libpod.ContainerCheckpointOptions{
- Keep: c.Bool("keep"),
- KeepRunning: c.Bool("leave-running"),
- TCPEstablished: c.Bool("tcp-established"),
- }
-
- if err := checkAllAndLatest(c); err != nil {
- return err
+ Keep: c.Keep,
+ KeepRunning: c.LeaveRunning,
+ TCPEstablished: c.TcpEstablished,
}
-
- containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+ containers, lastError := getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateRunning, "running")
for _, ctr := range containers {
if err = ctr.Checkpoint(context.TODO(), options); err != nil {
diff --git a/cmd/podman/cleanup.go b/cmd/podman/cleanup.go
index bc4af9f50..d68255aa2 100644
--- a/cmd/podman/cleanup.go
+++ b/cmd/podman/cleanup.go
@@ -4,60 +4,79 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- cleanupFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Cleans up all containers",
- },
- LatestFlag,
- }
+ cleanupCommand cliconfig.CleanupValues
cleanupDescription = `
podman container cleanup
Cleans up mount points and network stacks on one or more containers from the host. The container name or ID can be used. This command is used internally when running containers, but can also be used if container cleanup has failed when a container exits.
`
- cleanupCommand = cli.Command{
- Name: "cleanup",
- Usage: "Cleanup network and mountpoints of one or more containers",
- Description: cleanupDescription,
- Flags: sortFlags(cleanupFlags),
- Action: cleanupCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
- OnUsageError: usageErrorHandler,
+ _cleanupCommand = &cobra.Command{
+ Use: "cleanup",
+ Short: "Cleanup network and mountpoints of one or more containers",
+ Long: cleanupDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ cleanupCommand.InputArgs = args
+ cleanupCommand.GlobalFlags = MainGlobalOpts
+ return cleanupCmd(&cleanupCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman container cleanup --latest
+ podman container cleanup ctrID1 ctrID2 ctrID3
+ podman container cleanup --all`,
}
)
-func cleanupCmd(c *cli.Context) error {
- if err := validateFlags(c, cleanupFlags); err != nil {
- return err
- }
- runtime, err := libpodruntime.GetRuntime(c)
+func init() {
+ cleanupCommand.Command = _cleanupCommand
+ cleanupCommand.SetUsageTemplate(UsageTemplate())
+ flags := cleanupCommand.Flags()
+
+ flags.BoolVarP(&cleanupCommand.All, "all", "a", false, "Cleans up all containers")
+ flags.BoolVarP(&cleanupCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.BoolVar(&cleanupCommand.Remove, "rm", false, "After cleanup, remove the container entirely")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func cleanupCmd(c *cliconfig.CleanupValues) error {
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- if err := checkAllAndLatest(c); err != nil {
- return err
- }
-
- cleanupContainers, lastError := getAllOrLatestContainers(c, runtime, -1, "all")
+ cleanupContainers, lastError := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
ctx := getContext()
for _, ctr := range cleanupContainers {
- if err = ctr.Cleanup(ctx); err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
+ hadError := false
+ if c.Remove {
+ if err := runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID())
+ hadError = true
}
- lastError = errors.Wrapf(err, "failed to cleanup container %v", ctr.ID())
} else {
+ if err := ctr.Cleanup(ctx); err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "failed to cleanup container %v", ctr.ID())
+ hadError = true
+ }
+ }
+ if !hadError {
fmt.Println(ctr.ID())
}
}
diff --git a/cmd/podman/cliconfig/commands.go b/cmd/podman/cliconfig/commands.go
new file mode 100644
index 000000000..7d1e762d9
--- /dev/null
+++ b/cmd/podman/cliconfig/commands.go
@@ -0,0 +1,109 @@
+package cliconfig
+
+// GlobalIsSet is a compatibility method for urfave
+func (p *PodmanCommand) GlobalIsSet(opt string) bool {
+ flag := p.PersistentFlags().Lookup(opt)
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// IsSet is a compatibility method for urfave
+func (p *PodmanCommand) IsSet(opt string) bool {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Bool is a compatibility method for urfave
+func (p *PodmanCommand) Bool(opt string) bool {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return false
+ }
+ val, _ := p.Flags().GetBool(opt)
+ return val
+}
+
+// String is a compatibility method for urfave
+func (p *PodmanCommand) String(opt string) string {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return ""
+ }
+ val, _ := p.Flags().GetString(opt)
+ return val
+}
+
+// StringArray is a compatibility method for urfave
+func (p *PodmanCommand) StringArray(opt string) []string {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return []string{}
+ }
+ val, _ := p.Flags().GetStringArray(opt)
+ return val
+}
+
+// StringSlice is a compatibility method for urfave
+func (p *PodmanCommand) StringSlice(opt string) []string {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return []string{}
+ }
+ val, _ := p.Flags().GetStringSlice(opt)
+ return val
+}
+
+// Int is a compatibility method for urfave
+func (p *PodmanCommand) Int(opt string) int {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return 0
+ }
+ val, _ := p.Flags().GetInt(opt)
+ return val
+}
+
+// Uint is a compatibility method for urfave
+func (p *PodmanCommand) Uint(opt string) uint {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return 0
+ }
+ val, _ := p.Flags().GetUint(opt)
+ return val
+}
+
+// Int64 is a compatibility method for urfave
+func (p *PodmanCommand) Int64(opt string) int64 {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return 0
+ }
+ val, _ := p.Flags().GetInt64(opt)
+ return val
+}
+
+// Uint64 is a compatibility method for urfave
+func (p *PodmanCommand) Uint64(opt string) uint64 {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return 0
+ }
+ val, _ := p.Flags().GetUint64(opt)
+ return val
+}
+
+// Float64 is a compatibility method for urfave
+func (p *PodmanCommand) Float64(opt string) float64 {
+ flag := p.Flags().Lookup(opt)
+ if flag == nil {
+ return 0
+ }
+ val, _ := p.Flags().GetFloat64(opt)
+ return val
+}
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
new file mode 100644
index 000000000..a9032202f
--- /dev/null
+++ b/cmd/podman/cliconfig/config.go
@@ -0,0 +1,554 @@
+package cliconfig
+
+import (
+ "github.com/spf13/cobra"
+)
+
+type PodmanCommand struct {
+ *cobra.Command
+ InputArgs []string
+ GlobalFlags MainFlags
+}
+
+type MainFlags struct {
+ CGroupManager string
+ CniConfigDir string
+ ConmonPath string
+ DefaultMountsFile string
+ HooksDir []string
+ MaxWorks int
+ Namespace string
+ Root string
+ Runroot string
+ Runtime string
+ StorageDriver string
+ StorageOpts []string
+ Syslog bool
+ Trace bool
+
+ Config string
+ CpuProfile string
+ LogLevel string
+ TmpDir string
+}
+
+type AttachValues struct {
+ PodmanCommand
+ DetachKeys string
+ Latest bool
+ NoStdin bool
+ SigProxy bool
+}
+
+type ImagesValues struct {
+ PodmanCommand
+ All bool
+ Digests bool
+ Filter []string
+ Format string
+ Noheading bool
+ NoTrunc bool
+ Quiet bool
+ Sort string
+}
+
+type TagValues struct {
+ PodmanCommand
+}
+
+type WaitValues struct {
+ PodmanCommand
+ Interval uint
+ Latest bool
+}
+
+type CheckpointValues struct {
+ PodmanCommand
+ Keep bool
+ LeaveRunning bool
+ TcpEstablished bool
+ All bool
+ Latest bool
+}
+
+type CommitValues struct {
+ PodmanCommand
+ Change []string
+ Format string
+ Message string
+ Author string
+ Pause bool
+ Quiet bool
+}
+
+type ContainersPrune struct {
+ PodmanCommand
+}
+
+type DiffValues struct {
+ PodmanCommand
+ Archive bool
+ Format string
+}
+
+type ExecValues struct {
+ PodmanCommand
+ Env []string
+ Privileged bool
+ Interfactive bool
+ Tty bool
+ User string
+ Latest bool
+ Workdir string
+}
+
+type ImageExistsValues struct {
+ PodmanCommand
+}
+
+type ContainerExistsValues struct {
+ PodmanCommand
+}
+
+type PodExistsValues struct {
+ PodmanCommand
+}
+
+type ExportValues struct {
+ PodmanCommand
+ Output string
+}
+
+type GenerateKubeValues struct {
+ PodmanCommand
+ Service bool
+}
+
+type HistoryValues struct {
+ PodmanCommand
+ Human bool
+ NoTrunc bool
+ Quiet bool
+ Format string
+}
+type PruneImagesValues struct {
+ PodmanCommand
+ All bool
+}
+
+type PruneContainersValues struct {
+ PodmanCommand
+ Force bool
+}
+
+type ImportValues struct {
+ PodmanCommand
+ Change []string
+ Message string
+ Quiet bool
+}
+
+type InfoValues struct {
+ PodmanCommand
+ Debug bool
+ Format string
+}
+
+type InspectValues struct {
+ PodmanCommand
+ TypeObject string
+ Format string
+ Size bool
+ Latest bool
+}
+
+type KillValues struct {
+ PodmanCommand
+ All bool
+ Signal string
+ Latest bool
+}
+
+type LoadValues struct {
+ PodmanCommand
+ Input string
+ Quiet bool
+ SignaturePolicy string
+}
+
+type LoginValues struct {
+ PodmanCommand
+ Password string
+ StdinPassword bool
+ Username string
+ Authfile string
+ CertDir string
+ GetLogin bool
+ TlsVerify bool
+}
+
+type LogoutValues struct {
+ PodmanCommand
+ Authfile string
+ All bool
+}
+
+type LogsValues struct {
+ PodmanCommand
+ Details bool
+ Follow bool
+ Since string
+ Tail uint64
+ Timestamps bool
+ Latest bool
+}
+
+type MountValues struct {
+ PodmanCommand
+ All bool
+ Format string
+ NoTrunc bool
+ Latest bool
+}
+
+type PauseValues struct {
+ PodmanCommand
+ All bool
+}
+
+type KubePlayValues struct {
+ PodmanCommand
+ Authfile string
+ CertDir string
+ Creds string
+ Quiet bool
+ SignaturePolicy string
+ TlsVerify bool
+}
+
+type PodCreateValues struct {
+ PodmanCommand
+ CgroupParent string
+ Infra bool
+ InfraImage string
+ InfraCommand string
+ LabelFile []string
+ Labels []string
+ Name string
+ PodIDFile string
+ Publish []string
+ Share string
+}
+
+type PodInspectValues struct {
+ PodmanCommand
+ Latest bool
+}
+
+type PodKillValues struct {
+ PodmanCommand
+ All bool
+ Signal string
+ Latest bool
+}
+
+type PodPauseValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+}
+
+type PodPsValues struct {
+ PodmanCommand
+ CtrNames bool
+ CtrIDs bool
+ CtrStatus bool
+ Filter string
+ Format string
+ Latest bool
+ Namespace bool
+ NoTrunc bool
+ Quiet bool
+ Sort string
+}
+
+type PodRestartValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+}
+
+type PodRmValues struct {
+ PodmanCommand
+ All bool
+ Force bool
+ Latest bool
+}
+
+type PodStartValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+}
+type PodStatsValues struct {
+ PodmanCommand
+ All bool
+ NoStream bool
+ NoReset bool
+ Format string
+ Latest bool
+}
+
+type PodStopValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+ Timeout uint
+}
+
+type PodTopValues struct {
+ PodmanCommand
+ Latest bool
+ ListDescriptors bool
+}
+type PodUnpauseValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+}
+
+type PortValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+}
+
+type PsValues struct {
+ PodmanCommand
+ All bool
+ Filter []string
+ Format string
+ Last int
+ Latest bool
+ Namespace bool
+ NoTrunct bool
+ Pod bool
+ Quiet bool
+ Size bool
+ Sort string
+ Sync bool
+}
+
+type PullValues struct {
+ PodmanCommand
+ AllTags bool
+ Authfile string
+ CertDir string
+ Creds string
+ Quiet bool
+ SignaturePolicy string
+ TlsVerify bool
+}
+
+type PushValues struct {
+ PodmanCommand
+ Authfile string
+ CertDir string
+ Compress bool
+ Creds string
+ Format string
+ Quiet bool
+ RemoveSignatures bool
+ SignBy string
+ SignaturePolicy string
+ TlsVerify bool
+}
+
+type RefreshValues struct {
+ PodmanCommand
+}
+
+type RestartValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+ Running bool
+ Timeout uint
+}
+
+type RestoreValues struct {
+ PodmanCommand
+ All bool
+ Keep bool
+ Latest bool
+ TcpEstablished bool
+}
+
+type RmValues struct {
+ PodmanCommand
+ All bool
+ Force bool
+ Latest bool
+ Volumes bool
+}
+
+type RmiValues struct {
+ PodmanCommand
+ All bool
+ Force bool
+}
+
+type RunlabelValues struct {
+ PodmanCommand
+ Authfile string
+ Display bool
+ CertDir string
+ Creds string
+ Name string
+ Opt1 string
+ Opt2 string
+ Opt3 string
+ Quiet bool
+ Pull bool
+ SignaturePolicy string
+ TlsVerify bool
+}
+type SaveValues struct {
+ PodmanCommand
+ Compress bool
+ Format string
+ Output string
+ Quiet bool
+}
+
+type SearchValues struct {
+ PodmanCommand
+ Authfile string
+ Filter []string
+ Format string
+ Limit int
+ NoTrunc bool
+ TlsVerify bool
+}
+
+type SignValues struct {
+ PodmanCommand
+ Directory string
+ SignBy string
+}
+
+type StartValues struct {
+ PodmanCommand
+ Attach bool
+ DetachKeys string
+ Interactive bool
+ Latest bool
+ SigProxy bool
+}
+
+type StatsValues struct {
+ PodmanCommand
+ All bool
+ Format string
+ Latest bool
+ NoReset bool
+ NoStream bool
+}
+
+type StopValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+ Timeout uint
+}
+
+type TopValues struct {
+ PodmanCommand
+ Latest bool
+ ListDescriptors bool
+}
+
+type UmountValues struct {
+ PodmanCommand
+ All bool
+ Force bool
+ Latest bool
+}
+
+type UnpauseValues struct {
+ PodmanCommand
+ All bool
+}
+
+type VarlinkValues struct {
+ PodmanCommand
+ Timeout int64
+}
+
+type SetTrustValues struct {
+ PodmanCommand
+ PolicyPath string
+ PubKeysFile []string
+ TrustType string
+}
+
+type ShowTrustValues struct {
+ PodmanCommand
+ Json bool
+ PolicyPath string
+ Raw bool
+ RegistryPath string
+}
+
+type VersionValues struct {
+ PodmanCommand
+ Format string
+}
+
+type VolumeCreateValues struct {
+ PodmanCommand
+ Driver string
+ Label []string
+ Opt []string
+}
+type VolumeInspectValues struct {
+ PodmanCommand
+ All bool
+ Format string
+}
+
+type VolumeLsValues struct {
+ PodmanCommand
+ Filter string
+ Format string
+ Quiet bool
+}
+
+type VolumePruneValues struct {
+ PodmanCommand
+ Force bool
+}
+
+type VolumeRmValues struct {
+ PodmanCommand
+ All bool
+ Force bool
+}
+
+type CleanupValues struct {
+ PodmanCommand
+ All bool
+ Latest bool
+ Remove bool
+}
+
+type SystemPruneValues struct {
+ PodmanCommand
+ All bool
+ Force bool
+ Volume bool
+}
+
+type SystemRenumberValues struct {
+ PodmanCommand
+}
diff --git a/cmd/podman/cliconfig/create.go b/cmd/podman/cliconfig/create.go
new file mode 100644
index 000000000..b5ca1be9c
--- /dev/null
+++ b/cmd/podman/cliconfig/create.go
@@ -0,0 +1,26 @@
+package cliconfig
+
+import (
+ buildahcli "github.com/containers/buildah/pkg/cli"
+)
+
+type CreateValues struct {
+ PodmanCommand
+}
+
+type RunValues struct {
+ PodmanCommand
+}
+
+type BuildValues struct {
+ PodmanCommand
+ *buildahcli.BudResults
+ *buildahcli.UserNSResults
+ *buildahcli.FromAndBudResults
+ *buildahcli.NameSpaceResults
+ *buildahcli.LayerResults
+}
+
+type CpValues struct {
+ PodmanCommand
+}
diff --git a/cmd/podman/commands.go b/cmd/podman/commands.go
index 57126da4a..fadcca689 100644
--- a/cmd/podman/commands.go
+++ b/cmd/podman/commands.go
@@ -2,151 +2,126 @@
package main
-import "github.com/urfave/cli"
+import (
+ "github.com/spf13/cobra"
+)
-func getAppCommands() []cli.Command {
- return []cli.Command{
- attachCommand,
- commitCommand,
- buildCommand,
- createCommand,
- diffCommand,
- execCommand,
- killCommand,
- kubeCommand,
- loadCommand,
- loginCommand,
- logoutCommand,
- logsCommand,
- mountCommand,
- pauseCommand,
- psCommand,
- podCommand,
- portCommand,
- pushCommand,
- playCommand,
- restartCommand,
- rmCommand,
- runCommand,
- saveCommand,
- searchCommand,
- startCommand,
- statsCommand,
- stopCommand,
- topCommand,
- umountCommand,
- unpauseCommand,
- volumeCommand,
- waitCommand,
+const remoteclient = false
+
+// Commands that the local client implements
+func getMainCommands() []*cobra.Command {
+ rootCommands := []*cobra.Command{
+ _attachCommand,
+ _commitCommand,
+ _createCommand,
+ _diffCommand,
+ _execCommand,
+ _generateCommand,
+ _playCommand,
+ _psCommand,
+ _loginCommand,
+ _logoutCommand,
+ _logsCommand,
+ _mountCommand,
+ _pauseCommand,
+ _portCommand,
+ _refreshCommand,
+ _restartCommand,
+ _restoreCommand,
+ _rmCommand,
+ _runCommand,
+ _searchCommand,
+ _signCommand,
+ _startCommand,
+ _statsCommand,
+ _stopCommand,
+ _topCommand,
+ _umountCommand,
+ _unpauseCommand,
+ _waitCommand,
+ }
+
+ if len(_varlinkCommand.Use) > 0 {
+ rootCommands = append(rootCommands, _varlinkCommand)
+ }
+ return rootCommands
+}
+
+// Commands that the local client implements
+func getImageSubCommands() []*cobra.Command {
+ return []*cobra.Command{
+ _loadCommand,
+ _signCommand,
+ }
+}
+
+// Commands that the local client implements
+func getContainerSubCommands() []*cobra.Command {
+ return []*cobra.Command{
+ _attachCommand,
+ _checkpointCommand,
+ _cleanupCommand,
+ _commitCommand,
+ _createCommand,
+ _diffCommand,
+ _execCommand,
+ _exportCommand,
+ _killCommand,
+ _logsCommand,
+ _psCommand,
+ _mountCommand,
+ _pauseCommand,
+ _portCommand,
+ _pruneContainersCommand,
+ _refreshCommand,
+ _restartCommand,
+ _restoreCommand,
+ _rmCommand,
+ _runCommand,
+ _runlabelCommand,
+ _startCommand,
+ _statsCommand,
+ _stopCommand,
+ _topCommand,
+ _umountCommand,
+ _unpauseCommand,
+ _waitCommand,
}
}
-func getImageSubCommands() []cli.Command {
- return []cli.Command{
- buildCommand,
- importCommand,
- loadCommand,
- pullCommand,
- saveCommand,
- trustCommand,
- signCommand,
+// Commands that the local client implements
+func getPodSubCommands() []*cobra.Command {
+ return []*cobra.Command{
+ _podStatsCommand,
+ _podTopCommand,
}
}
-func getSystemSubCommands() []cli.Command {
- return []cli.Command{infoCommand}
+func getGenerateSubCommands() []*cobra.Command {
+ return []*cobra.Command{
+ _containerKubeCommand,
+ }
}
-func getContainerSubCommands() []cli.Command {
- return []cli.Command{
- attachCommand,
- checkpointCommand,
- cleanupCommand,
- containerExistsCommand,
- commitCommand,
- createCommand,
- diffCommand,
- execCommand,
- exportCommand,
- killCommand,
- logsCommand,
- psCommand,
- mountCommand,
- pauseCommand,
- portCommand,
- pruneContainersCommand,
- refreshCommand,
- restartCommand,
- restoreCommand,
- rmCommand,
- runCommand,
- runlabelCommand,
- startCommand,
- statsCommand,
- stopCommand,
- topCommand,
- umountCommand,
- unpauseCommand,
- // updateCommand,
- waitCommand,
+// Commands that the local client implements
+func getPlaySubCommands() []*cobra.Command {
+ return []*cobra.Command{
+ _playKubeCommand,
}
}
-func getMainAppFlags() []cli.Flag {
- return []cli.Flag{
- cli.StringFlag{
- Name: "cgroup-manager",
- Usage: "Cgroup manager to use (cgroupfs or systemd, default systemd)",
- },
- cli.StringFlag{
- Name: "cni-config-dir",
- Usage: "Path of the configuration directory for CNI networks",
- },
- cli.StringFlag{
- Name: "conmon",
- Usage: "Path of the conmon binary",
- },
- cli.StringFlag{
- Name: "default-mounts-file",
- Usage: "Path to default mounts file",
- Hidden: true,
- },
- cli.StringSliceFlag{
- Name: "hooks-dir",
- Usage: "Set the OCI hooks directory path (may be set multiple times)",
- },
- cli.IntFlag{
- Name: "max-workers",
- Usage: "The maximum number of workers for parallel operations",
- Hidden: true,
- },
- cli.StringFlag{
- Name: "namespace",
- Usage: "Set the libpod namespace, used to create separate views of the containers and pods on the system",
- Value: "",
- },
- cli.StringFlag{
- Name: "root",
- Usage: "Path to the root directory in which data, including images, is stored",
- },
- cli.StringFlag{
- Name: "runroot",
- Usage: "Path to the 'run directory' where all state information is stored",
- },
- cli.StringFlag{
- Name: "runtime",
- Usage: "Path to the OCI-compatible binary used to run containers, default is /usr/bin/runc",
- },
- cli.StringFlag{
- Name: "storage-driver, s",
- Usage: "Select which storage driver is used to manage storage of images and containers (default is overlay)",
- },
- cli.StringSliceFlag{
- Name: "storage-opt",
- Usage: "Used to pass an option to the storage driver",
- },
- cli.BoolFlag{
- Name: "syslog",
- Usage: "Output logging information to syslog as well as the console",
- },
+
+// Commands that the local client implements
+func getTrustSubCommands() []*cobra.Command {
+ return []*cobra.Command{
+ _setTrustCommand,
+ _showTrustCommand,
+ }
+}
+
+// Commands that the local client implements
+func getSystemSubCommands() []*cobra.Command {
+ return []*cobra.Command{
+ _pruneSystemCommand,
+ _renumberCommand,
}
}
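
Taken together, commands.go wires in everything the local client supports, while its remote counterpart below supplies empty stubs. The aggregation pattern itself is simple; a minimal sketch (the helper name and the `universal` parameter are illustrative, only getImageSubCommands comes from this patch):

func registerImageCommands(parent *cobra.Command, universal []*cobra.Command) {
	// Subcommands available in every build of the client.
	parent.AddCommand(universal...)
	// Subcommands contributed by this particular build; the remote build's
	// getter returns an empty slice, so this call is a no-op there.
	parent.AddCommand(getImageSubCommands()...)
}
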
diff --git a/cmd/podman/commands_remoteclient.go b/cmd/podman/commands_remoteclient.go
index 0adbd7b4c..081043b25 100644
--- a/cmd/podman/commands_remoteclient.go
+++ b/cmd/podman/commands_remoteclient.go
@@ -2,24 +2,53 @@
package main
-import "github.com/urfave/cli"
+import (
+ "github.com/spf13/cobra"
+)
-func getAppCommands() []cli.Command {
- return []cli.Command{}
+const remoteclient = true
+
+// commands that only the remoteclient implements
+func getMainCommands() []*cobra.Command {
+ return []*cobra.Command{}
+}
+
+// commands that only the remoteclient implements
+func getAppCommands() []*cobra.Command {
+ return []*cobra.Command{}
+}
+
+// commands that only the remoteclient implements
+func getImageSubCommands() []*cobra.Command {
+ return []*cobra.Command{}
+}
+
+// commands that only the remoteclient implements
+func getContainerSubCommands() []*cobra.Command {
+ return []*cobra.Command{}
+}
+
+// commands that only the remoteclient implements
+func getPodSubCommands() []*cobra.Command {
+ return []*cobra.Command{}
}
-func getImageSubCommands() []cli.Command {
- return []cli.Command{}
+// commands that only the remoteclient implements
+func getGenerateSubCommands() []*cobra.Command {
+ return []*cobra.Command{}
}
-func getContainerSubCommands() []cli.Command {
- return []cli.Command{}
+// commands that only the remoteclient implements
+func getPlaySubCommands() []*cobra.Command {
+ return []*cobra.Command{}
}
-func getSystemSubCommands() []cli.Command {
- return []cli.Command{}
+// commands that only the remoteclient implements
+func getTrustSubCommands() []*cobra.Command {
+ return []*cobra.Command{}
}
-func getMainAppFlags() []cli.Flag {
- return []cli.Flag{}
+// commands that only the remoteclient implements
+func getSystemSubCommands() []*cobra.Command {
+ return []*cobra.Command{}
}
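
The remote file also defines the `remoteclient` constant, so code shared by both builds can branch on it instead of growing more build-tagged files. A hedged sketch of such a guard (the function is illustrative; errors is the github.com/pkg/errors package already imported throughout this patch):

func requireLocalClient(operation string) error {
	// Illustrative only: refuse an operation that needs local storage when
	// the binary was built as the remote client.
	if remoteclient {
		return errors.Errorf("%s is not supported by the remote client", operation)
	}
	return nil
}
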
diff --git a/cmd/podman/commit.go b/cmd/podman/commit.go
index 02ede4f73..d8ced0e36 100644
--- a/cmd/podman/commit.go
+++ b/cmd/podman/commit.go
@@ -2,6 +2,8 @@ package main
import (
"fmt"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
"io"
"os"
"strings"
@@ -13,57 +15,45 @@ import (
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
- "github.com/urfave/cli"
)
var (
- commitFlags = []cli.Flag{
- cli.StringSliceFlag{
- Name: "change, c",
- Usage: fmt.Sprintf("Apply the following possible instructions to the created image (default []): %s", strings.Join(libpod.ChangeCmds, " | ")),
- },
- cli.StringFlag{
- Name: "format, f",
- Usage: "`format` of the image manifest and metadata",
- Value: "oci",
- },
- cli.StringFlag{
- Name: "message, m",
- Usage: "Set commit message for imported image",
- },
- cli.StringFlag{
- Name: "author, a",
- Usage: "Set the author for the image committed",
- },
- cli.BoolTFlag{
- Name: "pause, p",
- Usage: "Pause container during commit",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Suppress output",
- },
- }
+ commitCommand cliconfig.CommitValues
commitDescription = `Create an image from a container's changes.
Optionally tag the image created, set the author with the --author flag,
set the commit message with the --message flag,
and make changes to the instructions with the --change flag.`
- commitCommand = cli.Command{
- Name: "commit",
- Usage: "Create new image based on the changed container",
- Description: commitDescription,
- Flags: sortFlags(commitFlags),
- Action: commitCmd,
- ArgsUsage: "CONTAINER [REPOSITORY[:TAG]]",
- OnUsageError: usageErrorHandler,
+
+ _commitCommand = &cobra.Command{
+ Use: "commit",
+ Short: "Create new image based on the changed container",
+ Long: commitDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ commitCommand.InputArgs = args
+ commitCommand.GlobalFlags = MainGlobalOpts
+ return commitCmd(&commitCommand)
+ },
+ Example: `podman commit -q --message "committing container to image" reverent_golick image-committed
+ podman commit -q --author "firstName lastName" reverent_golick image-committed
+ podman commit -q --pause=false containerID image-committed`,
}
)
-func commitCmd(c *cli.Context) error {
- if err := validateFlags(c, commitFlags); err != nil {
- return err
- }
- runtime, err := libpodruntime.GetRuntime(c)
+func init() {
+ commitCommand.Command = _commitCommand
+ commitCommand.SetUsageTemplate(UsageTemplate())
+ flags := commitCommand.Flags()
+ flags.StringSliceVarP(&commitCommand.Change, "change", "c", []string{}, fmt.Sprintf("Apply the following possible instructions to the created image (default []): %s", strings.Join(libpod.ChangeCmds, " | ")))
+ flags.StringVarP(&commitCommand.Format, "format", "f", "oci", "`Format` of the image manifest and metadata")
+ flags.StringVarP(&commitCommand.Message, "message", "m", "", "Set commit message for imported image")
+ flags.StringVarP(&commitCommand.Author, "author", "a", "", "Set the author for the image committed")
+ flags.BoolVarP(&commitCommand.Pause, "pause", "p", false, "Pause container during commit")
+ flags.BoolVarP(&commitCommand.Quiet, "quiet", "q", false, "Suppress output")
+
+}
+
+func commitCmd(c *cliconfig.CommitValues) error {
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -73,26 +63,26 @@ func commitCmd(c *cli.Context) error {
writer io.Writer
mimeType string
)
- args := c.Args()
+ args := c.InputArgs
if len(args) != 2 {
return errors.Errorf("you must provide a container name or ID and a target image name")
}
- switch c.String("format") {
+ switch c.Format {
case "oci":
mimeType = buildah.OCIv1ImageManifest
- if c.IsSet("message") || c.IsSet("m") {
+ if c.Flag("message").Changed {
return errors.Errorf("messages are only compatible with the docker image format (-f docker)")
}
case "docker":
mimeType = manifest.DockerV2Schema2MediaType
default:
- return errors.Errorf("unrecognized image format %q", c.String("format"))
+ return errors.Errorf("unrecognized image format %q", c.Format)
}
container := args[0]
reference := args[1]
- if c.IsSet("change") || c.IsSet("c") {
- for _, change := range c.StringSlice("change") {
+ if c.Flag("change").Changed {
+ for _, change := range c.Change {
splitChange := strings.Split(strings.ToUpper(change), "=")
if !util.StringInSlice(splitChange[0], libpod.ChangeCmds) {
return errors.Errorf("invalid syntax for --change: %s", change)
@@ -100,7 +90,7 @@ func commitCmd(c *cli.Context) error {
}
}
- if !c.Bool("quiet") {
+ if !c.Quiet {
writer = os.Stderr
}
ctr, err := runtime.LookupContainer(container)
@@ -117,10 +107,10 @@ func commitCmd(c *cli.Context) error {
}
options := libpod.ContainerCommitOptions{
CommitOptions: coptions,
- Pause: c.Bool("pause"),
- Message: c.String("message"),
- Changes: c.StringSlice("change"),
- Author: c.String("author"),
+ Pause: c.Pause,
+ Message: c.Message,
+ Changes: c.Change,
+ Author: c.Author,
}
newImage, err := ctr.Commit(getContext(), reference, options)
if err != nil {
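
commitCmd now receives a cliconfig.CommitValues instead of a *cli.Context. Its real definition lives in cmd/podman/cliconfig/config.go (changed in this patch but not shown here); a plausible minimal shape, inferred only from how the fields are used above:

// Inferred sketch, not the authoritative definition.
type CommitValues struct {
	PodmanCommand // assumed wrapper around *cobra.Command carrying InputArgs and GlobalFlags
	Change  []string
	Format  string
	Message string
	Author  string
	Pause   bool
	Quiet   bool
}
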
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index 82d60d62e..e297f3921 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -3,35 +3,21 @@ package main
import (
"context"
"fmt"
+ "github.com/spf13/cobra"
"os"
- "reflect"
- "regexp"
- "sort"
"strings"
"github.com/containers/buildah"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/fatih/camelcase"
"github.com/pkg/errors"
- "github.com/urfave/cli"
)
var (
- stores = make(map[storage.Store]struct{})
- LatestFlag = cli.BoolFlag{
- Name: "latest, l",
- Usage: "Act on the latest container podman is aware of",
- }
- LatestPodFlag = cli.BoolFlag{
- Name: "latest, l",
- Usage: "Act on the latest pod podman is aware of",
- }
- WorkDirFlag = cli.StringFlag{
- Name: "workdir, w",
- Usage: "Working directory inside the container",
- }
+ stores = make(map[storage.Store]struct{})
)
const (
@@ -50,61 +36,25 @@ func shortID(id string) string {
return id
}
-func usageErrorHandler(context *cli.Context, err error, _ bool) error {
- cmd := context.App.Name
- if len(context.Command.Name) > 0 {
- cmd = cmd + " " + context.Command.Name
- }
- return fmt.Errorf("%s\nSee '%s --help'.", err, cmd)
-}
-
-func commandNotFoundHandler(context *cli.Context, command string) {
- fmt.Fprintf(os.Stderr, "Command %q not found.\nSee `%s --help`.\n", command, context.App.Name)
- os.Exit(exitCode)
-}
-
-// validateFlags searches for StringFlags or StringSlice flags that never had
-// a value set. This commonly occurs when the CLI mistakenly takes the next
-// option and uses it as a value.
-func validateFlags(c *cli.Context, flags []cli.Flag) error {
- for _, flag := range flags {
- switch reflect.TypeOf(flag).String() {
- case "cli.StringSliceFlag":
- {
- f := flag.(cli.StringSliceFlag)
- name := strings.Split(f.Name, ",")
- val := c.StringSlice(name[0])
- for _, v := range val {
- if ok, _ := regexp.MatchString("^-.+", v); ok {
- return errors.Errorf("option --%s requires a value", name[0])
- }
- }
- }
- case "cli.StringFlag":
- {
- f := flag.(cli.StringFlag)
- name := strings.Split(f.Name, ",")
- val := c.String(name[0])
- if ok, _ := regexp.MatchString("^-.+", val); ok {
- return errors.Errorf("option --%s requires a value", name[0])
- }
- }
- }
- }
- return nil
-}
-
// checkAllAndLatest checks that --all and --latest are used correctly
-func checkAllAndLatest(c *cli.Context) error {
- argLen := len(c.Args())
- if (c.Bool("all") || c.Bool("latest")) && argLen > 0 {
- return errors.Errorf("no arguments are needed with --all or --latest")
+func checkAllAndLatest(c *cobra.Command, args []string, ignoreArgLen bool) error {
+ argLen := len(args)
+ if c.Flags().Lookup("all") == nil || c.Flags().Lookup("latest") == nil {
+ return errors.New("unable to lookup values for 'latest' or 'all'")
}
- if c.Bool("all") && c.Bool("latest") {
+ all, _ := c.Flags().GetBool("all")
+ latest, _ := c.Flags().GetBool("latest")
+ if all && latest {
return errors.Errorf("--all and --latest cannot be used together")
}
- if argLen < 1 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("you must provide at least one pod name or id")
+ if ignoreArgLen {
+ return nil
+ }
+ if (all || latest) && argLen > 0 {
+ return errors.Errorf("no arguments are needed with --all or --latest")
+ }
+ if argLen < 1 && !all && !latest {
+ return errors.Errorf("you must provide at least one name or id")
}
return nil
}
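
A typical caller hands checkAllAndLatest the cobra command, the raw arguments, and whether an empty argument list is acceptable. A hypothetical command is sketched below purely to illustrate the call (it assumes the command has registered --all and --latest flags):

var exampleStopCommand = &cobra.Command{
	Use:   "stop [flags] CONTAINER [CONTAINER...]",
	Short: "Illustration of the --all/--latest validation",
	RunE: func(cmd *cobra.Command, args []string) error {
		// Rejects --all/--latest combined with explicit names, and rejects
		// an empty selection when ignoreArgLen is false.
		if err := checkAllAndLatest(cmd, args, false); err != nil {
			return err
		}
		// ... act on the selected containers ...
		return nil
	},
}
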
@@ -117,7 +67,7 @@ func checkAllAndLatest(c *cli.Context) error {
// is desired a -1 can be used to get all containers. For a better
// error message, if the filter fails, a corresponding verb can be
// specified which will then appear in the error message.
-func getAllOrLatestContainers(c *cli.Context, runtime *libpod.Runtime, filterState libpod.ContainerStatus, verb string) ([]*libpod.Container, error) {
+func getAllOrLatestContainers(c *cliconfig.PodmanCommand, runtime *libpod.Runtime, filterState libpod.ContainerStatus, verb string) ([]*libpod.Container, error) {
var containers []*libpod.Container
var lastError error
var err error
@@ -142,7 +92,7 @@ func getAllOrLatestContainers(c *cli.Context, runtime *libpod.Runtime, filterSta
}
containers = append(containers, lastCtr)
} else {
- args := c.Args()
+ args := c.InputArgs
for _, i := range args {
container, err := runtime.LookupContainer(i)
if err != nil {
@@ -163,6 +113,9 @@ func getAllOrLatestContainers(c *cli.Context, runtime *libpod.Runtime, filterSta
// getContext returns a non-nil, empty context
func getContext() context.Context {
+ if Ctx != nil {
+ return Ctx
+ }
return context.TODO()
}
@@ -173,363 +126,367 @@ func getDefaultNetwork() string {
return "bridge"
}
-// Common flags shared between commands
-var createFlags = []cli.Flag{
- cli.StringSliceFlag{
- Name: "add-host",
- Usage: "Add a custom host-to-IP mapping (host:ip) (default [])",
- },
- cli.StringSliceFlag{
- Name: "annotation",
- Usage: "Add annotations to container (key:value) (default [])",
- },
- cli.StringSliceFlag{
- Name: "attach, a",
- Usage: "Attach to STDIN, STDOUT or STDERR (default [])",
- },
- cli.StringFlag{
- Name: "blkio-weight",
- Usage: "Block IO weight (relative weight) accepts a weight value between 10 and 1000.",
- },
- cli.StringSliceFlag{
- Name: "blkio-weight-device",
- Usage: "Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`)",
- },
- cli.StringSliceFlag{
- Name: "cap-add",
- Usage: "Add capabilities to the container",
- },
- cli.StringSliceFlag{
- Name: "cap-drop",
- Usage: "Drop capabilities from the container",
- },
- cli.StringFlag{
- Name: "cgroup-parent",
- Usage: "Optional parent cgroup for the container",
- },
- cli.StringFlag{
- Name: "cidfile",
- Usage: "Write the container ID to the file",
- },
- cli.StringFlag{
- Name: "conmon-pidfile",
- Usage: "Path to the file that will receive the PID of conmon",
- },
- cli.Uint64Flag{
- Name: "cpu-period",
- Usage: "Limit the CPU CFS (Completely Fair Scheduler) period",
- },
- cli.Int64Flag{
- Name: "cpu-quota",
- Usage: "Limit the CPU CFS (Completely Fair Scheduler) quota",
- },
- cli.Uint64Flag{
- Name: "cpu-rt-period",
- Usage: "Limit the CPU real-time period in microseconds",
- },
- cli.Int64Flag{
- Name: "cpu-rt-runtime",
- Usage: "Limit the CPU real-time runtime in microseconds",
- },
- cli.Uint64Flag{
- Name: "cpu-shares",
- Usage: "CPU shares (relative weight)",
- },
- cli.Float64Flag{
- Name: "cpus",
- Usage: "Number of CPUs. The default is 0.000 which means no limit",
- },
- cli.StringFlag{
- Name: "cpuset-cpus",
- Usage: "CPUs in which to allow execution (0-3, 0,1)",
- },
- cli.StringFlag{
- Name: "cpuset-mems",
- Usage: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
- },
- cli.BoolFlag{
- Name: "detach, d",
- Usage: "Run container in background and print container ID",
- },
- cli.StringFlag{
- Name: "detach-keys",
- Usage: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`",
- },
- cli.StringSliceFlag{
- Name: "device",
- Usage: "Add a host device to the container (default [])",
- },
- cli.StringSliceFlag{
- Name: "device-read-bps",
- Usage: "Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb)",
- },
- cli.StringSliceFlag{
- Name: "device-read-iops",
- Usage: "Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000)",
- },
- cli.StringSliceFlag{
- Name: "device-write-bps",
- Usage: "Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb)",
- },
- cli.StringSliceFlag{
- Name: "device-write-iops",
- Usage: "Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000)",
- },
- cli.StringSliceFlag{
- Name: "dns",
- Usage: "Set custom DNS servers",
- },
- cli.StringSliceFlag{
- Name: "dns-opt",
- Usage: "Set custom DNS options",
- },
- cli.StringSliceFlag{
- Name: "dns-search",
- Usage: "Set custom DNS search domains",
- },
- cli.StringFlag{
- Name: "entrypoint",
- Usage: "Overwrite the default ENTRYPOINT of the image",
- },
- cli.StringSliceFlag{
- Name: "env, e",
- Usage: "Set environment variables in container",
- },
- cli.StringSliceFlag{
- Name: "env-file",
- Usage: "Read in a file of environment variables",
- },
- cli.StringSliceFlag{
- Name: "expose",
- Usage: "Expose a port or a range of ports (default [])",
- },
- cli.StringSliceFlag{
- Name: "gidmap",
- Usage: "GID map to use for the user namespace",
- },
- cli.StringSliceFlag{
- Name: "group-add",
- Usage: "Add additional groups to join (default [])",
- },
- cli.BoolFlag{
- Name: "help",
- Hidden: true,
- },
- cli.StringFlag{
- Name: "hostname, h",
- Usage: "Set container hostname",
- },
- cli.StringFlag{
- Name: "image-volume, builtin-volume",
- Usage: "Tells podman how to handle the builtin image volumes. The options are: 'bind', 'tmpfs', or 'ignore' (default 'bind')",
- Value: "bind",
- },
- cli.BoolFlag{
- Name: "init",
- Usage: "Run an init binary inside the container that forwards signals and reaps processes",
- },
- cli.StringFlag{
- Name: "init-path",
+func getCreateFlags(c *cliconfig.PodmanCommand) {
+
+ createFlags := c.Flags()
+
+ createFlags.StringSlice(
+ "add-host", []string{},
+ "Add a custom host-to-IP mapping (host:ip) (default [])",
+ )
+ createFlags.StringSlice(
+ "annotation", []string{},
+ "Add annotations to container (key:value) (default [])",
+ )
+ createFlags.StringSliceP(
+ "attach", "a", []string{},
+ "Attach to STDIN, STDOUT or STDERR (default [])",
+ )
+ createFlags.String(
+ "blkio-weight", "",
+ "Block IO weight (relative weight) accepts a weight value between 10 and 1000.",
+ )
+ createFlags.StringSlice(
+ "blkio-weight-device", []string{},
+ "Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`)",
+ )
+ createFlags.StringSlice(
+ "cap-add", []string{},
+ "Add capabilities to the container",
+ )
+ createFlags.StringSlice(
+ "cap-drop", []string{},
+ "Drop capabilities from the container",
+ )
+ createFlags.String(
+ "cgroup-parent", "",
+ "Optional parent cgroup for the container",
+ )
+ createFlags.String(
+ "cidfile", "",
+ "Write the container ID to the file",
+ )
+ createFlags.String(
+ "conmon-pidfile", "",
+ "Path to the file that will receive the PID of conmon",
+ )
+ createFlags.Uint64(
+ "cpu-period", 0,
+ "Limit the CPU CFS (Completely Fair Scheduler) period",
+ )
+ createFlags.Int64(
+ "cpu-quota", 0,
+ "Limit the CPU CFS (Completely Fair Scheduler) quota",
+ )
+ createFlags.Uint64(
+ "cpu-rt-period", 0,
+ "Limit the CPU real-time period in microseconds",
+ )
+ createFlags.Int64(
+ "cpu-rt-runtime", 0,
+ "Limit the CPU real-time runtime in microseconds",
+ )
+ createFlags.Uint64(
+ "cpu-shares", 0,
+ "CPU shares (relative weight)",
+ )
+ createFlags.Float64(
+ "cpus", 0,
+ "Number of CPUs. The default is 0.000 which means no limit",
+ )
+ createFlags.String(
+ "cpuset-cpus", "",
+ "CPUs in which to allow execution (0-3, 0,1)",
+ )
+ createFlags.String(
+ "cpuset-mems", "",
+ "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
+ )
+ createFlags.BoolP(
+ "detach", "d", false,
+ "Run container in background and print container ID",
+ )
+ createFlags.String(
+ "detach-keys", "",
+ "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`",
+ )
+ createFlags.StringSlice(
+ "device", []string{},
+ "Add a host device to the container (default [])",
+ )
+ createFlags.StringSlice(
+ "device-read-bps", []string{},
+ "Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb)",
+ )
+ createFlags.StringSlice(
+ "device-read-iops", []string{},
+ "Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000)",
+ )
+ createFlags.StringSlice(
+ "device-write-bps", []string{},
+ "Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb)",
+ )
+ createFlags.StringSlice(
+ "device-write-iops", []string{},
+ "Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000)",
+ )
+ createFlags.StringSlice(
+ "dns", []string{},
+ "Set custom DNS servers",
+ )
+ createFlags.StringSlice(
+ "dns-opt", []string{},
+ "Set custom DNS options",
+ )
+ createFlags.StringSlice(
+ "dns-search", []string{},
+ "Set custom DNS search domains",
+ )
+ createFlags.String(
+ "entrypoint", "",
+ "Overwrite the default ENTRYPOINT of the image",
+ )
+ createFlags.StringSliceP(
+ "env", "e", []string{},
+ "Set environment variables in container",
+ )
+ createFlags.StringSlice(
+ "env-file", []string{},
+ "Read in a file of environment variables",
+ )
+ createFlags.StringSlice(
+ "expose", []string{},
+ "Expose a port or a range of ports (default [])",
+ )
+ createFlags.StringSlice(
+ "gidmap", []string{},
+ "GID map to use for the user namespace",
+ )
+ createFlags.StringSlice(
+ "group-add", []string{},
+ "Add additional groups to join (default [])",
+ )
+ createFlags.Bool(
+ "help", false, "",
+ )
+
+ createFlags.StringP(
+ "hostname", "h", "",
+ "Set container hostname",
+ )
+ createFlags.String(
+ "image-volume", "bind",
+ "Tells podman how to handle the builtin image volumes. The options are: 'bind', 'tmpfs', or 'ignore' (default 'bind')",
+ )
+ createFlags.Bool(
+ "init", false,
+ "Run an init binary inside the container that forwards signals and reaps processes",
+ )
+ createFlags.String(
+ "init-path", "",
// Do not use the Value field for setting the default value to determine user input (i.e., non-empty string)
- Usage: fmt.Sprintf("Path to the container-init binary (default: %q)", libpod.DefaultInitPath),
- },
- cli.BoolFlag{
- Name: "interactive, i",
- Usage: "Keep STDIN open even if not attached",
- },
- cli.StringFlag{
- Name: "ip",
- Usage: "Specify a static IPv4 address for the container",
- },
- cli.StringFlag{
- Name: "ipc",
- Usage: "IPC namespace to use",
- },
- cli.StringFlag{
- Name: "kernel-memory",
- Usage: "Kernel memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g)",
- },
- cli.StringSliceFlag{
- Name: "label",
- Usage: "Set metadata on container (default [])",
- },
- cli.StringSliceFlag{
- Name: "label-file",
- Usage: "Read in a line delimited file of labels (default [])",
- },
- cli.StringFlag{
- Name: "log-driver",
- Usage: "Logging driver for the container",
- },
- cli.StringSliceFlag{
- Name: "log-opt",
- Usage: "Logging driver options (default [])",
- },
- cli.StringFlag{
- Name: "mac-address",
- Usage: "Container MAC address (e.g. 92:d0:c6:0a:29:33), not currently supported",
- },
- cli.StringFlag{
- Name: "memory, m",
- Usage: "Memory limit (format: <number>[<unit>], where unit = b, k, m or g)",
- },
- cli.StringFlag{
- Name: "memory-reservation",
- Usage: "Memory soft limit (format: <number>[<unit>], where unit = b, k, m or g)",
- },
- cli.StringFlag{
- Name: "memory-swap",
- Usage: "Swap limit equal to memory plus swap: '-1' to enable unlimited swap",
- },
- cli.Int64Flag{
- Name: "memory-swappiness",
- Usage: "Tune container memory swappiness (0 to 100) (default -1)",
- Value: -1,
- },
- cli.StringFlag{
- Name: "name",
- Usage: "Assign a name to the container",
- },
- cli.StringFlag{
- Name: "net, network",
- Usage: "Connect a container to a network",
- Value: getDefaultNetwork(),
- },
- cli.BoolFlag{
- Name: "oom-kill-disable",
- Usage: "Disable OOM Killer",
- },
- cli.StringFlag{
- Name: "oom-score-adj",
- Usage: "Tune the host's OOM preferences (-1000 to 1000)",
- },
- cli.StringFlag{
- Name: "pid",
- Usage: "PID namespace to use",
- },
- cli.Int64Flag{
- Name: "pids-limit",
- Usage: "Tune container pids limit (set -1 for unlimited)",
- },
- cli.StringFlag{
- Name: "pod",
- Usage: "Run container in an existing pod",
- },
- cli.BoolFlag{
- Name: "privileged",
- Usage: "Give extended privileges to container",
- },
- cli.StringSliceFlag{
- Name: "publish, p",
- Usage: "Publish a container's port, or a range of ports, to the host (default [])",
- },
- cli.BoolFlag{
- Name: "publish-all, P",
- Usage: "Publish all exposed ports to random ports on the host interface",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Suppress output information when pulling images",
- },
- cli.BoolFlag{
- Name: "read-only",
- Usage: "Make containers root filesystem read-only",
- },
- cli.StringFlag{
- Name: "restart",
- Usage: "Restart is not supported. Please use a systemd unit file for restart",
- },
- cli.BoolFlag{
- Name: "rm",
- Usage: "Remove container (and pod if created) after exit",
- },
- cli.BoolFlag{
- Name: "rootfs",
- Usage: "The first argument is not an image but the rootfs to the exploded container",
- },
- cli.StringSliceFlag{
- Name: "security-opt",
- Usage: "Security Options (default [])",
- },
- cli.StringFlag{
- Name: "shm-size",
- Usage: "Size of `/dev/shm`. The format is `<number><unit>`.",
- Value: "65536k",
- },
- cli.StringFlag{
- Name: "stop-signal",
- Usage: "Signal to stop a container. Default is SIGTERM",
- },
- cli.IntFlag{
- Name: "stop-timeout",
- Usage: "Timeout (in seconds) to stop a container. Default is 10",
- Value: libpod.CtrRemoveTimeout,
- },
- cli.StringSliceFlag{
- Name: "storage-opt",
- Usage: "Storage driver options per container (default [])",
- },
- cli.StringFlag{
- Name: "subgidname",
- Usage: "Name of range listed in /etc/subgid for use in user namespace",
- },
- cli.StringFlag{
- Name: "subuidname",
- Usage: "Name of range listed in /etc/subuid for use in user namespace",
- },
+ fmt.Sprintf("Path to the container-init binary (default: %q)", libpod.DefaultInitPath),
+ )
+ createFlags.BoolP(
+ "interactive", "i", false,
+ "Keep STDIN open even if not attached",
+ )
+ createFlags.String(
+ "ip", "",
+ "Specify a static IPv4 address for the container",
+ )
+ createFlags.String(
+ "ipc", "",
+ "IPC namespace to use",
+ )
+ createFlags.String(
+ "kernel-memory", "",
+ "Kernel memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g)",
+ )
+ createFlags.StringSlice(
+ "label", []string{},
+ "Set metadata on container (default [])",
+ )
+ createFlags.StringSlice(
+ "label-file", []string{},
+ "Read in a line delimited file of labels (default [])",
+ )
+ createFlags.String(
+ "log-driver", "",
+ "Logging driver for the container",
+ )
+ createFlags.StringSlice(
+ "log-opt", []string{},
+ "Logging driver options (default [])",
+ )
+ createFlags.String(
+ "mac-address", "",
+ "Container MAC address (e.g. 92:d0:c6:0a:29:33), not currently supported",
+ )
+ createFlags.StringP(
+ "memory", "m", "",
+ "Memory limit (format: <number>[<unit>], where unit = b, k, m or g)",
+ )
+ createFlags.String(
+ "memory-reservation", "",
+ "Memory soft limit (format: <number>[<unit>], where unit = b, k, m or g)",
+ )
+ createFlags.String(
+ "memory-swap", "",
+ "Swap limit equal to memory plus swap: '-1' to enable unlimited swap",
+ )
+ createFlags.Int64(
+ "memory-swappiness", -1,
+ "Tune container memory swappiness (0 to 100) (default -1)",
+ )
+ createFlags.String(
+ "name", "",
+ "Assign a name to the container",
+ )
+ createFlags.String(
+ "net", getDefaultNetwork(),
+ "Connect a container to a network",
+ )
+ createFlags.String(
+ "network", getDefaultNetwork(),
+ "Connect a container to a network",
+ )
+ createFlags.Bool(
+ "oom-kill-disable", false,
+ "Disable OOM Killer",
+ )
+ createFlags.Int(
+ "oom-score-adj", 0,
+ "Tune the host's OOM preferences (-1000 to 1000)",
+ )
+ createFlags.String(
+ "pid", "",
+ "PID namespace to use",
+ )
+ createFlags.Int64(
+ "pids-limit", 0,
+ "Tune container pids limit (set -1 for unlimited)",
+ )
+ createFlags.String(
+ "pod", "",
+ "Run container in an existing pod",
+ )
+ createFlags.Bool(
+ "privileged", false,
+ "Give extended privileges to container",
+ )
+ createFlags.StringSliceP(
+ "publish", "p", []string{},
+ "Publish a container's port, or a range of ports, to the host (default [])",
+ )
+ createFlags.BoolP(
+ "publish-all", "P", false,
+ "Publish all exposed ports to random ports on the host interface",
+ )
+ createFlags.BoolP(
+ "quiet", "q", false,
+ "Suppress output information when pulling images",
+ )
+ createFlags.Bool(
+ "read-only", false,
+ "Make containers root filesystem read-only",
+ )
+ createFlags.String(
+ "restart", "",
+ "Restart is not supported. Please use a systemd unit file for restart",
+ )
+ createFlags.Bool(
+ "rm", false,
+ "Remove container (and pod if created) after exit",
+ )
+ createFlags.Bool(
+ "rootfs", false,
+ "The first argument is not an image but the rootfs to the exploded container",
+ )
+ createFlags.StringArray(
+ "security-opt", []string{},
+ "Security Options (default [])",
+ )
+ createFlags.String(
+ "shm-size", "65536k",
+ "Size of `/dev/shm`. The format is `<number><unit>`",
+ )
+ createFlags.String(
+ "stop-signal", "",
+ "Signal to stop a container. Default is SIGTERM",
+ )
+ createFlags.Int(
+ "stop-timeout", libpod.CtrRemoveTimeout,
+ "Timeout (in seconds) to stop a container. Default is 10",
+ )
+ createFlags.StringSlice(
+ "storage-opt", []string{},
+ "Storage driver options per container (default [])",
+ )
+ createFlags.String(
+ "subgidname", "",
+ "Name of range listed in /etc/subgid for use in user namespace",
+ )
+ createFlags.String(
+ "subuidname", "",
+ "Name of range listed in /etc/subuid for use in user namespace",
+ )
- cli.StringSliceFlag{
- Name: "sysctl",
- Usage: "Sysctl options (default [])",
- },
- cli.BoolTFlag{
- Name: "systemd",
- Usage: "Run container in systemd mode if the command executable is systemd or init",
- },
- cli.StringSliceFlag{
- Name: "tmpfs",
- Usage: "Mount a temporary filesystem (`tmpfs`) into a container (default [])",
- },
- cli.BoolFlag{
- Name: "tty, t",
- Usage: "Allocate a pseudo-TTY for container",
- },
- cli.StringSliceFlag{
- Name: "uidmap",
- Usage: "UID map to use for the user namespace",
- },
- cli.StringSliceFlag{
- Name: "ulimit",
- Usage: "Ulimit options (default [])",
- },
- cli.StringFlag{
- Name: "user, u",
- Usage: "Username or UID (format: <name|uid>[:<group|gid>])",
- },
- cli.StringFlag{
- Name: "userns",
- Usage: "User namespace to use",
- },
- cli.StringFlag{
- Name: "uts",
- Usage: "UTS namespace to use",
- },
- cli.StringSliceFlag{
- Name: "mount",
- Usage: "Attach a filesystem mount to the container (default [])",
- },
- cli.StringSliceFlag{
- Name: "volume, v",
- Usage: "Bind mount a volume into the container (default [])",
- },
- cli.StringSliceFlag{
- Name: "volumes-from",
- Usage: "Mount volumes from the specified container(s) (default [])",
- },
- WorkDirFlag,
+ createFlags.StringSlice(
+ "sysctl", []string{},
+ "Sysctl options (default [])",
+ )
+ createFlags.Bool(
+ "systemd", true,
+ "Run container in systemd mode if the command executable is systemd or init",
+ )
+ createFlags.StringSlice(
+ "tmpfs", []string{},
+ "Mount a temporary filesystem (`tmpfs`) into a container (default [])",
+ )
+ createFlags.BoolP(
+ "tty", "t", false,
+ "Allocate a pseudo-TTY for container",
+ )
+ createFlags.StringSlice(
+ "uidmap", []string{},
+ "UID map to use for the user namespace",
+ )
+ createFlags.StringSlice(
+ "ulimit", []string{},
+ "Ulimit options (default [])",
+ )
+ createFlags.StringP(
+ "user", "u", "",
+ "Username or UID (format: <name|uid>[:<group|gid>])",
+ )
+ createFlags.String(
+ "userns", "",
+ "User namespace to use",
+ )
+ createFlags.String(
+ "uts", "",
+ "UTS namespace to use",
+ )
+ createFlags.StringArray(
+ "mount", []string{},
+ "Attach a filesystem mount to the container (default [])",
+ )
+ createFlags.StringArrayP(
+ "volume", "v", []string{},
+ "Bind mount a volume into the container (default [])",
+ )
+ createFlags.StringSlice(
+ "volumes-from", []string{},
+ "Mount volumes from the specified container(s) (default [])",
+ )
+ createFlags.StringP(
+ "workdir", "w", "",
+ "Working directory inside the container",
+ )
}
-func getFormat(c *cli.Context) (string, error) {
+func getFormat(c *cliconfig.PodmanCommand) (string, error) {
format := strings.ToLower(c.String("format"))
if strings.HasPrefix(format, buildah.OCI) {
return buildah.OCIv1ImageManifest, nil
@@ -541,13 +498,6 @@ func getFormat(c *cli.Context) (string, error) {
return "", errors.Errorf("unrecognized image type %q", format)
}
-func sortFlags(flags []cli.Flag) []cli.Flag {
- sort.Slice(flags, func(i, j int) bool {
- return strings.Compare(flags[i].GetName(), flags[j].GetName()) < 0
- })
- return flags
-}
-
func getAuthFile(authfile string) string {
if authfile != "" {
return authfile
@@ -562,3 +512,27 @@ func scrubServer(server string) string {
server = strings.TrimPrefix(server, "https://")
return strings.TrimPrefix(server, "http://")
}
+
+// UsageTemplate returns the usage template for podman commands
+// This blocks the display of the global options. The main podman
+// command should not use this.
+func UsageTemplate() string {
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+ {{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+{{end}}
+`
+}
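
Subcommands apply this template from their init functions, as the commit and prune commands above do, while the root command keeps cobra's default template so the global options remain visible on `podman --help`. A short sketch of the wiring (the helper name is illustrative):

func registerChild(child *cobra.Command) {
	child.SetUsageTemplate(UsageTemplate()) // hide inherited global flags on the child
	rootCmd.AddCommand(child)               // rootCmd keeps cobra's default template
}
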
diff --git a/cmd/podman/common_test.go b/cmd/podman/common_test.go
index 042568d7e..a24173003 100644
--- a/cmd/podman/common_test.go
+++ b/cmd/podman/common_test.go
@@ -3,34 +3,8 @@ package main
import (
"os/user"
"testing"
-
- "flag"
-
- "github.com/urfave/cli"
)
-func TestGetStore(t *testing.T) {
- t.Skip("FIX THIS!")
-
- //cmd/podman/common_test.go:27: cannot use c (type *cli.Context) as type *libkpod.Config in argument to getStore
-
- // Make sure the tests are running as root
- skipTestIfNotRoot(t)
-
- set := flag.NewFlagSet("test", 0)
- globalSet := flag.NewFlagSet("test", 0)
- globalSet.String("root", "", "path to the root directory in which data, including images, is stored")
- globalCtx := cli.NewContext(nil, globalSet, nil)
- command := cli.Command{Name: "imagesCommand"}
- c := cli.NewContext(nil, set, globalCtx)
- c.Command = command
-
- //_, err := getStore(c)
- //if err != nil {
- //t.Error(err)
- //}
-}
-
func skipTestIfNotRoot(t *testing.T) {
u, err := user.Current()
if err != nil {
diff --git a/cmd/podman/container.go b/cmd/podman/container.go
index 29300a6a4..d2450fdd3 100644
--- a/cmd/podman/container.go
+++ b/cmd/podman/container.go
@@ -1,30 +1,28 @@
package main
import (
- "sort"
-
- "github.com/urfave/cli"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
)
-var (
- containerSubCommands = []cli.Command{
- exportCommand,
- inspectCommand,
- }
- containerDescription = "Manage containers"
- containerCommand = cli.Command{
- Name: "container",
- Usage: "Manage Containers",
- Description: containerDescription,
- ArgsUsage: "",
- Subcommands: getContainerSubCommandsSorted(),
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
- }
-)
+var containerDescription = "Manage containers"
+var containerCommand = cliconfig.PodmanCommand{
+ Command: &cobra.Command{
+ Use: "container",
+ Short: "Manage Containers",
+ Long: containerDescription,
+ TraverseChildren: true,
+ },
+}
+
+// Commands that are universally implemented.
+var containerCommands = []*cobra.Command{
+ _containerExistsCommand,
+}
-func getContainerSubCommandsSorted() []cli.Command {
- containerSubCommands = append(containerSubCommands, getContainerSubCommands()...)
- sort.Sort(commandSortedAlpha{containerSubCommands})
- return containerSubCommands
+func init() {
+ containerCommand.AddCommand(containerCommands...)
+ containerCommand.AddCommand(getContainerSubCommands()...)
+ containerCommand.SetUsageTemplate(UsageTemplate())
+ rootCmd.AddCommand(containerCommand.Command)
}
diff --git a/cmd/podman/containers_prune.go b/cmd/podman/containers_prune.go
index 09141e9a3..6e4960429 100644
--- a/cmd/podman/containers_prune.go
+++ b/cmd/podman/containers_prune.go
@@ -3,31 +3,42 @@ package main
import (
"context"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ pruneContainersCommand cliconfig.PruneContainersValues
pruneContainersDescription = `
podman container prune
Removes all exited containers
`
-
- pruneContainersCommand = cli.Command{
- Name: "prune",
- Usage: "Remove all stopped containers",
- Description: pruneContainersDescription,
- Action: pruneContainersCmd,
- OnUsageError: usageErrorHandler,
+ _pruneContainersCommand = &cobra.Command{
+ Use: "prune",
+ Short: "Remove all stopped containers",
+ Long: pruneContainersDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ pruneContainersCommand.InputArgs = args
+ pruneContainersCommand.GlobalFlags = MainGlobalOpts
+ return pruneContainersCmd(&pruneContainersCommand)
+ },
}
)
-func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force bool) error {
+func init() {
+ pruneContainersCommand.Command = _pruneContainersCommand
+ pruneContainersCommand.SetUsageTemplate(UsageTemplate())
+ flags := pruneContainersCommand.Flags()
+ flags.BoolVarP(&pruneContainersCommand.Force, "force", "f", false, "Force removal of a running container. The default is false")
+}
+
+func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force, volumes bool) error {
var deleteFuncs []shared.ParallelWorkerInput
filter := func(c *libpod.Container) bool {
@@ -47,7 +58,7 @@ func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWork
for _, container := range delContainers {
con := container
f := func() error {
- return runtime.RemoveContainer(ctx, con, force)
+ return runtime.RemoveContainer(ctx, con, force, volumes)
}
deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
@@ -60,8 +71,8 @@ func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWork
return printParallelOutput(deleteErrors, errCount)
}
-func pruneContainersCmd(c *cli.Context) error {
- runtime, err := adapter.GetRuntime(c)
+func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -69,9 +80,9 @@ func pruneContainersCmd(c *cli.Context) error {
maxWorkers := shared.Parallelize("rm")
if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
+ maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
- return pruneContainers(runtime, getContext(), maxWorkers, c.Bool("force"))
+ return pruneContainers(runtime, getContext(), maxWorkers, c.Bool("force"), c.Bool("volumes"))
}
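
Because pruneContainers takes the runtime, context, worker count, and both booleans explicitly, other commands in this package can reuse it directly; an illustrative call site (runtime, force, and removeVolumes are assumed to be in scope from the caller, as in pruneContainersCmd above):

maxWorkers := shared.Parallelize("rm")
if err := pruneContainers(runtime, getContext(), maxWorkers, force, removeVolumes); err != nil {
	return err
}
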
diff --git a/cmd/podman/cp.go b/cmd/podman/cp.go
new file mode 100644
index 000000000..d9f230b67
--- /dev/null
+++ b/cmd/podman/cp.go
@@ -0,0 +1,291 @@
+package main
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/containers/buildah/util"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/chrootuser"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/idtools"
+ digest "github.com/opencontainers/go-digest"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+var (
+ cpCommand cliconfig.CpValues
+
+ cpDescription = "Copy files/folders between a container and the local filesystem"
+ _cpCommand = &cobra.Command{
+ Use: "cp",
+ Short: "Copy files/folders between a container and the local filesystem",
+ Long: cpDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ cpCommand.InputArgs = args
+ cpCommand.GlobalFlags = MainGlobalOpts
+ return cpCmd(&cpCommand)
+ },
+ Example: "[CONTAINER:]SRC_PATH [CONTAINER:]DEST_PATH",
+ }
+)
+
+func init() {
+ cpCommand.Command = _cpCommand
+ rootCmd.AddCommand(cpCommand.Command)
+}
+
+func cpCmd(c *cliconfig.CpValues) error {
+ args := c.InputArgs
+ if len(args) != 2 {
+ return errors.Errorf("you must provide a source path and a destination path")
+ }
+ if os.Geteuid() != 0 {
+ rootless.SetSkipStorageSetup(true)
+ }
+
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ return copyBetweenHostAndContainer(runtime, args[0], args[1])
+}
+
+func copyBetweenHostAndContainer(runtime *libpod.Runtime, src string, dest string) error {
+
+ srcCtr, srcPath := parsePath(runtime, src)
+ destCtr, destPath := parsePath(runtime, dest)
+
+ if (srcCtr == nil && destCtr == nil) || (srcCtr != nil && destCtr != nil) {
+ return errors.Errorf("invalid arguments %s, %s you must use just one container", src, dest)
+ }
+
+ if len(srcPath) == 0 || len(destPath) == 0 {
+ return errors.Errorf("invalid arguments %s, %s you must specify paths", src, dest)
+ }
+ ctr := srcCtr
+ isFromHostToCtr := (ctr == nil)
+ if isFromHostToCtr {
+ ctr = destCtr
+ }
+
+ if os.Geteuid() != 0 {
+ s, err := ctr.State()
+ if err != nil {
+ return err
+ }
+ var became bool
+ var ret int
+ if s == libpod.ContainerStateRunning || s == libpod.ContainerStatePaused {
+ data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
+ if err != nil {
+ return errors.Wrapf(err, "cannot read conmon PID file %q", ctr.Config().ConmonPidFile)
+ }
+ conmonPid, err := strconv.Atoi(string(data))
+ if err != nil {
+ return errors.Wrapf(err, "cannot parse PID %q", data)
+ }
+ became, ret, err = rootless.JoinDirectUserAndMountNS(uint(conmonPid))
+ } else {
+ became, ret, err = rootless.BecomeRootInUserNS()
+ }
+ if err != nil {
+ return err
+ }
+ if became {
+ os.Exit(ret)
+ }
+ }
+
+ mountPoint, err := ctr.Mount()
+ if err != nil {
+ return err
+ }
+ defer ctr.Unmount(false)
+ user, err := getUser(mountPoint, ctr.User())
+ if err != nil {
+ return err
+ }
+ idMappingOpts, err := ctr.IDMappings()
+ if err != nil {
+ return errors.Wrapf(err, "error getting IDMappingOptions")
+ }
+ containerOwner := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+ hostUID, hostGID, err := util.GetHostIDs(convertIDMap(idMappingOpts.UIDMap), convertIDMap(idMappingOpts.GIDMap), user.UID, user.GID)
+ if err != nil {
+ return err
+ }
+
+ hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
+
+ var glob []string
+ if isFromHostToCtr {
+ if filepath.IsAbs(destPath) {
+ destPath = filepath.Join(mountPoint, destPath)
+
+ } else {
+ if err = idtools.MkdirAllAndChownNew(filepath.Join(mountPoint, ctr.WorkingDir()), 0755, hostOwner); err != nil {
+ return errors.Wrapf(err, "error creating directory %q", destPath)
+ }
+ destPath = filepath.Join(mountPoint, ctr.WorkingDir(), destPath)
+ }
+ } else {
+ if filepath.IsAbs(srcPath) {
+ srcPath = filepath.Join(mountPoint, srcPath)
+ } else {
+ srcPath = filepath.Join(mountPoint, ctr.WorkingDir(), srcPath)
+ }
+ }
+ glob, err = filepath.Glob(srcPath)
+ if err != nil {
+ return errors.Wrapf(err, "invalid glob %q", srcPath)
+ }
+ if len(glob) == 0 {
+ glob = append(glob, srcPath)
+ }
+ if !filepath.IsAbs(destPath) {
+ dir, err := os.Getwd()
+ if err != nil {
+ return errors.Wrapf(err, "err getting current working directory")
+ }
+ destPath = filepath.Join(dir, destPath)
+ }
+
+ var lastError error
+ for _, src := range glob {
+ err := copy(src, destPath, dest, idMappingOpts, &containerOwner)
+ if lastError != nil {
+ logrus.Error(lastError)
+ }
+ lastError = err
+ }
+ return lastError
+}
+
+func getUser(mountPoint string, userspec string) (specs.User, error) {
+ uid, gid, err := chrootuser.GetUser(mountPoint, userspec)
+ u := specs.User{
+ UID: uid,
+ GID: gid,
+ Username: userspec,
+ }
+ if !strings.Contains(userspec, ":") {
+ groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
+ if err2 != nil {
+ if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
+ err = err2
+ }
+ } else {
+ u.AdditionalGids = groups
+ }
+
+ }
+ return u, err
+}
+
+func parsePath(runtime *libpod.Runtime, path string) (*libpod.Container, string) {
+ pathArr := strings.SplitN(path, ":", 2)
+ if len(pathArr) == 2 {
+ ctr, err := runtime.LookupContainer(pathArr[0])
+ if err == nil {
+ return ctr, pathArr[1]
+ }
+ }
+ return nil, path
+}
+
+func getPathInfo(path string) (string, os.FileInfo, error) {
+ path, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error evaluating symlinks %q", path)
+ }
+ srcfi, err := os.Stat(path)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error reading path %q", path)
+ }
+ return path, srcfi, nil
+}
+
+func copy(src, destPath, dest string, idMappingOpts storage.IDMappingOptions, chownOpts *idtools.IDPair) error {
+ srcPath, err := filepath.EvalSymlinks(src)
+ if err != nil {
+ return errors.Wrapf(err, "error evaluating symlinks %q", srcPath)
+ }
+
+ srcPath, srcfi, err := getPathInfo(srcPath)
+ if err != nil {
+ return err
+ }
+ destdir := destPath
+ if !srcfi.IsDir() && !strings.HasSuffix(dest, string(os.PathSeparator)) {
+ destdir = filepath.Dir(destPath)
+ }
+ if err = os.MkdirAll(destdir, 0755); err != nil {
+ return errors.Wrapf(err, "error creating directory %q", destdir)
+ }
+
+ // return functions for copying items
+ copyFileWithTar := chrootarchive.CopyFileWithTarAndChown(chownOpts, digest.Canonical.Digester().Hash(), idMappingOpts.UIDMap, idMappingOpts.GIDMap)
+ copyWithTar := chrootarchive.CopyWithTarAndChown(chownOpts, digest.Canonical.Digester().Hash(), idMappingOpts.UIDMap, idMappingOpts.GIDMap)
+ untarPath := chrootarchive.UntarPathAndChown(chownOpts, digest.Canonical.Digester().Hash(), idMappingOpts.UIDMap, idMappingOpts.GIDMap)
+
+ if srcfi.IsDir() {
+
+ logrus.Debugf("copying %q to %q", srcPath+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
+ if err = copyWithTar(srcPath, destPath); err != nil {
+ return errors.Wrapf(err, "error copying %q to %q", srcPath, dest)
+ }
+ return nil
+ }
+ if !archive.IsArchivePath(srcPath) {
+ // This srcPath is a file, and either it's not an
+ // archive, or we don't care whether or not it's an
+ // archive.
+ destfi, err := os.Stat(destPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return errors.Wrapf(err, "failed to get stat of dest path %s", destPath)
+ }
+ }
+ if destfi != nil && destfi.IsDir() {
+ destPath = filepath.Join(destPath, filepath.Base(srcPath))
+ }
+ // Copy the file, preserving attributes.
+ logrus.Debugf("copying %q to %q", srcPath, destPath)
+ if err = copyFileWithTar(srcPath, destPath); err != nil {
+ return errors.Wrapf(err, "error copying %q to %q", srcPath, destPath)
+ }
+ return nil
+ }
+ // We're extracting an archive into the destination directory.
+ logrus.Debugf("extracting contents of %q into %q", srcPath, destPath)
+ if err = untarPath(srcPath, destPath); err != nil {
+ return errors.Wrapf(err, "error extracting %q into %q", srcPath, destPath)
+ }
+ return nil
+}
+
+func convertIDMap(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxIDMapping) {
+ for _, idmap := range idMaps {
+ tempIDMap := specs.LinuxIDMapping{
+ ContainerID: uint32(idmap.ContainerID),
+ HostID: uint32(idmap.HostID),
+ Size: uint32(idmap.Size),
+ }
+ convertedIDMap = append(convertedIDMap, tempIDMap)
+ }
+ return convertedIDMap
+}
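
The copy direction hinges on parsePath: a "NAME:PATH" argument is treated as a container path only when NAME resolves to an existing container; otherwise the whole string is taken as a host path. A self-contained illustration of just the splitting rule (the real function additionally calls runtime.LookupContainer on the prefix):

package main

import "strings"

// splitCtrPath mirrors the string handling in parsePath without the container
// lookup; it is an illustration, not part of the patch.
func splitCtrPath(path string) (prefix, rest string, hasPrefix bool) {
	parts := strings.SplitN(path, ":", 2)
	if len(parts) == 2 {
		return parts[0], parts[1], true
	}
	return "", path, false
}
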
diff --git a/cmd/podman/create.go b/cmd/podman/create.go
index 2d85abd35..868f90d54 100644
--- a/cmd/podman/create.go
+++ b/cmd/podman/create.go
@@ -12,6 +12,7 @@ import (
"strings"
"syscall"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
@@ -27,39 +28,56 @@ import (
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ createCommand cliconfig.CreateValues
+ createDescription = "Creates a new container from the given image or" +
+ " storage and prepares it for running the specified command. The" +
+ " container ID is then printed to stdout. You can then start it at" +
+ " any time with the podman start <container_id> command. The container" +
+ " will be created with the initial state 'created'."
+ _createCommand = &cobra.Command{
+ Use: "create",
+ Short: "Create but do not start a container",
+ Long: createDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ createCommand.InputArgs = args
+ createCommand.GlobalFlags = MainGlobalOpts
+ return createCmd(&createCommand)
+ },
+ Example: `podman create alpine ls
+ podman create --annotation HELLO=WORLD alpine ls
+ podman create -t -i --name myctr alpine ls`,
+ }
+
defaultEnvVariables = map[string]string{
"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM": "xterm",
}
)
-var createDescription = "Creates a new container from the given image or" +
- " storage and prepares it for running the specified command. The" +
- " container ID is then printed to stdout. You can then start it at" +
- " any time with the podman start <container_id> command. The container" +
- " will be created with the initial state 'created'."
-
-var createCommand = cli.Command{
- Name: "create",
- Usage: "Create but do not start a container",
- Description: createDescription,
- Flags: sortFlags(createFlags),
- Action: createCmd,
- ArgsUsage: "IMAGE [COMMAND [ARG...]]",
- HideHelp: true,
- SkipArgReorder: true,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+func init() {
+ createCommand.PodmanCommand.Command = _createCommand
+ createCommand.SetUsageTemplate(UsageTemplate())
+
+ getCreateFlags(&createCommand.PodmanCommand)
+ flags := createCommand.Flags()
+ flags.SetInterspersed(true)
+
}
-func createCmd(c *cli.Context) error {
- if err := createInit(c); err != nil {
+func createCmd(c *cliconfig.CreateValues) error {
+ if c.Bool("trace") {
+ span, _ := opentracing.StartSpanFromContext(Ctx, "createCmd")
+ defer span.Finish()
+ }
+
+ if err := createInit(&c.PodmanCommand); err != nil {
return err
}
@@ -67,13 +85,13 @@ func createCmd(c *cli.Context) error {
rootless.SetSkipStorageSetup(true)
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- ctr, _, err := createContainer(c, runtime)
+ ctr, _, err := createContainer(&c.PodmanCommand, runtime)
if err != nil {
return err
}
@@ -82,33 +100,33 @@ func createCmd(c *cli.Context) error {
return nil
}
-func createInit(c *cli.Context) error {
- // TODO should allow user to create based off a directory on the host not just image
- // Need CLI support for this
+func createInit(c *cliconfig.PodmanCommand) error {
+ if c.Bool("trace") {
+ span, _ := opentracing.StartSpanFromContext(Ctx, "createInit")
+ defer span.Finish()
+ }
// Docker-compatibility: the "-h" flag for run/create is reserved for
// the hostname (see https://github.com/containers/libpod/issues/1367).
- if c.Bool("help") {
- cli.ShowCommandHelpAndExit(c, "run", 0)
- }
-
- if err := validateFlags(c, createFlags); err != nil {
- return err
- }
- if len(c.Args()) < 1 {
+ if len(c.InputArgs) < 1 {
return errors.Errorf("image name or ID is required")
}
return nil
}
-func createContainer(c *cli.Context, runtime *libpod.Runtime) (*libpod.Container, *cc.CreateConfig, error) {
+func createContainer(c *cliconfig.PodmanCommand, runtime *libpod.Runtime) (*libpod.Container, *cc.CreateConfig, error) {
+ if c.Bool("trace") {
+ span, _ := opentracing.StartSpanFromContext(Ctx, "createContainer")
+ defer span.Finish()
+ }
+
rtc := runtime.GetConfig()
ctx := getContext()
rootfs := ""
if c.Bool("rootfs") {
- rootfs = c.Args()[0]
+ rootfs = c.InputArgs[0]
}
var err error
@@ -134,7 +152,7 @@ func createContainer(c *cli.Context, runtime *libpod.Runtime) (*libpod.Container
writer = os.Stderr
}
- newImage, err := runtime.ImageRuntime().New(ctx, c.Args()[0], rtc.SignaturePolicyPath, "", writer, nil, image.SigningOptions{}, false, nil)
+ newImage, err := runtime.ImageRuntime().New(ctx, c.InputArgs[0], rtc.SignaturePolicyPath, "", writer, nil, image.SigningOptions{}, false, nil)
if err != nil {
return nil, nil, err
}
@@ -264,7 +282,7 @@ func isPortInImagePorts(exposedPorts map[string]struct{}, port string) bool {
return false
}
-func configureEntrypoint(c *cli.Context, data *inspect.ImageData) []string {
+func configureEntrypoint(c *cliconfig.PodmanCommand, data *inspect.ImageData) []string {
entrypoint := []string{}
if c.IsSet("entrypoint") {
// Force entrypoint to ""
@@ -284,7 +302,7 @@ func configureEntrypoint(c *cli.Context, data *inspect.ImageData) []string {
return entrypoint
}
-func configurePod(c *cli.Context, runtime *libpod.Runtime, namespaces map[string]string, podName string) (map[string]string, error) {
+func configurePod(c *cliconfig.PodmanCommand, runtime *libpod.Runtime, namespaces map[string]string, podName string) (map[string]string, error) {
pod, err := runtime.LookupPod(podName)
if err != nil {
return namespaces, err
@@ -296,7 +314,7 @@ func configurePod(c *cli.Context, runtime *libpod.Runtime, namespaces map[string
if (namespaces["pid"] == cc.Pod) || (!c.IsSet("pid") && pod.SharesPID()) {
namespaces["pid"] = fmt.Sprintf("container:%s", podInfraID)
}
- if (namespaces["net"] == cc.Pod) || (!c.IsSet("net") && pod.SharesNet()) {
+ if (namespaces["net"] == cc.Pod) || (!c.IsSet("net") && !c.IsSet("network") && pod.SharesNet()) {
namespaces["net"] = fmt.Sprintf("container:%s", podInfraID)
}
if (namespaces["user"] == cc.Pod) || (!c.IsSet("user") && pod.SharesUser()) {
@@ -313,7 +331,7 @@ func configurePod(c *cli.Context, runtime *libpod.Runtime, namespaces map[string
// Parses CLI options related to container creation into a config which can be
// parsed into an OCI runtime spec
-func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtime, imageName string, data *inspect.ImageData) (*cc.CreateConfig, error) {
+func parseCreateOpts(ctx context.Context, c *cliconfig.PodmanCommand, runtime *libpod.Runtime, imageName string, data *inspect.ImageData) (*cc.CreateConfig, error) {
var (
inputCommand, command []string
memoryLimit, memoryReservation, memorySwap, memoryKernel int64
@@ -335,14 +353,14 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
imageID := ""
- inputCommand = c.Args()[1:]
+ inputCommand = c.InputArgs[1:]
if data != nil {
imageID = data.ID
}
rootfs := ""
if c.Bool("rootfs") {
- rootfs = c.Args()[0]
+ rootfs = c.InputArgs[0]
}
sysctl, err := validateSysctl(c.StringSlice("sysctl"))
@@ -382,27 +400,24 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
blkioWeight = uint16(u)
}
var mountList []spec.Mount
- if mountList, err = parseMounts(c.StringSlice("mount")); err != nil {
+ if mountList, err = parseMounts(c.StringArray("mount")); err != nil {
return nil, err
}
- if err = parseVolumes(c.StringSlice("volume")); err != nil {
+ if err = parseVolumes(c.StringArray("volume")); err != nil {
return nil, err
}
- if err = parseVolumesFrom(c.StringSlice("volumes-from")); err != nil {
+ if err = parseVolumesFrom(c.StringArray("volumes-from")); err != nil {
return nil, err
}
tty := c.Bool("tty")
- if c.Bool("detach") && c.Bool("rm") {
- return nil, errors.Errorf("--rm and --detach cannot be specified together")
- }
- if c.Int64("cpu-period") != 0 && c.Float64("cpus") > 0 {
+ if c.Flag("cpu-period").Changed && c.Flag("cpus").Changed {
return nil, errors.Errorf("--cpu-period and --cpus cannot be set together")
}
- if c.Int64("cpu-quota") != 0 && c.Float64("cpus") > 0 {
+ if c.Flag("cpu-quota").Changed && c.Flag("cpus").Changed {
return nil, errors.Errorf("--cpu-quota and --cpus cannot be set together")
}
@@ -420,9 +435,13 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
// Instead of integrating here, should be done in libpod
// However, that also involves setting up security opts
// when the pod's namespace is integrated
+ namespaceNet := c.String("network")
+ if c.Flag("net").Changed {
+ namespaceNet = c.String("net")
+ }
namespaces = map[string]string{
"pid": c.String("pid"),
- "net": c.String("net"),
+ "net": namespaceNet,
"ipc": c.String("ipc"),
"user": c.String("userns"),
"uts": c.String("uts"),
@@ -627,11 +646,6 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
if util.StringInSlice(".", c.StringSlice("dns-search")) && len(c.StringSlice("dns-search")) > 1 {
return nil, errors.Errorf("cannot pass additional search domains when also specifying '.'")
}
- if !netMode.IsPrivate() {
- if c.IsSet("dns-search") || c.IsSet("dns") || c.IsSet("dns-opt") {
- return nil, errors.Errorf("specifying DNS flags when network mode is shared with the host or another container is not allowed")
- }
- }
// Validate domains are good
for _, dom := range c.StringSlice("dns-search") {
@@ -641,9 +655,10 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
}
var ImageVolumes map[string]struct{}
- if data != nil {
+ if data != nil && c.String("image-volume") != "ignore" {
ImageVolumes = data.Config.Volumes
}
+
var imageVolType = map[string]string{
"bind": "",
"tmpfs": "",
@@ -654,7 +669,7 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
}
var systemd bool
- if command != nil && c.BoolT("systemd") && ((filepath.Base(command[0]) == "init") || (filepath.Base(command[0]) == "systemd")) {
+ if command != nil && c.Bool("systemd") && ((filepath.Base(command[0]) == "init") || (filepath.Base(command[0]) == "systemd")) {
systemd = true
if signalString == "" {
stopSignal, err = signal.ParseSignal("RTMIN+3")
@@ -663,7 +678,17 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
}
}
}
+ // This is done because cobra cannot have two aliased flags. So we have to check
+ // both
+ network := c.String("network")
+ if c.Flag("net").Changed {
+ network = c.String("net")
+ }
+ var memorySwappiness int64
+ if c.Flags().Lookup("memory-swappiness") != nil {
+ memorySwappiness, _ = c.Flags().GetInt64("memory-swappiness")
+ }
config := &cc.CreateConfig{
Runtime: runtime,
Annotations: annotations,
@@ -697,7 +722,7 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
LogDriverOpt: c.StringSlice("log-opt"),
MacAddress: c.String("mac-address"),
Name: c.String("name"),
- Network: c.String("network"),
+ Network: network,
NetworkAlias: c.StringSlice("network-alias"),
IpcMode: ipcMode,
NetMode: netMode,
@@ -730,12 +755,11 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
Memory: memoryLimit,
MemoryReservation: memoryReservation,
MemorySwap: memorySwap,
- MemorySwappiness: c.Int("memory-swappiness"),
+ MemorySwappiness: int(memorySwappiness),
KernelMemory: memoryKernel,
OomScoreAdj: c.Int("oom-score-adj"),
-
- PidsLimit: c.Int64("pids-limit"),
- Ulimit: c.StringSlice("ulimit"),
+ PidsLimit: c.Int64("pids-limit"),
+ Ulimit: c.StringSlice("ulimit"),
},
Rm: c.Bool("rm"),
StopSignal: stopSignal,
@@ -747,13 +771,12 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
User: user,
UsernsMode: usernsMode,
Mounts: mountList,
- Volumes: c.StringSlice("volume"),
+ Volumes: c.StringArray("volume"),
WorkDir: workDir,
Rootfs: rootfs,
VolumesFrom: c.StringSlice("volumes-from"),
- Syslog: c.GlobalBool("syslog"),
+ Syslog: c.GlobalFlags.Syslog,
}
-
if c.Bool("init") {
initPath := c.String("init-path")
if initPath == "" {
@@ -767,11 +790,11 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
if config.Privileged {
config.LabelOpts = label.DisableSecOpt()
} else {
- if err := parseSecurityOpt(config, c.StringSlice("security-opt")); err != nil {
+ if err := parseSecurityOpt(config, c.StringArray("security-opt")); err != nil {
return nil, err
}
}
- config.SecurityOpts = c.StringSlice("security-opt")
+ config.SecurityOpts = c.StringArray("security-opt")
warnings, err := verifyContainerResources(config, false)
if err != nil {
return nil, err
@@ -840,6 +863,12 @@ func joinOrCreateRootlessUserNamespace(createConfig *cc.CreateConfig, runtime *l
if err != nil {
return false, -1, err
}
+ if pid == 0 {
+ if createConfig.Pod != "" {
+ continue
+ }
+ return false, -1, errors.Errorf("dependency container %s is not running", ctr.ID())
+ }
return rootless.JoinNS(uint(pid))
}
}
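
An aside on the hunks above: cobra, unlike urfave/cli, has no notion of flag aliases, which is why create.go registers --net and --network as separate flags and consults Flag("net").Changed before falling back to --network. A minimal, self-contained sketch of that fallback pattern (illustrative command and flag names, not the podman source):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    cmd := &cobra.Command{
        Use: "demo",
        RunE: func(cmd *cobra.Command, args []string) error {
            // Prefer --network, but honor --net when the user explicitly set it,
            // mirroring the "check both" workaround used in parseCreateOpts.
            network, _ := cmd.Flags().GetString("network")
            if cmd.Flag("net").Changed {
                network, _ = cmd.Flags().GetString("net")
            }
            fmt.Println("network mode:", network)
            return nil
        },
    }
    cmd.Flags().String("network", "default", "Network mode")
    cmd.Flags().String("net", "default", "Alias for --network")
    _ = cmd.Execute()
}
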
diff --git a/cmd/podman/create_cli.go b/cmd/podman/create_cli.go
index 95b9321fd..ae0549687 100644
--- a/cmd/podman/create_cli.go
+++ b/cmd/podman/create_cli.go
@@ -80,19 +80,22 @@ func addWarning(warnings []string, msg string) []string {
// podman run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// podman run --mount type=tmpfs,target=/dev/shm ..
func parseMounts(mounts []string) ([]spec.Mount, error) {
+ // TODO(vrothberg): the manual parsing can be replaced with a regular expression
+ // to allow a more robust parsing of the mount format and to give
+ // precise errors regarding supported format versus supported options.
var mountList []spec.Mount
- errInvalidSyntax := errors.Errorf("incorrect mount format : should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>,[options]")
+ errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
for _, mount := range mounts {
var tokenCount int
var mountInfo spec.Mount
arr := strings.SplitN(mount, ",", 2)
if len(arr) < 2 {
- return nil, errInvalidSyntax
+ return nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
}
kv := strings.Split(arr[0], "=")
if kv[0] != "type" {
- return nil, errInvalidSyntax
+ return nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
}
switch kv[1] {
case "bind":
@@ -168,7 +171,7 @@ func parseVolumes(volumes []string) error {
for _, volume := range volumes {
arr := strings.SplitN(volume, ":", 3)
if len(arr) < 2 {
- return errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir:[option]", volume)
+ return errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
}
if err := validateVolumeHostDir(arr[0]); err != nil {
return err
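
For context on the --mount format the error message describes (type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]), here is a rough sketch of the comma-and-key=value splitting that parseMounts performs, with the offending argument wrapped into the error as the new code does. parseMountSketch is an illustrative stand-in; the real function builds a spec.Mount and validates each option individually.

package main

import (
    "fmt"
    "strings"

    "github.com/pkg/errors"
)

func parseMountSketch(mount string) (map[string]string, error) {
    errSyntax := errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
    kv := map[string]string{}
    fields := strings.Split(mount, ",")
    if len(fields) < 2 || !strings.HasPrefix(fields[0], "type=") {
        // Wrapping the offending argument gives the "precise errors" the TODO asks for.
        return nil, errors.Wrapf(errSyntax, "%q", mount)
    }
    for _, field := range fields {
        parts := strings.SplitN(field, "=", 2)
        if len(parts) != 2 {
            return nil, errors.Wrapf(errSyntax, "%q", field)
        }
        kv[parts[0]] = parts[1]
    }
    return kv, nil
}

func main() {
    opts, err := parseMountSketch("type=bind,src=/tmp,target=/mnt")
    fmt.Println(opts, err)
}
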
diff --git a/cmd/podman/diff.go b/cmd/podman/diff.go
index 5f813699f..e2d258ad4 100644
--- a/cmd/podman/diff.go
+++ b/cmd/podman/diff.go
@@ -2,12 +2,12 @@ package main
import (
"fmt"
-
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
type diffJSONOutput struct {
@@ -33,31 +33,37 @@ func (so stdoutStruct) Out() error {
}
var (
- diffFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "archive",
- Usage: "Save the diff as a tar archive",
- Hidden: true,
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Change the output format.",
- },
- }
+ diffCommand cliconfig.DiffValues
diffDescription = fmt.Sprint(`Displays changes on a container or image's filesystem. The
container or image will be compared to its parent layer`)
- diffCommand = cli.Command{
- Name: "diff",
- Usage: "Inspect changes on container's file systems",
- Description: diffDescription,
- Flags: sortFlags(diffFlags),
- Action: diffCmd,
- ArgsUsage: "ID-NAME",
- OnUsageError: usageErrorHandler,
+ _diffCommand = &cobra.Command{
+ Use: "diff",
+ Short: "Inspect changes on container's file systems",
+ Long: diffDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ diffCommand.InputArgs = args
+ diffCommand.GlobalFlags = MainGlobalOpts
+ return diffCmd(&diffCommand)
+ },
+ Example: `podman diff imageID
+ podman diff ctrID
+ podman diff --format json redis:alpine`,
}
)
+func init() {
+ diffCommand.Command = _diffCommand
+ diffCommand.SetUsageTemplate(UsageTemplate())
+ flags := diffCommand.Flags()
+
+ flags.BoolVar(&diffCommand.Archive, "archive", true, "Save the diff as a tar archive")
+ flags.StringVar(&diffCommand.Format, "format", "", "Change the output format")
+
+ flags.MarkHidden("archive")
+
+}
+
func formatJSON(output []diffOutputParams) (diffJSONOutput, error) {
jsonStruct := diffJSONOutput{}
for _, output := range output {
@@ -75,29 +81,25 @@ func formatJSON(output []diffOutputParams) (diffJSONOutput, error) {
return jsonStruct, nil
}
-func diffCmd(c *cli.Context) error {
- if err := validateFlags(c, diffFlags); err != nil {
- return err
- }
-
- if len(c.Args()) != 1 {
+func diffCmd(c *cliconfig.DiffValues) error {
+ if len(c.InputArgs) != 1 {
return errors.Errorf("container, image, or layer name must be specified: podman diff [options [...]] ID-NAME")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- to := c.Args().Get(0)
+ to := c.InputArgs[0]
changes, err := runtime.GetDiff("", to)
if err != nil {
return errors.Wrapf(err, "could not get changes for %q", to)
}
diffOutput := []diffOutputParams{}
- outputFormat := c.String("format")
+ outputFormat := c.Format
for _, change := range changes {
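
The diff.go conversion above is the template that nearly every command in this change follows: a package-level cliconfig values struct, a *cobra.Command whose RunE closure copies the positional arguments and global flags into that struct and delegates to the old handler, and an init() that binds flags to struct fields. A compact sketch of the same wiring with made-up names (fooValues/fooCmd do not exist in the podman tree):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

type fooValues struct {
    InputArgs []string
    Format    string
}

var (
    fooCommand  fooValues
    _fooCommand = &cobra.Command{
        Use:   "foo",
        Short: "Example of the cobra command wiring used in this change",
        RunE: func(cmd *cobra.Command, args []string) error {
            fooCommand.InputArgs = args // args land in the values struct, not cli.Context
            return fooCmd(&fooCommand)
        },
    }
)

func init() {
    // Flags are bound directly to struct fields instead of being read by name later.
    _fooCommand.Flags().StringVar(&fooCommand.Format, "format", "", "Change the output format")
}

func fooCmd(c *fooValues) error {
    fmt.Println("args:", c.InputArgs, "format:", c.Format)
    return nil
}

func main() {
    _ = _fooCommand.Execute()
}
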
diff --git a/cmd/podman/docker/types.go b/cmd/podman/docker/types.go
deleted file mode 100644
index eda618e40..000000000
--- a/cmd/podman/docker/types.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package docker
-
-//
-// Types extracted from Docker
-//
-
-import (
- "time"
-
- "github.com/containers/image/pkg/strslice"
- "github.com/opencontainers/go-digest"
-)
-
-// TypeLayers github.com/docker/docker/image/rootfs.go
-const TypeLayers = "layers"
-
-// V2S2MediaTypeManifest github.com/docker/distribution/manifest/schema2/manifest.go
-const V2S2MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
-
-// V2S2MediaTypeImageConfig github.com/docker/distribution/manifest/schema2/manifest.go
-const V2S2MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
-
-// V2S2MediaTypeLayer github.com/docker/distribution/manifest/schema2/manifest.go
-const V2S2MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
-
-// V2S2MediaTypeUncompressedLayer github.com/docker/distribution/manifest/schema2/manifest.go
-const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
-
-// V2S2RootFS describes images root filesystem
-// This is currently a placeholder that only supports layers. In the future
-// this can be made into an interface that supports different implementations.
-// github.com/docker/docker/image/rootfs.go
-type V2S2RootFS struct {
- Type string `json:"type"`
- DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
-}
-
-// V2S2History stores build commands that were used to create an image
-// github.com/docker/docker/image/image.go
-type V2S2History struct {
- // Created is the timestamp at which the image was created
- Created time.Time `json:"created"`
- // Author is the name of the author that was specified when committing the image
- Author string `json:"author,omitempty"`
- // CreatedBy keeps the Dockerfile command used while building the image
- CreatedBy string `json:"created_by,omitempty"`
- // Comment is the commit message that was set when committing the image
- Comment string `json:"comment,omitempty"`
- // EmptyLayer is set to true if this history item did not generate a
- // layer. Otherwise, the history item is associated with the next
- // layer in the RootFS section.
- EmptyLayer bool `json:"empty_layer,omitempty"`
-}
-
-// ID is the content-addressable ID of an image.
-// github.com/docker/docker/image/image.go
-type ID digest.Digest
-
-// HealthConfig holds configuration settings for the HEALTHCHECK feature.
-// github.com/docker/docker/api/types/container/config.go
-type HealthConfig struct {
- // Test is the test to perform to check that the container is healthy.
- // An empty slice means to inherit the default.
- // The options are:
- // {} : inherit healthcheck
- // {"NONE"} : disable healthcheck
- // {"CMD", args...} : exec arguments directly
- // {"CMD-SHELL", command} : run command with system's default shell
- Test []string `json:",omitempty"`
-
- // Zero means to inherit. Durations are expressed as integer nanoseconds.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
- StartPeriod time.Duration `json:",omitempty"` // Time to wait after the container starts before running the first check.
-
- // Retries is the number of consecutive failures needed to consider a container as unhealthy.
- // Zero means inherit.
- Retries int `json:",omitempty"`
-}
-
-// PortSet is a collection of structs indexed by Port
-// github.com/docker/go-connections/nat/nat.go
-type PortSet map[Port]struct{}
-
-// Port is a string containing port number and protocol in the format "80/tcp"
-// github.com/docker/go-connections/nat/nat.go
-type Port string
-
-// Config contains the configuration data about a container.
-// It should hold only portable information about the container.
-// Here, "portable" means "independent from the host we are running on".
-// Non-portable information *should* appear in HostConfig.
-// All fields added to this struct must be marked `omitempty` to keep getting
-// predictable hashes from the old `v1Compatibility` configuration.
-// github.com/docker/docker/api/types/container/config.go
-type Config struct {
- Hostname string // Hostname
- Domainname string // Domainname
- User string // User that will run the command(s) inside the container, also support user:group
- AttachStdin bool // Attach the standard input, makes possible user interaction
- AttachStdout bool // Attach the standard output
- AttachStderr bool // Attach the standard error
- ExposedPorts PortSet `json:",omitempty"` // List of exposed ports
- Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
- OpenStdin bool // Open stdin
- StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
- Env []string // List of environment variable to set in the container
- Cmd strslice.StrSlice // Command to run when starting the container
- Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
- ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
- Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
- Volumes map[string]struct{} // List of volumes (mounts) used for the container
- WorkingDir string // Current directory (PWD) in the command will be launched
- Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
- NetworkDisabled bool `json:",omitempty"` // Is network disabled
- MacAddress string `json:",omitempty"` // Mac Address of the container
- OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
- Labels map[string]string // List of labels set to this container
- StopSignal string `json:",omitempty"` // Signal to stop a container
- StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
- Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
-}
-
-// V1Compatibility - For non-top-level layers, create fake V1Compatibility
-// strings that fit the format and don't collide with anything else, but
-// don't result in runnable images on their own.
-// github.com/docker/distribution/manifest/schema1/config_builder.go
-type V1Compatibility struct {
- ID string `json:"id"`
- Parent string `json:"parent,omitempty"`
- Comment string `json:"comment,omitempty"`
- Created time.Time `json:"created"`
- Config struct {
- Cmd []string
- } `json:"container_config,omitempty"`
- Author string `json:"author,omitempty"`
- ThrowAway bool `json:"throwaway,omitempty"`
-}
-
-// V1Image stores the V1 image configuration.
-// github.com/docker/docker/image/image.go
-type V1Image struct {
- // ID is a unique 64 character identifier of the image
- ID string `json:"id,omitempty"`
- // Parent is the ID of the parent image
- Parent string `json:"parent,omitempty"`
- // Comment is the commit message that was set when committing the image
- Comment string `json:"comment,omitempty"`
- // Created is the timestamp at which the image was created
- Created time.Time `json:"created"`
- // Container is the id of the container used to commit
- Container string `json:"container,omitempty"`
- // ContainerConfig is the configuration of the container that is committed into the image
- ContainerConfig Config `json:"container_config,omitempty"`
- // DockerVersion specifies the version of Docker that was used to build the image
- DockerVersion string `json:"docker_version,omitempty"`
- // Author is the name of the author that was specified when committing the image
- Author string `json:"author,omitempty"`
- // Config is the configuration of the container received from the client
- Config *Config `json:"config,omitempty"`
- // Architecture is the hardware that the image is build and runs on
- Architecture string `json:"architecture,omitempty"`
- // OS is the operating system used to build and run the image
- OS string `json:"os,omitempty"`
- // Size is the total size of the image including all layers it is composed of
- Size int64 `json:",omitempty"`
-}
-
-// V2Image stores the image configuration
-// github.com/docker/docker/image/image.go
-type V2Image struct {
- V1Image
- Parent ID `json:"parent,omitempty"`
- RootFS *V2S2RootFS `json:"rootfs,omitempty"`
- History []V2S2History `json:"history,omitempty"`
- OSVersion string `json:"os.version,omitempty"`
- OSFeatures []string `json:"os.features,omitempty"`
-
- // rawJSON caches the immutable JSON associated with this image.
- //rawJSON []byte
-
- // computedID is the ID computed from the hash of the image config.
- // Not to be confused with the legacy V1 ID in V1Image.
- //computedID ID
-}
-
-// V2Versioned provides a struct with the manifest schemaVersion and mediaType.
-// Incoming content with unknown schema version can be decoded against this
-// struct to check the version.
-// github.com/docker/distribution/manifest/versioned.go
-type V2Versioned struct {
- // SchemaVersion is the image manifest schema that this image follows
- SchemaVersion int `json:"schemaVersion"`
-
- // MediaType is the media type of this schema.
- MediaType string `json:"mediaType,omitempty"`
-}
-
-// V2S1FSLayer is a container struct for BlobSums defined in an image manifest
-// github.com/docker/distribution/manifest/schema1/manifest.go
-type V2S1FSLayer struct {
- // BlobSum is the tarsum of the referenced filesystem image layer
- BlobSum digest.Digest `json:"blobSum"`
-}
-
-// V2S1History stores unstructured v1 compatibility information
-// github.com/docker/distribution/manifest/schema1/manifest.go
-type V2S1History struct {
- // V1Compatibility is the raw v1 compatibility information
- V1Compatibility string `json:"v1Compatibility"`
-}
-
-// V2S1Manifest provides the base accessible fields for working with V2 image
-// format in the registry.
-// github.com/docker/distribution/manifest/schema1/manifest.go
-type V2S1Manifest struct {
- V2Versioned
-
- // Name is the name of the image's repository
- Name string `json:"name"`
-
- // Tag is the tag of the image specified by this manifest
- Tag string `json:"tag"`
-
- // Architecture is the host architecture on which this image is intended to
- // run
- Architecture string `json:"architecture"`
-
- // FSLayers is a list of filesystem layer blobSums contained in this image
- FSLayers []V2S1FSLayer `json:"fsLayers"`
-
- // History is a list of unstructured historical data for v1 compatibility
- History []V2S1History `json:"history"`
-}
-
-// V2S2Descriptor describes targeted content. Used in conjunction with a blob
-// store, a descriptor can be used to fetch, store and target any kind of
-// blob. The struct also describes the wire protocol format. Fields should
-// only be added but never changed.
-// github.com/docker/distribution/blobs.go
-type V2S2Descriptor struct {
- // MediaType describe the type of the content. All text based formats are
- // encoded as utf-8.
- MediaType string `json:"mediaType,omitempty"`
-
- // Size in bytes of content.
- Size int64 `json:"size,omitempty"`
-
- // Digest uniquely identifies the content. A byte stream can be verified
- // against against this digest.
- Digest digest.Digest `json:"digest,omitempty"`
-
- // URLs contains the source URLs of this content.
- URLs []string `json:"urls,omitempty"`
-
- // NOTE: Before adding a field here, please ensure that all
- // other options have been exhausted. Much of the type relationships
- // depend on the simplicity of this type.
-}
-
-// V2S2Manifest defines a schema2 manifest.
-// github.com/docker/distribution/manifest/schema2/manifest.go
-type V2S2Manifest struct {
- V2Versioned
-
- // Config references the image configuration as a blob.
- Config V2S2Descriptor `json:"config"`
-
- // Layers lists descriptors for the layers referenced by the
- // configuration.
- Layers []V2S2Descriptor `json:"layers"`
-}
diff --git a/cmd/podman/errors.go b/cmd/podman/errors.go
new file mode 100644
index 000000000..2572b8779
--- /dev/null
+++ b/cmd/podman/errors.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall"
+
+ "github.com/sirupsen/logrus"
+)
+
+func outputError(err error) {
+ if MainGlobalOpts.LogLevel == "debug" {
+ logrus.Errorf(err.Error())
+ } else {
+ if ee, ok := err.(*exec.ExitError); ok {
+ if status, ok := ee.Sys().(syscall.WaitStatus); ok {
+ exitCode = status.ExitStatus()
+ }
+ }
+ fmt.Fprintln(os.Stderr, "Error:", err.Error())
+ }
+}
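
The new outputError helper relies on the standard Unix behaviour that (*exec.ExitError).Sys() is a syscall.WaitStatus carrying the child's numeric exit code, which podman can then propagate as its own exit status. A standalone sketch of just that extraction:

package main

import (
    "fmt"
    "os/exec"
    "syscall"
)

func main() {
    // A child process that exits with a known status.
    err := exec.Command("sh", "-c", "exit 3").Run()
    if ee, ok := err.(*exec.ExitError); ok {
        if status, ok := ee.Sys().(syscall.WaitStatus); ok {
            fmt.Println("child exited with code", status.ExitStatus()) // prints 3
        }
    }
}
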
diff --git a/cmd/podman/exec.go b/cmd/podman/exec.go
index 073e72e64..7040a7b09 100644
--- a/cmd/podman/exec.go
+++ b/cmd/podman/exec.go
@@ -2,82 +2,78 @@ package main
import (
"fmt"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
- "github.com/urfave/cli"
)
var (
- execFlags = []cli.Flag{
- cli.StringSliceFlag{
- Name: "env, e",
- Usage: "Set environment variables",
- },
- cli.BoolFlag{
- Name: "privileged",
- Usage: "Give the process extended Linux capabilities inside the container. The default is false",
- },
- cli.BoolFlag{
- Name: "interactive, i",
- Usage: "Not supported. All exec commands are interactive by default.",
- },
- cli.BoolFlag{
- Name: "tty, t",
- Usage: "Allocate a pseudo-TTY. The default is false",
- },
- cli.StringFlag{
- Name: "user, u",
- Usage: "Sets the username or UID used and optionally the groupname or GID for the specified command",
- },
- LatestFlag,
- WorkDirFlag,
- }
+ execCommand cliconfig.ExecValues
+
execDescription = `
podman exec
Run a command in a running container
`
-
- execCommand = cli.Command{
- Name: "exec",
- Usage: "Run a process in a running container",
- Description: execDescription,
- Flags: sortFlags(execFlags),
- Action: execCmd,
- ArgsUsage: "CONTAINER-NAME",
- SkipArgReorder: true,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _execCommand = &cobra.Command{
+ Use: "exec",
+ Short: "Run a process in a running container",
+ Long: execDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ execCommand.InputArgs = args
+ execCommand.GlobalFlags = MainGlobalOpts
+ return execCmd(&execCommand)
+ },
+ Example: `podman exec -it ctrID ls
+ podman exec -it -w /tmp myCtr pwd
+ podman exec --user root ctrID ls`,
}
)
-func execCmd(c *cli.Context) error {
- args := c.Args()
+func init() {
+ execCommand.Command = _execCommand
+ execCommand.SetUsageTemplate(UsageTemplate())
+ flags := execCommand.Flags()
+ flags.SetInterspersed(false)
+ flags.StringSliceVarP(&execCommand.Env, "env", "e", []string{}, "Set environment variables")
+ flags.BoolVarP(&execCommand.Interfactive, "interactive", "i", false, "Not supported. All exec commands are interactive by default")
+ flags.BoolVarP(&execCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.BoolVar(&execCommand.Privileged, "privileged", false, "Give the process extended Linux capabilities inside the container. The default is false")
+ flags.BoolVarP(&execCommand.Tty, "tty", "t", false, "Allocate a pseudo-TTY. The default is false")
+ flags.StringVarP(&execCommand.User, "user", "u", "", "Sets the username or UID used and optionally the groupname or GID for the specified command")
+
+ flags.StringVarP(&execCommand.Workdir, "workdir", "w", "", "Working directory inside the container")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func execCmd(c *cliconfig.ExecValues) error {
+ args := c.InputArgs
var ctr *libpod.Container
var err error
argStart := 1
- if len(args) < 1 && !c.Bool("latest") {
+ if len(args) < 1 && !c.Latest {
return errors.Errorf("you must provide one container name or id")
}
- if len(args) < 2 && !c.Bool("latest") {
+ if len(args) < 2 && !c.Latest {
return errors.Errorf("you must provide a command to exec")
}
- if c.Bool("latest") {
+ if c.Latest {
argStart = 0
}
rootless.SetSkipStorageSetup(true)
cmd := args[argStart:]
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- if c.Bool("latest") {
+ if c.Latest {
ctr, err = runtime.GetLatestContainer()
} else {
ctr, err = runtime.LookupContainer(args[0])
@@ -101,7 +97,7 @@ func execCmd(c *cli.Context) error {
// ENVIRONMENT VARIABLES
env := map[string]string{}
- if err := readKVStrings(env, []string{}, c.StringSlice("env")); err != nil {
+ if err := readKVStrings(env, []string{}, c.Env); err != nil {
return errors.Wrapf(err, "unable to process environment variables")
}
envs := []string{}
@@ -109,5 +105,5 @@ func execCmd(c *cli.Context) error {
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
}
- return ctr.Exec(c.Bool("tty"), c.Bool("privileged"), envs, cmd, c.String("user"), c.String("workdir"))
+ return ctr.Exec(c.Tty, c.Privileged, envs, cmd, c.User, c.Workdir)
}
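
One detail worth noting in the exec conversion is flags.SetInterspersed(false): with interspersed parsing disabled, everything after the first positional argument is left untouched, so flags intended for the command running inside the container are not consumed by podman itself. A minimal sketch (not the podman source) of the effect:

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    cmd := &cobra.Command{
        Use: "exec",
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("container:", args[0], "command:", args[1:])
            return nil
        },
    }
    // Stop flag parsing at the first positional argument.
    cmd.Flags().SetInterspersed(false)
    cmd.Flags().BoolP("tty", "t", false, "Allocate a pseudo-TTY")
    cmd.SetArgs([]string{"-t", "myctr", "ls", "-l"})
    _ = cmd.Execute() // prints: container: myctr command: [ls -l]
}
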
diff --git a/cmd/podman/exists.go b/cmd/podman/exists.go
index a7601aaa2..74a4c841b 100644
--- a/cmd/podman/exists.go
+++ b/cmd/podman/exists.go
@@ -1,73 +1,89 @@
package main
import (
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
"os"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/adapter"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/urfave/cli"
)
var (
+ imageExistsCommand cliconfig.ImageExistsValues
+ containerExistsCommand cliconfig.ContainerExistsValues
+ podExistsCommand cliconfig.PodExistsValues
+
imageExistsDescription = `
podman image exists
Check if an image exists in local storage
`
-
- imageExistsCommand = cli.Command{
- Name: "exists",
- Usage: "Check if an image exists in local storage",
- Description: imageExistsDescription,
- Action: imageExistsCmd,
- ArgsUsage: "IMAGE-NAME",
- OnUsageError: usageErrorHandler,
- }
-)
-
-var (
containerExistsDescription = `
podman container exists
Check if a container exists in local storage
`
-
- containerExistsCommand = cli.Command{
- Name: "exists",
- Usage: "Check if a container exists in local storage",
- Description: containerExistsDescription,
- Action: containerExistsCmd,
- ArgsUsage: "CONTAINER-NAME",
- OnUsageError: usageErrorHandler,
- }
-)
-
-var (
podExistsDescription = `
podman pod exists
Check if a pod exists in local storage
`
+ _imageExistsCommand = &cobra.Command{
+ Use: "exists",
+ Short: "Check if an image exists in local storage",
+ Long: imageExistsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ imageExistsCommand.InputArgs = args
+ imageExistsCommand.GlobalFlags = MainGlobalOpts
+ return imageExistsCmd(&imageExistsCommand)
+ },
+ Example: `podman image exists imageID`,
+ }
+
+ _containerExistsCommand = &cobra.Command{
+ Use: "exists",
+ Short: "Check if a container exists in local storage",
+ Long: containerExistsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ containerExistsCommand.InputArgs = args
+ containerExistsCommand.GlobalFlags = MainGlobalOpts
+ return containerExistsCmd(&containerExistsCommand)
- podExistsCommand = cli.Command{
- Name: "exists",
- Usage: "Check if a pod exists in local storage",
- Description: podExistsDescription,
- Action: podExistsCmd,
- ArgsUsage: "POD-NAME",
- OnUsageError: usageErrorHandler,
+ },
+ Example: `podman container exists containerID`,
+ }
+
+ _podExistsCommand = &cobra.Command{
+ Use: "exists",
+ Short: "Check if a pod exists in local storage",
+ Long: podExistsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podExistsCommand.InputArgs = args
+ podExistsCommand.GlobalFlags = MainGlobalOpts
+ return podExistsCmd(&podExistsCommand)
+ },
+ Example: `podman pod exists podID`,
}
)
-func imageExistsCmd(c *cli.Context) error {
- args := c.Args()
+func init() {
+ imageExistsCommand.Command = _imageExistsCommand
+ imageExistsCommand.SetUsageTemplate(UsageTemplate())
+ containerExistsCommand.Command = _containerExistsCommand
+ containerExistsCommand.SetUsageTemplate(UsageTemplate())
+ podExistsCommand.Command = _podExistsCommand
+ podExistsCommand.SetUsageTemplate(UsageTemplate())
+}
+
+func imageExistsCmd(c *cliconfig.ImageExistsValues) error {
+ args := c.InputArgs
if len(args) > 1 || len(args) < 1 {
return errors.New("you may only check for the existence of one image at a time")
}
- runtime, err := adapter.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -83,12 +99,12 @@ func imageExistsCmd(c *cli.Context) error {
return nil
}
-func containerExistsCmd(c *cli.Context) error {
- args := c.Args()
+func containerExistsCmd(c *cliconfig.ContainerExistsValues) error {
+ args := c.InputArgs
if len(args) > 1 || len(args) < 1 {
return errors.New("you may only check for the existence of one container at a time")
}
- runtime, err := adapter.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -102,19 +118,19 @@ func containerExistsCmd(c *cli.Context) error {
return nil
}
-func podExistsCmd(c *cli.Context) error {
- args := c.Args()
+func podExistsCmd(c *cliconfig.PodExistsValues) error {
+ args := c.InputArgs
if len(args) > 1 || len(args) < 1 {
return errors.New("you may only check for the existence of one pod at a time")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
if _, err := runtime.LookupPod(args[0]); err != nil {
- if errors.Cause(err) == libpod.ErrNoSuchPod {
+ if errors.Cause(err) == libpod.ErrNoSuchPod || err.Error() == "io.podman.PodNotFound" {
os.Exit(1)
}
return err
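
The exists subcommands communicate purely through exit status: a lookup failure that means "not found" (the libpod sentinel error or, for the remote client, the varlink error string io.podman.PodNotFound) becomes exit code 1, while anything else is treated as a genuine error. A small sketch of that convention, with errNoSuchPod standing in for libpod.ErrNoSuchPod:

package main

import (
    "fmt"
    "os"

    "github.com/pkg/errors"
)

// errNoSuchPod stands in for libpod.ErrNoSuchPod in this sketch.
var errNoSuchPod = errors.New("no such pod")

func lookupPod(name string) error {
    return errors.Wrapf(errNoSuchPod, "cannot find pod %s", name)
}

func main() {
    if err := lookupPod("mypod"); err != nil {
        if errors.Cause(err) == errNoSuchPod || err.Error() == "io.podman.PodNotFound" {
            os.Exit(1) // "does not exist" is reported purely through the exit status
        }
        fmt.Fprintln(os.Stderr, "Error:", err) // any other failure is a real error
        os.Exit(125)                           // podman's main() uses 125 as its generic error code
    }
    // Falling through means the pod exists; exit status 0.
}
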
diff --git a/cmd/podman/export.go b/cmd/podman/export.go
index eaa4e38a2..5873bad3d 100644
--- a/cmd/podman/export.go
+++ b/cmd/podman/export.go
@@ -3,50 +3,53 @@ package main
import (
"os"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- exportFlags = []cli.Flag{
- cli.StringFlag{
- Name: "output, o",
- Usage: "Write to a file, default is STDOUT",
- Value: "/dev/stdout",
- },
- }
+ exportCommand cliconfig.ExportValues
exportDescription = "Exports container's filesystem contents as a tar archive" +
" and saves it on the local machine."
- exportCommand = cli.Command{
- Name: "export",
- Usage: "Export container's filesystem contents as a tar archive",
- Description: exportDescription,
- Flags: sortFlags(exportFlags),
- Action: exportCmd,
- ArgsUsage: "CONTAINER",
- OnUsageError: usageErrorHandler,
+
+ _exportCommand = &cobra.Command{
+ Use: "export",
+ Short: "Export container's filesystem contents as a tar archive",
+ Long: exportDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ exportCommand.InputArgs = args
+ exportCommand.GlobalFlags = MainGlobalOpts
+ return exportCmd(&exportCommand)
+ },
+ Example: `podman export ctrID > myCtr.tar
+ podman export --output="myCtr.tar" ctrID`,
}
)
+func init() {
+ exportCommand.Command = _exportCommand
+ exportCommand.SetUsageTemplate(UsageTemplate())
+ flags := exportCommand.Flags()
+ flags.StringVarP(&exportCommand.Output, "output", "o", "/dev/stdout", "Write to a file, default is STDOUT")
+}
+
// exportCmd saves a container to a tarball on disk
-func exportCmd(c *cli.Context) error {
- if err := validateFlags(c, exportFlags); err != nil {
- return err
- }
+func exportCmd(c *cliconfig.ExportValues) error {
if os.Geteuid() != 0 {
rootless.SetSkipStorageSetup(true)
}
- runtime, err := adapter.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- args := c.Args()
+ args := c.InputArgs
if len(args) == 0 {
return errors.Errorf("container id must be specified")
}
@@ -54,10 +57,11 @@ func exportCmd(c *cli.Context) error {
return errors.Errorf("too many arguments given, need 1 at most.")
}
- output := c.String("output")
+ output := c.Output
if runtime.Remote && (output == "/dev/stdout" || len(output) == 0) {
return errors.New("remote client usage must specify an output file (-o)")
}
+
if output == "/dev/stdout" {
file := os.Stdout
if logrus.IsTerminal(file) {
@@ -68,5 +72,5 @@ func exportCmd(c *cli.Context) error {
if err := validateFileName(output); err != nil {
return err
}
- return runtime.Export(args[0], c.String("output"))
+ return runtime.Export(args[0], output)
}
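
export refuses to stream a tarball to stdout when stdout is a terminal; the check above goes through logrus, but the underlying test is the usual isatty check on the file descriptor. A sketch of the same guard using golang.org/x/crypto/ssh/terminal, the helper the removed formats.go check relied on:

package main

import (
    "fmt"
    "os"

    "golang.org/x/crypto/ssh/terminal"
)

func main() {
    if terminal.IsTerminal(int(os.Stdout.Fd())) {
        fmt.Fprintln(os.Stderr, "refusing to write a tar archive to a terminal; redirect to a file or use --output")
        os.Exit(1)
    }
    // ... stream the tar archive to os.Stdout here ...
}
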
diff --git a/cmd/podman/formats/formats.go b/cmd/podman/formats/formats.go
index c454c39bd..37f9b8a20 100644
--- a/cmd/podman/formats/formats.go
+++ b/cmd/podman/formats/formats.go
@@ -120,11 +120,8 @@ func (t StdoutTemplateArray) Out() error {
fmt.Fprintln(w, "")
continue
}
- // Only print new line at the end of the output if stdout is the terminal
- if terminal.IsTerminal(int(os.Stdout.Fd())) {
- fmt.Fprintln(w, "")
- }
}
+ fmt.Fprintln(w, "")
return w.Flush()
}
diff --git a/cmd/podman/generate.go b/cmd/podman/generate.go
index 20a4a61ab..773d625ee 100644
--- a/cmd/podman/generate.go
+++ b/cmd/podman/generate.go
@@ -1,23 +1,22 @@
package main
import (
- "github.com/urfave/cli"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
)
var (
- generateSubCommands = []cli.Command{
- containerKubeCommand,
- }
-
+ generateCommand cliconfig.PodmanCommand
generateDescription = "Generate structured data based on containers and pods"
- kubeCommand = cli.Command{
- Name: "generate",
- Usage: "Generate structured data",
- Description: generateDescription,
- ArgsUsage: "",
- Subcommands: generateSubCommands,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
- Hidden: true,
+ _generateCommand = &cobra.Command{
+ Use: "generate",
+ Short: "Generate structured data",
+ Long: generateDescription,
}
)
+
+func init() {
+ generateCommand.Command = _generateCommand
+ generateCommand.AddCommand(getGenerateSubCommands()...)
+ generateCommand.SetUsageTemplate(UsageTemplate())
+}
diff --git a/cmd/podman/generate_kube.go b/cmd/podman/generate_kube.go
index fc667fb5b..15f374c73 100644
--- a/cmd/podman/generate_kube.go
+++ b/cmd/podman/generate_kube.go
@@ -2,38 +2,43 @@ package main
import (
"fmt"
-
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
podmanVersion "github.com/containers/libpod/version"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
"k8s.io/api/core/v1"
)
var (
- containerKubeFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "service, s",
- Usage: "Generate YAML for kubernetes service object",
- },
- }
+ containerKubeCommand cliconfig.GenerateKubeValues
containerKubeDescription = "Generate Kubernetes Pod YAML"
- containerKubeCommand = cli.Command{
- Name: "kube",
- Usage: "Generate Kubernetes pod YAML for a container or pod",
- Description: containerKubeDescription,
- Flags: sortFlags(containerKubeFlags),
- Action: generateKubeYAMLCmd,
- ArgsUsage: "CONTAINER|POD-NAME",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _containerKubeCommand = &cobra.Command{
+ Use: "kube",
+ Short: "Generate Kubernetes pod YAML for a container or pod",
+ Long: containerKubeDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ containerKubeCommand.InputArgs = args
+ containerKubeCommand.GlobalFlags = MainGlobalOpts
+ return generateKubeYAMLCmd(&containerKubeCommand)
+ },
+ Example: `podman generate kube ctrID
+ podman generate kube podID
+ podman generate kube --service podID`,
}
)
-func generateKubeYAMLCmd(c *cli.Context) error {
+func init() {
+ containerKubeCommand.Command = _containerKubeCommand
+ containerKubeCommand.SetUsageTemplate(UsageTemplate())
+ flags := containerKubeCommand.Flags()
+ flags.BoolVarP(&containerKubeCommand.Service, "service", "s", false, "Generate YAML for kubernetes service object")
+}
+
+func generateKubeYAMLCmd(c *cliconfig.GenerateKubeValues) error {
var (
podYAML *v1.Pod
container *libpod.Container
@@ -48,12 +53,12 @@ func generateKubeYAMLCmd(c *cli.Context) error {
if rootless.IsRootless() {
return errors.Wrapf(libpod.ErrNotImplemented, "rootless users")
}
- args := c.Args()
+ args := c.InputArgs
if len(args) > 1 || (len(args) < 1 && !c.Bool("latest")) {
return errors.Errorf("you must provide one container|pod ID or name or --latest")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -77,7 +82,7 @@ func generateKubeYAMLCmd(c *cli.Context) error {
return err
}
- if c.Bool("service") {
+ if c.Service {
serviceYAML := libpod.GenerateKubeServiceFromV1Pod(podYAML, servicePorts)
marshalledService, err = yaml.Marshal(serviceYAML)
if err != nil {
diff --git a/cmd/podman/history.go b/cmd/podman/history.go
index 8a9b6cd94..103ef08e8 100644
--- a/cmd/podman/history.go
+++ b/cmd/podman/history.go
@@ -6,12 +6,13 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
- "github.com/containers/libpod/libpod/adapter"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/docker/go-units"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
const createdByTruncLength = 45
@@ -34,53 +35,44 @@ type historyOptions struct {
}
var (
- historyFlags = []cli.Flag{
- cli.BoolTFlag{
- Name: "human, H",
- Usage: "Display sizes and dates in human readable format",
- },
- cli.BoolFlag{
- Name: "no-trunc, notruncate",
- Usage: "Do not truncate the output",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Display the numeric IDs only",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Change the output to JSON or a Go template",
- },
- }
+ historyCommand cliconfig.HistoryValues
historyDescription = "Displays the history of an image. The information can be printed out in an easy to read, " +
"or user specified format, and can be truncated."
- historyCommand = cli.Command{
- Name: "history",
- Usage: "Show history of a specified image",
- Description: historyDescription,
- Flags: sortFlags(historyFlags),
- Action: historyCmd,
- ArgsUsage: "",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _historyCommand = &cobra.Command{
+ Use: "history",
+ Short: "Show history of a specified image",
+ Long: historyDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ historyCommand.InputArgs = args
+ historyCommand.GlobalFlags = MainGlobalOpts
+ return historyCmd(&historyCommand)
+ },
}
)
-func historyCmd(c *cli.Context) error {
- if err := validateFlags(c, historyFlags); err != nil {
- return err
- }
+func init() {
+ historyCommand.Command = _historyCommand
+ historyCommand.SetUsageTemplate(UsageTemplate())
+ flags := historyCommand.Flags()
+ flags.StringVar(&historyCommand.Format, "format", "", "Change the output to JSON or a Go template")
+ flags.BoolVarP(&historyCommand.Human, "human", "H", true, "Display sizes and dates in human readable format")
+ // notruncate needs to be added
+ flags.BoolVar(&historyCommand.NoTrunc, "no-trunc", false, "Do not truncate the output")
+ flags.BoolVarP(&historyCommand.Quiet, "quiet", "q", false, "Display the numeric IDs only")
+
+}
- runtime, err := adapter.GetRuntime(c)
+func historyCmd(c *cliconfig.HistoryValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- format := genHistoryFormat(c.String("format"), c.Bool("quiet"))
+ format := genHistoryFormat(c.Format, c.Quiet)
- args := c.Args()
+ args := c.InputArgs
if len(args) == 0 {
return errors.Errorf("an image name must be specified")
}
@@ -93,9 +85,9 @@ func historyCmd(c *cli.Context) error {
return err
}
opts := historyOptions{
- human: c.BoolT("human"),
- noTrunc: c.Bool("no-trunc"),
- quiet: c.Bool("quiet"),
+ human: c.Human,
+ noTrunc: c.NoTrunc,
+ quiet: c.Quiet,
format: format,
}
diff --git a/cmd/podman/image.go b/cmd/podman/image.go
index 6b451a1ca..14053cb0d 100644
--- a/cmd/podman/image.go
+++ b/cmd/podman/image.go
@@ -1,37 +1,40 @@
package main
import (
- "sort"
-
- "github.com/urfave/cli"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
)
var (
- imageSubCommands = []cli.Command{
- importCommand,
- historyCommand,
- imageExistsCommand,
- inspectCommand,
- lsImagesCommand,
- pruneImagesCommand,
- pullCommand,
- rmImageCommand,
- tagCommand,
- }
imageDescription = "Manage images"
- imageCommand = cli.Command{
- Name: "image",
- Usage: "Manage images",
- Description: imageDescription,
- ArgsUsage: "",
- Subcommands: getImageSubCommandsSorted(),
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ imageCommand = cliconfig.PodmanCommand{
+ Command: &cobra.Command{
+ Use: "image",
+ Short: "Manage images",
+ Long: imageDescription,
+ },
}
)
-func getImageSubCommandsSorted() []cli.Command {
- imageSubCommands = append(imageSubCommands, getImageSubCommands()...)
- sort.Sort(commandSortedAlpha{imageSubCommands})
- return imageSubCommands
+//imageSubCommands are implemented both in local and remote clients
+var imageSubCommands = []*cobra.Command{
+ _buildCommand,
+ _historyCommand,
+ _imageExistsCommand,
+ _imagesCommand,
+ _importCommand,
+ _inspectCommand,
+ _loadCommand,
+ _pruneImagesCommand,
+ _pullCommand,
+ _pushCommand,
+ _rmiCommand,
+ _saveCommand,
+ _tagCommand,
+}
+
+func init() {
+ imageCommand.SetUsageTemplate(UsageTemplate())
+ imageCommand.AddCommand(imageSubCommands...)
+ imageCommand.AddCommand(getImageSubCommands()...)
}
diff --git a/cmd/podman/imagefilters/filters.go b/cmd/podman/imagefilters/filters.go
index 366510202..d01eb7436 100644
--- a/cmd/podman/imagefilters/filters.go
+++ b/cmd/podman/imagefilters/filters.go
@@ -5,7 +5,7 @@ import (
"strings"
"time"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/inspect"
)
diff --git a/cmd/podman/images.go b/cmd/podman/images.go
index 9fdf0a780..6e82195a9 100644
--- a/cmd/podman/images.go
+++ b/cmd/podman/images.go
@@ -8,15 +8,16 @@ import (
"time"
"unicode"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/imagefilters"
- "github.com/containers/libpod/libpod/adapter"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/docker/go-units"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
type imagesTemplateParams struct {
@@ -83,108 +84,80 @@ func (a imagesSortedSize) Less(i, j int) bool {
}
var (
- imagesFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Show all images (default hides intermediate images)",
- },
- cli.BoolFlag{
- Name: "digests",
- Usage: "Show digests",
- },
- cli.StringSliceFlag{
- Name: "filter, f",
- Usage: "Filter output based on conditions provided (default [])",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Change the output format to JSON or a Go template",
- },
- cli.BoolFlag{
- Name: "noheading, n",
- Usage: "Do not print column headings",
- },
- cli.BoolFlag{
- Name: "no-trunc, notruncate",
- Usage: "Do not truncate output",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Display only image IDs",
- },
- cli.StringFlag{
- Name: "sort",
- Usage: "Sort by created, id, repository, size, or tag",
- Value: "Created",
- },
- }
-
+ imagesCommand cliconfig.ImagesValues
imagesDescription = "lists locally stored images."
- imagesCommand = cli.Command{
- Name: "images",
- Usage: "List images in local storage",
- Description: imagesDescription,
- Flags: sortFlags(imagesFlags),
- Action: imagesCmd,
- ArgsUsage: "",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
- }
- lsImagesCommand = cli.Command{
- Name: "list",
- Aliases: []string{"ls"},
- Usage: "List images in local storage",
- Description: imagesDescription,
- Flags: imagesFlags,
- Action: imagesCmd,
- ArgsUsage: "",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+
+ _imagesCommand = &cobra.Command{
+ Use: "images",
+ Short: "List images in local storage",
+ Long: imagesDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ imagesCommand.InputArgs = args
+ imagesCommand.GlobalFlags = MainGlobalOpts
+ return imagesCmd(&imagesCommand)
+ },
+ Example: `podman images --format json
+ podman images --sort repository --format "table {{.ID}} {{.Repository}} {{.Tag}}"
+ podman images --filter dangling=true`,
}
)
-func imagesCmd(c *cli.Context) error {
+func init() {
+ imagesCommand.Command = _imagesCommand
+ imagesCommand.SetUsageTemplate(UsageTemplate())
+ flags := imagesCommand.Flags()
+ flags.BoolVarP(&imagesCommand.All, "all", "a", false, "Show all images (default hides intermediate images)")
+ flags.BoolVar(&imagesCommand.Digests, "digests", false, "Show digests")
+ flags.StringSliceVarP(&imagesCommand.Filter, "filter", "f", []string{}, "Filter output based on conditions provided (default [])")
+ flags.StringVar(&imagesCommand.Format, "format", "", "Change the output format to JSON or a Go template")
+ flags.BoolVarP(&imagesCommand.Noheading, "noheading", "n", false, "Do not print column headings")
+ // TODO Need to learn how to deal with second name being a string instead of a char.
+ // This needs to be "no-trunc, notruncate"
+ flags.BoolVar(&imagesCommand.NoTrunc, "no-trunc", false, "Do not truncate output")
+ flags.BoolVarP(&imagesCommand.Quiet, "quiet", "q", false, "Display only image IDs")
+ flags.StringVar(&imagesCommand.Sort, "sort", "created", "Sort by created, id, repository, size, or tag")
+
+}
+
+func imagesCmd(c *cliconfig.ImagesValues) error {
var (
filterFuncs []imagefilters.ResultFilter
newImage *adapter.ContainerImage
)
- if err := validateFlags(c, imagesFlags); err != nil {
- return err
- }
- runtime, err := adapter.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "Could not get runtime")
}
defer runtime.Shutdown(false)
- if len(c.Args()) == 1 {
- newImage, err = runtime.NewImageFromLocal(c.Args().Get(0))
+ if len(c.InputArgs) == 1 {
+ newImage, err = runtime.NewImageFromLocal(c.InputArgs[0])
if err != nil {
return err
}
}
- if len(c.Args()) > 1 {
+ if len(c.InputArgs) > 1 {
return errors.New("'podman images' requires at most 1 argument")
}
ctx := getContext()
- if len(c.StringSlice("filter")) > 0 || newImage != nil {
- filterFuncs, err = CreateFilterFuncs(ctx, runtime, c, newImage)
+ if len(c.Filter) > 0 || newImage != nil {
+ filterFuncs, err = CreateFilterFuncs(ctx, runtime, c.Filter, newImage)
if err != nil {
return err
}
}
opts := imagesOptions{
- quiet: c.Bool("quiet"),
- noHeading: c.Bool("noheading"),
- noTrunc: c.Bool("no-trunc"),
- digests: c.Bool("digests"),
- format: c.String("format"),
- sort: c.String("sort"),
- all: c.Bool("all"),
+ quiet: c.Quiet,
+ noHeading: c.Noheading,
+ noTrunc: c.NoTrunc,
+ digests: c.Digests,
+ format: c.Format,
+ sort: c.Sort,
+ all: c.All,
}
opts.outputformat = opts.setOutputFormat()
@@ -195,7 +168,7 @@ func imagesCmd(c *cli.Context) error {
var filteredImages []*adapter.ContainerImage
//filter the images
- if len(c.StringSlice("filter")) > 0 || newImage != nil {
+ if len(c.Filter) > 0 || newImage != nil {
filteredImages = imagefilters.FilterImages(images, filterFuncs)
} else {
filteredImages = images
@@ -276,8 +249,12 @@ func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerIma
}
// get all specified repo:tag pairs and print them separately
+ repopairs, err := image.ReposToMap(img.Names())
+ if err != nil {
+ logrus.Errorf("error finding tag/digest for %s", img.ID())
+ }
outer:
- for repo, tags := range image.ReposToMap(img.Names()) {
+ for repo, tags := range repopairs {
for _, tag := range tags {
size, err := img.Size(ctx)
var sizeStr string
@@ -333,6 +310,7 @@ func getImagesJSONOutput(ctx context.Context, images []*adapter.ContainerImage)
// generateImagesOutput generates the images based on the format provided
func generateImagesOutput(ctx context.Context, images []*adapter.ContainerImage, opts imagesOptions) error {
+ templateMap := GenImageOutputMap()
if len(images) == 0 {
return nil
}
@@ -344,15 +322,17 @@ func generateImagesOutput(ctx context.Context, images []*adapter.ContainerImage,
out = formats.JSONStructArray{Output: imagesToGeneric([]imagesTemplateParams{}, imagesOutput)}
default:
imagesOutput := getImagesTemplateOutput(ctx, images, opts)
- out = formats.StdoutTemplateArray{Output: imagesToGeneric(imagesOutput, []imagesJSONParams{}), Template: opts.outputformat, Fields: imagesOutput[0].HeaderMap()}
+ out = formats.StdoutTemplateArray{Output: imagesToGeneric(imagesOutput, []imagesJSONParams{}), Template: opts.outputformat, Fields: templateMap}
}
return formats.Writer(out).Out()
}
-// HeaderMap produces a generic map of "headers" based on a line
-// of output
-func (i *imagesTemplateParams) HeaderMap() map[string]string {
- v := reflect.Indirect(reflect.ValueOf(i))
+// GenImageOutputMap generates the map used for outputting the images header
+// without requiring a populated image. This replaces the previous HeaderMap
+// call.
+func GenImageOutputMap() map[string]string {
+ io := imagesTemplateParams{}
+ v := reflect.Indirect(reflect.ValueOf(io))
values := make(map[string]string)
for i := 0; i < v.NumField(); i++ {
@@ -368,9 +348,9 @@ func (i *imagesTemplateParams) HeaderMap() map[string]string {
// CreateFilterFuncs returns an array of filter functions based on the user inputs
// and is later used to filter images for output
-func CreateFilterFuncs(ctx context.Context, r *adapter.LocalRuntime, c *cli.Context, img *adapter.ContainerImage) ([]imagefilters.ResultFilter, error) {
+func CreateFilterFuncs(ctx context.Context, r *adapter.LocalRuntime, filters []string, img *adapter.ContainerImage) ([]imagefilters.ResultFilter, error) {
var filterFuncs []imagefilters.ResultFilter
- for _, filter := range c.StringSlice("filter") {
+ for _, filter := range filters {
splitFilter := strings.Split(filter, "=")
switch splitFilter[0] {
case "before":
diff --git a/cmd/podman/images_prune.go b/cmd/podman/images_prune.go
index 844984bb9..79dcd097c 100644
--- a/cmd/podman/images_prune.go
+++ b/cmd/podman/images_prune.go
@@ -3,35 +3,40 @@ package main
import (
"fmt"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ pruneImagesCommand cliconfig.PruneImagesValues
pruneImagesDescription = `
podman image prune
Removes all unnamed images from local storage
`
- pruneImageFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Remove all unused images, not just dangling ones",
+ _pruneImagesCommand = &cobra.Command{
+ Use: "prune",
+ Short: "Remove unused images",
+ Long: pruneImagesDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ pruneImagesCommand.InputArgs = args
+ pruneImagesCommand.GlobalFlags = MainGlobalOpts
+ return pruneImagesCmd(&pruneImagesCommand)
},
}
- pruneImagesCommand = cli.Command{
- Name: "prune",
- Usage: "Remove unused images",
- Description: pruneImagesDescription,
- Action: pruneImagesCmd,
- OnUsageError: usageErrorHandler,
- Flags: pruneImageFlags,
- }
)
-func pruneImagesCmd(c *cli.Context) error {
- runtime, err := adapter.GetRuntime(c)
+func init() {
+ pruneImagesCommand.Command = _pruneImagesCommand
+ pruneImagesCommand.SetUsageTemplate(UsageTemplate())
+ flags := pruneImagesCommand.Flags()
+ flags.BoolVarP(&pruneImagesCommand.All, "all", "a", false, "Remove all unused images, not just dangling ones")
+}
+
+func pruneImagesCmd(c *cliconfig.PruneImagesValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -39,7 +44,7 @@ func pruneImagesCmd(c *cli.Context) error {
// Call prune; if any cids are returned, print them and then
// return err in case an error also came up
- pruneCids, err := runtime.PruneImages(c.Bool("all"))
+ pruneCids, err := runtime.PruneImages(c.All)
if len(pruneCids) > 0 {
for _, cid := range pruneCids {
fmt.Println(cid)
diff --git a/cmd/podman/import.go b/cmd/podman/import.go
index 661bd5a65..a64b03d6d 100644
--- a/cmd/podman/import.go
+++ b/cmd/podman/import.go
@@ -3,47 +3,46 @@ package main
import (
"fmt"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- importFlags = []cli.Flag{
- cli.StringSliceFlag{
- Name: "change, c",
- Usage: "Apply the following possible instructions to the created image (default []): CMD | ENTRYPOINT | ENV | EXPOSE | LABEL | STOPSIGNAL | USER | VOLUME | WORKDIR",
- },
- cli.StringFlag{
- Name: "message, m",
- Usage: "Set commit message for imported image",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Suppress output",
- },
- }
+ importCommand cliconfig.ImportValues
+
importDescription = `Create a container image from the contents of the specified tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz).
Note that remote tarballs can be specified via web address.
Optionally tag the image. You can specify the instructions using the --change option.
`
- importCommand = cli.Command{
- Name: "import",
- Usage: "Import a tarball to create a filesystem image",
- Description: importDescription,
- Flags: sortFlags(importFlags),
- Action: importCmd,
- ArgsUsage: "TARBALL [REFERENCE]",
- OnUsageError: usageErrorHandler,
+ _importCommand = &cobra.Command{
+ Use: "import",
+ Short: "Import a tarball to create a filesystem image",
+ Long: importDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ importCommand.InputArgs = args
+ importCommand.GlobalFlags = MainGlobalOpts
+ return importCmd(&importCommand)
+ },
+ Example: `podman import http://example.com/ctr.tar url-image
+ cat ctr.tar | podman import -q --message "importing the ctr.tar tarball" - image-imported
+ cat ctr.tar | podman import -`,
}
)
-func importCmd(c *cli.Context) error {
- if err := validateFlags(c, importFlags); err != nil {
- return err
- }
+func init() {
+ importCommand.Command = _importCommand
+ importCommand.SetUsageTemplate(UsageTemplate())
+ flags := importCommand.Flags()
+ flags.StringSliceVarP(&importCommand.Change, "change", "c", []string{}, "Apply the following possible instructions to the created image (default []): CMD | ENTRYPOINT | ENV | EXPOSE | LABEL | STOPSIGNAL | USER | VOLUME | WORKDIR")
+ flags.StringVarP(&importCommand.Message, "message", "m", "", "Set commit message for imported image")
+ flags.BoolVarP(&importCommand.Quiet, "quiet", "q", false, "Suppress output")
+
+}
- runtime, err := adapter.GetRuntime(c)
+func importCmd(c *cliconfig.ImportValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -54,7 +53,7 @@ func importCmd(c *cli.Context) error {
reference string
)
- args := c.Args()
+ args := c.InputArgs
switch len(args) {
case 0:
return errors.Errorf("need to give the path to the tarball, or must specify a tarball of '-' for stdin")
@@ -71,7 +70,7 @@ func importCmd(c *cli.Context) error {
return err
}
- quiet := c.Bool("quiet")
+ quiet := c.Quiet
if runtime.Remote {
quiet = false
}
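
The import conversion above follows the pattern repeated for every command in this series: a package-level cliconfig values struct, an unexported *cobra.Command whose RunE closure copies the positional args and global flags into that struct, and an init() that binds flags to the struct fields so the command body never touches the CLI framework directly. A minimal, self-contained sketch of that wiring — the foo command, its values struct, and its single flag are hypothetical stand-ins, not code from this tree:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// fooValues is a hypothetical stand-in for the cliconfig.*Values structs:
// it carries parsed flags and positional args from the RunE closure to the
// command implementation.
type fooValues struct {
	Quiet     bool
	InputArgs []string
}

var (
	fooCommand fooValues

	_fooCommand = &cobra.Command{
		Use:   "foo",
		Short: "Hypothetical command wired like the converted podman commands",
		RunE: func(cmd *cobra.Command, args []string) error {
			fooCommand.InputArgs = args
			return fooCmd(&fooCommand)
		},
	}
)

func init() {
	flags := _fooCommand.Flags()
	flags.BoolVarP(&fooCommand.Quiet, "quiet", "q", false, "Suppress output")
}

// fooCmd only sees plain values, never the CLI framework's context type.
func fooCmd(c *fooValues) error {
	if !c.Quiet {
		fmt.Println("args:", c.InputArgs)
	}
	return nil
}

func main() {
	root := &cobra.Command{Use: "example"}
	root.AddCommand(_fooCommand)
	if err := root.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(125)
	}
}
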
diff --git a/cmd/podman/info.go b/cmd/podman/info.go
index 19078cea5..a1473dac9 100644
--- a/cmd/podman/info.go
+++ b/cmd/podman/info.go
@@ -4,45 +4,47 @@ import (
"fmt"
rt "runtime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/version"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ infoCommand cliconfig.InfoValues
+
infoDescription = "Display podman system information"
- infoCommand = cli.Command{
- Name: "info",
- Usage: infoDescription,
- Description: `Information display here pertain to the host, current storage stats, and build of podman. Useful for the user and when reporting issues.`,
- Flags: sortFlags(infoFlags),
- Action: infoCmd,
- ArgsUsage: "",
- OnUsageError: usageErrorHandler,
- }
- infoFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "debug, D",
- Usage: "Display additional debug information",
- },
- cli.StringFlag{
- Name: "format, f",
- Usage: "Change the output format to JSON or a Go template",
+ _infoCommand = &cobra.Command{
+ Use: "info",
+ Short: infoDescription,
+ Long: `Display information pertaining to the host, current storage stats, and build of podman. Useful for the user and when reporting issues.`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ infoCommand.InputArgs = args
+ infoCommand.GlobalFlags = MainGlobalOpts
+ return infoCmd(&infoCommand)
},
+ Example: `podman info`,
}
)
-func infoCmd(c *cli.Context) error {
- if err := validateFlags(c, infoFlags); err != nil {
- return err
- }
+func init() {
+ infoCommand.Command = _infoCommand
+ infoCommand.SetUsageTemplate(UsageTemplate())
+ flags := infoCommand.Flags()
+
+ flags.BoolVarP(&infoCommand.Debug, "debug", "D", false, "Display additional debug information")
+ flags.StringVarP(&infoCommand.Format, "format", "f", "", "Change the output format to JSON or a Go template")
+
+}
+
+func infoCmd(c *cliconfig.InfoValues) error {
info := map[string]interface{}{}
remoteClientInfo := map[string]interface{}{}
- runtime, err := adapter.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -59,7 +61,7 @@ func infoCmd(c *cli.Context) error {
infoArr = append(infoArr, libpod.InfoData{Type: "client", Data: remoteClientInfo})
}
- if !runtime.Remote && c.Bool("debug") {
+ if !runtime.Remote && c.Debug {
debugInfo := debugInfo(c)
infoArr = append(infoArr, libpod.InfoData{Type: "debug", Data: debugInfo})
}
@@ -69,7 +71,7 @@ func infoCmd(c *cli.Context) error {
}
var out formats.Writer
- infoOutputFormat := c.String("format")
+ infoOutputFormat := c.Format
switch infoOutputFormat {
case formats.JSONString:
out = formats.JSONStruct{Output: info}
@@ -85,11 +87,11 @@ func infoCmd(c *cli.Context) error {
}
// top-level "debug" info
-func debugInfo(c *cli.Context) map[string]interface{} {
+func debugInfo(c *cliconfig.InfoValues) map[string]interface{} {
info := map[string]interface{}{}
info["compiler"] = rt.Compiler
info["go version"] = rt.Version()
- info["podman version"] = c.App.Version
+ info["podman version"] = version.Version
version, _ := libpod.GetVersion()
info["git commit"] = version.GitCommit
return info
diff --git a/cmd/podman/inspect.go b/cmd/podman/inspect.go
index 1346da9fb..46883b31d 100644
--- a/cmd/podman/inspect.go
+++ b/cmd/podman/inspect.go
@@ -5,13 +5,14 @@ import (
"encoding/json"
"strings"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/shared"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/pkg/adapter"
cc "github.com/containers/libpod/pkg/spec"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
const (
@@ -21,38 +22,39 @@ const (
)
var (
- inspectFlags = []cli.Flag{
- cli.StringFlag{
- Name: "type, t",
- Value: inspectAll,
- Usage: "Return JSON for specified type, (e.g image, container or task)",
- },
- cli.StringFlag{
- Name: "format, f",
- Usage: "Change the output format to a Go template",
- },
- cli.BoolFlag{
- Name: "size, s",
- Usage: "Display total file size if the type is container",
- },
- LatestFlag,
- }
+ inspectCommand cliconfig.InspectValues
+
inspectDescription = "This displays the low-level information on containers and images identified by name or ID. By default, this will render all results in a JSON array. If the container and image have the same name, this will return container JSON for unspecified type."
- inspectCommand = cli.Command{
- Name: "inspect",
- Usage: "Display the configuration of a container or image",
- Description: inspectDescription,
- Flags: sortFlags(inspectFlags),
- Action: inspectCmd,
- ArgsUsage: "CONTAINER-OR-IMAGE [CONTAINER-OR-IMAGE]...",
- OnUsageError: usageErrorHandler,
+ _inspectCommand = &cobra.Command{
+ Use: "inspect",
+ Short: "Display the configuration of a container or image",
+ Long: inspectDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ inspectCommand.InputArgs = args
+ inspectCommand.GlobalFlags = MainGlobalOpts
+ return inspectCmd(&inspectCommand)
+ },
+ Example: `podman inspect alpine
+ podman inspect --format "imageId: {{.Id}} size: {{.Size}}" alpine
+ podman inspect --format "image: {{.ImageName}} driver: {{.Driver}}" myctr`,
}
)
-func inspectCmd(c *cli.Context) error {
- args := c.Args()
- inspectType := c.String("type")
- latestContainer := c.Bool("latest")
+func init() {
+ inspectCommand.Command = _inspectCommand
+ inspectCommand.SetUsageTemplate(UsageTemplate())
+ flags := inspectCommand.Flags()
+ flags.StringVarP(&inspectCommand.TypeObject, "type", "t", inspectAll, "Return JSON for the specified type (e.g. image, container or task)")
+ flags.StringVarP(&inspectCommand.Format, "format", "f", "", "Change the output format to a Go template")
+ flags.BoolVarP(&inspectCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of if the type is a container")
+ flags.BoolVarP(&inspectCommand.Size, "size", "s", false, "Display total file size if the type is container")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func inspectCmd(c *cliconfig.InspectValues) error {
+ args := c.InputArgs
+ inspectType := c.TypeObject
+ latestContainer := c.Latest
if len(args) == 0 && !latestContainer {
return errors.Errorf("container or image name must be specified: podman inspect [options [...]] name")
}
@@ -60,11 +62,8 @@ func inspectCmd(c *cli.Context) error {
if len(args) > 0 && latestContainer {
return errors.Errorf("you cannot provide additional arguments with --latest")
}
- if err := validateFlags(c, inspectFlags); err != nil {
- return err
- }
- runtime, err := adapter.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
@@ -74,7 +73,7 @@ func inspectCmd(c *cli.Context) error {
return errors.Errorf("the only recognized types are %q, %q, and %q", inspectTypeContainer, inspectTypeImage, inspectAll)
}
- outputFormat := c.String("format")
+ outputFormat := c.Format
if strings.Contains(outputFormat, "{{.Id}}") {
outputFormat = strings.Replace(outputFormat, "{{.Id}}", formats.IDString, -1)
}
@@ -87,7 +86,7 @@ func inspectCmd(c *cli.Context) error {
inspectType = inspectTypeContainer
}
- inspectedObjects, iterateErr := iterateInput(getContext(), c, args, runtime, inspectType)
+ inspectedObjects, iterateErr := iterateInput(getContext(), c.Size, args, runtime, inspectType)
if iterateErr != nil {
return iterateErr
}
@@ -105,7 +104,7 @@ func inspectCmd(c *cli.Context) error {
}
// func iterateInput iterates the images|containers the user has requested and returns the inspect data and error
-func iterateInput(ctx context.Context, c *cli.Context, args []string, runtime *adapter.LocalRuntime, inspectType string) ([]interface{}, error) {
+func iterateInput(ctx context.Context, size bool, args []string, runtime *adapter.LocalRuntime, inspectType string) ([]interface{}, error) {
var (
data interface{}
inspectedItems []interface{}
@@ -120,7 +119,7 @@ func iterateInput(ctx context.Context, c *cli.Context, args []string, runtime *a
inspectError = errors.Wrapf(err, "error looking up container %q", input)
break
}
- libpodInspectData, err := ctr.Inspect(c.Bool("size"))
+ libpodInspectData, err := ctr.Inspect(size)
if err != nil {
inspectError = errors.Wrapf(err, "error getting libpod container inspect data %s", ctr.ID())
break
@@ -160,7 +159,7 @@ func iterateInput(ctx context.Context, c *cli.Context, args []string, runtime *a
break
}
} else {
- libpodInspectData, err := ctr.Inspect(c.Bool("size"))
+ libpodInspectData, err := ctr.Inspect(size)
if err != nil {
inspectError = errors.Wrapf(err, "error getting libpod container inspect data %s", ctr.ID())
break
diff --git a/cmd/podman/kill.go b/cmd/podman/kill.go
index cfe4b4218..eb72d53e7 100644
--- a/cmd/podman/kill.go
+++ b/cmd/podman/kill.go
@@ -4,6 +4,7 @@ import (
"fmt"
"syscall"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
@@ -11,68 +12,68 @@ import (
"github.com/docker/docker/pkg/signal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- killFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Signal all running containers",
+ killCommand cliconfig.KillValues
+
+ killDescription = "The main process inside each container specified will be sent SIGKILL, or any signal specified with option --signal."
+ _killCommand = &cobra.Command{
+ Use: "kill",
+ Short: "Kill one or more running containers with a specific signal",
+ Long: killDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ killCommand.InputArgs = args
+ killCommand.GlobalFlags = MainGlobalOpts
+ return killCmd(&killCommand)
},
- cli.StringFlag{
- Name: "signal, s",
- Usage: "Signal to send to the container",
- Value: "KILL",
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
},
- LatestFlag,
- }
- killDescription = "The main process inside each container specified will be sent SIGKILL, or any signal specified with option --signal."
- killCommand = cli.Command{
- Name: "kill",
- Usage: "Kill one or more running containers with a specific signal",
- Description: killDescription,
- Flags: sortFlags(killFlags),
- Action: killCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ Example: `podman kill mywebserver
+ podman kill 860a4b23
+ podman kill --signal TERM ctrID`,
}
)
+func init() {
+ killCommand.Command = _killCommand
+ killCommand.SetUsageTemplate(UsageTemplate())
+ flags := killCommand.Flags()
+
+ flags.BoolVarP(&killCommand.All, "all", "a", false, "Signal all running containers")
+ flags.StringVarP(&killCommand.Signal, "signal", "s", "KILL", "Signal to send to the container")
+ flags.BoolVarP(&killCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
// killCmd kills one or more containers with a signal
-func killCmd(c *cli.Context) error {
+func killCmd(c *cliconfig.KillValues) error {
var (
killFuncs []shared.ParallelWorkerInput
killSignal uint = uint(syscall.SIGTERM)
)
- if err := checkAllAndLatest(c); err != nil {
- return err
- }
-
- if err := validateFlags(c, killFlags); err != nil {
- return err
- }
-
rootless.SetSkipStorageSetup(true)
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- if c.String("signal") != "" {
+ if c.Signal != "" {
// Check if the signalString provided by the user is valid
// Invalid signals will return err
- sysSignal, err := signal.ParseSignal(c.String("signal"))
+ sysSignal, err := signal.ParseSignal(c.Signal)
if err != nil {
return err
}
killSignal = uint(sysSignal)
}
- containers, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+ containers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateRunning, "running")
if err != nil {
if len(containers) == 0 {
return err
@@ -94,7 +95,7 @@ func killCmd(c *cli.Context) error {
maxWorkers := shared.Parallelize("kill")
if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
+ maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
diff --git a/cmd/podman/libpodruntime/runtime.go b/cmd/podman/libpodruntime/runtime.go
index dca2f5022..880b281bd 100644
--- a/cmd/podman/libpodruntime/runtime.go
+++ b/cmd/podman/libpodruntime/runtime.go
@@ -1,15 +1,24 @@
package libpodruntime
import (
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
- "github.com/urfave/cli"
)
+// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber
+func GetRuntimeRenumber(c *cliconfig.PodmanCommand) (*libpod.Runtime, error) {
+ return getRuntime(c, true)
+}
+
// GetRuntime generates a new libpod runtime configured by command line options
-func GetRuntime(c *cli.Context) (*libpod.Runtime, error) {
+func GetRuntime(c *cliconfig.PodmanCommand) (*libpod.Runtime, error) {
+ return getRuntime(c, false)
+}
+
+func getRuntime(c *cliconfig.PodmanCommand, renumber bool) (*libpod.Runtime, error) {
options := []libpod.RuntimeOption{}
storageOpts, volumePath, err := util.GetDefaultStoreOptions()
@@ -17,29 +26,39 @@ func GetRuntime(c *cli.Context) (*libpod.Runtime, error) {
return nil, err
}
- if c.IsSet("uidmap") || c.IsSet("gidmap") || c.IsSet("subuidmap") || c.IsSet("subgidmap") {
- mappings, err := util.ParseIDMapping(c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidmap"), c.String("subgidmap"))
+ uidmapFlag := c.Flags().Lookup("uidmap")
+ gidmapFlag := c.Flags().Lookup("gidmap")
+ subuidname := c.Flags().Lookup("subuidname")
+ subgidname := c.Flags().Lookup("subgidname")
+ if (uidmapFlag != nil && gidmapFlag != nil && subuidname != nil && subgidname != nil) &&
+ (uidmapFlag.Changed || gidmapFlag.Changed || subuidname.Changed || subgidname.Changed) {
+ uidmapVal, _ := c.Flags().GetStringSlice("uidmap")
+ gidmapVal, _ := c.Flags().GetStringSlice("gidmap")
+ subuidVal, _ := c.Flags().GetString("subuidname")
+ subgidVal, _ := c.Flags().GetString("subgidname")
+ mappings, err := util.ParseIDMapping(uidmapVal, gidmapVal, subuidVal, subgidVal)
if err != nil {
return nil, err
}
storageOpts.UIDMap = mappings.UIDMap
storageOpts.GIDMap = mappings.GIDMap
+
}
- if c.GlobalIsSet("root") {
- storageOpts.GraphRoot = c.GlobalString("root")
+ if c.Flags().Changed("root") {
+ storageOpts.GraphRoot = c.GlobalFlags.Root
}
- if c.GlobalIsSet("runroot") {
- storageOpts.RunRoot = c.GlobalString("runroot")
+ if c.Flags().Changed("runroot") {
+ storageOpts.RunRoot = c.GlobalFlags.Runroot
}
if len(storageOpts.RunRoot) > 50 {
return nil, errors.New("the specified runroot is longer than 50 characters")
}
- if c.GlobalIsSet("storage-driver") {
- storageOpts.GraphDriverName = c.GlobalString("storage-driver")
+ if c.Flags().Changed("storage-driver") {
+ storageOpts.GraphDriverName = c.GlobalFlags.StorageDriver
}
- if c.GlobalIsSet("storage-opt") {
- storageOpts.GraphDriverOptions = c.GlobalStringSlice("storage-opt")
+ if len(c.GlobalFlags.StorageOpts) > 0 {
+ storageOpts.GraphDriverOptions = c.GlobalFlags.StorageOpts
}
options = append(options, libpod.WithStorageConfig(storageOpts))
@@ -47,23 +66,23 @@ func GetRuntime(c *cli.Context) (*libpod.Runtime, error) {
// TODO CLI flags for image config?
// TODO CLI flag for signature policy?
- if c.GlobalIsSet("namespace") {
- options = append(options, libpod.WithNamespace(c.GlobalString("namespace")))
+ if len(c.GlobalFlags.Namespace) > 0 {
+ options = append(options, libpod.WithNamespace(c.GlobalFlags.Namespace))
}
- if c.GlobalIsSet("runtime") {
- options = append(options, libpod.WithOCIRuntime(c.GlobalString("runtime")))
+ if c.Flags().Changed("runtime") {
+ options = append(options, libpod.WithOCIRuntime(c.GlobalFlags.Runtime))
}
- if c.GlobalIsSet("conmon") {
- options = append(options, libpod.WithConmonPath(c.GlobalString("conmon")))
+ if c.Flags().Changed("conmon") {
+ options = append(options, libpod.WithConmonPath(c.GlobalFlags.ConmonPath))
}
- if c.GlobalIsSet("tmpdir") {
- options = append(options, libpod.WithTmpDir(c.GlobalString("tmpdir")))
+ if c.Flags().Changed("tmpdir") {
+ options = append(options, libpod.WithTmpDir(c.GlobalFlags.TmpDir))
}
- if c.GlobalIsSet("cgroup-manager") {
- options = append(options, libpod.WithCgroupManager(c.GlobalString("cgroup-manager")))
+ if c.Flags().Changed("cgroup-manager") {
+ options = append(options, libpod.WithCgroupManager(c.GlobalFlags.CGroupManager))
} else {
if rootless.IsRootless() {
options = append(options, libpod.WithCgroupManager("cgroupfs"))
@@ -73,29 +92,37 @@ func GetRuntime(c *cli.Context) (*libpod.Runtime, error) {
// TODO flag to set libpod static dir?
// TODO flag to set libpod tmp dir?
- if c.GlobalIsSet("cni-config-dir") {
- options = append(options, libpod.WithCNIConfigDir(c.GlobalString("cni-config-dir")))
+ if c.Flags().Changed("cni-config-dir") {
+ options = append(options, libpod.WithCNIConfigDir(c.GlobalFlags.CniConfigDir))
}
- if c.GlobalIsSet("default-mounts-file") {
- options = append(options, libpod.WithDefaultMountsFile(c.GlobalString("default-mounts-file")))
+ if c.Flags().Changed("default-mounts-file") {
+ options = append(options, libpod.WithDefaultMountsFile(c.GlobalFlags.DefaultMountsFile))
}
- if c.GlobalIsSet("hooks-dir") {
- options = append(options, libpod.WithHooksDir(c.GlobalStringSlice("hooks-dir")...))
+ if c.Flags().Changed("hooks-dir") {
+ options = append(options, libpod.WithHooksDir(c.GlobalFlags.HooksDir...))
}
// TODO flag to set CNI plugins dir?
+ // TODO I don't think these belong here?
+ // Will follow up with a different PR to address
+ //
// Pod create options
- if c.IsSet("infra-image") {
- options = append(options, libpod.WithDefaultInfraImage(c.String("infra-image")))
+
+ infraImageFlag := c.Flags().Lookup("infra-image")
+ if infraImageFlag != nil && infraImageFlag.Changed {
+ infraImage, _ := c.Flags().GetString("infra-image")
+ options = append(options, libpod.WithDefaultInfraImage(infraImage))
}
- if c.IsSet("infra-command") {
- options = append(options, libpod.WithDefaultInfraCommand(c.String("infra-command")))
+ infraCommandFlag := c.Flags().Lookup("infra-command")
+ if infraCommandFlag != nil && infraCommandFlag.Changed {
+ infraCommand, _ := c.Flags().GetString("infra-command")
+ options = append(options, libpod.WithDefaultInfraCommand(infraCommand))
}
options = append(options, libpod.WithVolumePath(volumePath))
- if c.IsSet("config") {
- return libpod.NewRuntimeFromConfig(c.String("config"), options...)
+ if c.Flags().Changed("config") {
+ return libpod.NewRuntimeFromConfig(c.GlobalFlags.Config, options...)
}
return libpod.NewRuntime(options...)
}
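
One recurring detail in the runtime setup above: pflag has no direct counterpart to urfave/cli's IsSet/GlobalIsSet, so the code looks each flag up and checks its Changed field, guarding against a nil Lookup result because the remote client does not register every flag. A short sketch of that idiom under assumed names (the --root flag and its default here are illustrative only):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("example", pflag.ExitOnError)
	root := flags.String("root", "/var/lib/example", "storage root")

	// Simulate a command line; in the real binary cobra calls Parse for us.
	_ = flags.Parse([]string{"--root", "/tmp/alt-root"})

	// Lookup returns nil when the flag was never registered (as can happen
	// on the remote client), so guard before touching Changed.
	if f := flags.Lookup("root"); f != nil && f.Changed {
		// Override the built-in default only if the user actually set it.
		fmt.Println("user-provided root:", *root)
	}
}
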
diff --git a/cmd/podman/load.go b/cmd/podman/load.go
index f39ee4487..272cd78d2 100644
--- a/cmd/podman/load.go
+++ b/cmd/podman/load.go
@@ -6,48 +6,43 @@ import (
"io/ioutil"
"os"
- "github.com/containers/image/directory"
- dockerarchive "github.com/containers/image/docker/archive"
- ociarchive "github.com/containers/image/oci/archive"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- loadFlags = []cli.Flag{
- cli.StringFlag{
- Name: "input, i",
- Usage: "Read from archive file, default is STDIN",
- Value: "/dev/stdin",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Suppress the output",
- },
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "`pathname` of signature policy file (not usually used)",
- },
- }
+ loadCommand cliconfig.LoadValues
+
loadDescription = "Loads the image from docker-archive stored on the local machine."
- loadCommand = cli.Command{
- Name: "load",
- Usage: "Load an image from docker archive",
- Description: loadDescription,
- Flags: sortFlags(loadFlags),
- Action: loadCmd,
- ArgsUsage: "",
- OnUsageError: usageErrorHandler,
+ _loadCommand = &cobra.Command{
+ Use: "load",
+ Short: "Load an image from docker archive",
+ Long: loadDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ loadCommand.InputArgs = args
+ loadCommand.GlobalFlags = MainGlobalOpts
+ return loadCmd(&loadCommand)
+ },
}
)
+func init() {
+ loadCommand.Command = _loadCommand
+ loadCommand.SetUsageTemplate(UsageTemplate())
+ flags := loadCommand.Flags()
+ flags.StringVarP(&loadCommand.Input, "input", "i", "/dev/stdin", "Read from archive file, default is STDIN")
+ flags.BoolVarP(&loadCommand.Quiet, "quiet", "q", false, "Suppress the output")
+ flags.StringVar(&loadCommand.SignaturePolicy, "signature-policy", "", "Pathname of signature policy file (not usually used)")
+
+}
+
// loadCmd gets the image/file to be loaded from the command line
// and calls loadImage to load the image to containers-storage
-func loadCmd(c *cli.Context) error {
+func loadCmd(c *cliconfig.LoadValues) error {
- args := c.Args()
+ args := c.InputArgs
var imageName string
if len(args) == 1 {
@@ -56,18 +51,17 @@ func loadCmd(c *cli.Context) error {
if len(args) > 1 {
return errors.New("too many arguments. Requires exactly 1")
}
- if err := validateFlags(c, loadFlags); err != nil {
- return err
- }
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- input := c.String("input")
-
+ input := c.Input
+ if runtime.Remote && len(input) == 0 {
+ return errors.New("the remote client requires you to load via -i and a tarball")
+ }
if input == "/dev/stdin" {
fi, err := os.Stdin.Stat()
if err != nil {
@@ -100,46 +94,10 @@ func loadCmd(c *cli.Context) error {
return err
}
- var writer io.Writer
- if !c.Bool("quiet") {
- writer = os.Stderr
- }
-
- ctx := getContext()
-
- var newImages []*image.Image
- src, err := dockerarchive.ParseReference(input) // FIXME? We should add dockerarchive.NewReference()
- if err == nil {
- newImages, err = runtime.ImageRuntime().LoadFromArchiveReference(ctx, src, c.String("signature-policy"), writer)
- }
+ names, err := runtime.LoadImage(getContext(), imageName, c)
if err != nil {
- // generate full src name with specified image:tag
- src, err := ociarchive.NewReference(input, imageName) // imageName may be ""
- if err == nil {
- newImages, err = runtime.ImageRuntime().LoadFromArchiveReference(ctx, src, c.String("signature-policy"), writer)
- }
- if err != nil {
- src, err := directory.NewReference(input)
- if err == nil {
- newImages, err = runtime.ImageRuntime().LoadFromArchiveReference(ctx, src, c.String("signature-policy"), writer)
- }
- if err != nil {
- return errors.Wrapf(err, "error pulling %q", input)
- }
- }
+ return err
}
- fmt.Println("Loaded image(s): " + getImageNames(newImages))
+ fmt.Println("Loaded image(s): " + names)
return nil
}
-
-func getImageNames(images []*image.Image) string {
- var names string
- for i := range images {
- if i == 0 {
- names = images[i].InputName
- } else {
- names += ", " + images[i].InputName
- }
- }
- return names
-}
diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index fc7b39ed8..b02a4b3f9 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -9,55 +9,51 @@ import (
"github.com/containers/image/docker"
"github.com/containers/image/pkg/docker/config"
"github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod/common"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
)
var (
- loginFlags = []cli.Flag{
- cli.StringFlag{
- Name: "password, p",
- Usage: "Password for registry",
- },
- cli.StringFlag{
- Name: "username, u",
- Usage: "Username for registry",
- },
- cli.StringFlag{
- Name: "authfile",
- Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override. ",
- },
- cli.StringFlag{
- Name: "cert-dir",
- Usage: "Pathname of a directory containing TLS certificates and keys used to connect to the registry",
- },
- cli.BoolTFlag{
- Name: "get-login",
- Usage: "Return the current login user for the registry",
- },
- cli.BoolTFlag{
- Name: "tls-verify",
- Usage: "Require HTTPS and verify certificates when contacting registries (default: true)",
- },
- }
+ loginCommand cliconfig.LoginValues
+
loginDescription = "Login to a container registry on a specified server."
- loginCommand = cli.Command{
- Name: "login",
- Usage: "Login to a container registry",
- Description: loginDescription,
- Flags: sortFlags(loginFlags),
- Action: loginCmd,
- ArgsUsage: "REGISTRY",
- OnUsageError: usageErrorHandler,
+ _loginCommand = &cobra.Command{
+ Use: "login",
+ Short: "Login to a container registry",
+ Long: loginDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ loginCommand.InputArgs = args
+ loginCommand.GlobalFlags = MainGlobalOpts
+ return loginCmd(&loginCommand)
+ },
+ Example: `podman login -u testuser -p testpassword localhost:5000
+ podman login --authfile authdir/myauths.json quay.io`,
}
)
+func init() {
+ loginCommand.Command = _loginCommand
+ loginCommand.SetUsageTemplate(UsageTemplate())
+ flags := loginCommand.Flags()
+
+ flags.StringVar(&loginCommand.Authfile, "authfile", "", "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&loginCommand.CertDir, "cert-dir", "", "Pathname of a directory containing TLS certificates and keys used to connect to the registry")
+ flags.BoolVar(&loginCommand.GetLogin, "get-login", true, "Return the current login user for the registry")
+ flags.StringVarP(&loginCommand.Password, "password", "p", "", "Password for registry")
+ flags.BoolVar(&loginCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries (default: true)")
+ flags.StringVarP(&loginCommand.Username, "username", "u", "", "Username for registry")
+ flags.BoolVar(&loginCommand.StdinPassword, "password-stdin", false, "Take the password from stdin")
+
+}
+
// loginCmd uses the authentication package to store a user's authenticated credentials
// in an auth.json file for future use
-func loginCmd(c *cli.Context) error {
- args := c.Args()
+func loginCmd(c *cliconfig.LoginValues) error {
+ args := c.InputArgs
if len(args) > 1 {
return errors.Errorf("too many arguments, login takes only 1 argument")
}
@@ -65,17 +61,17 @@ func loginCmd(c *cli.Context) error {
return errors.Errorf("please specify a registry to login to")
}
server := registryFromFullName(scrubServer(args[0]))
- authfile := getAuthFile(c.String("authfile"))
+ authfile := getAuthFile(c.Authfile)
sc := common.GetSystemContext("", authfile, false)
- if c.IsSet("tls-verify") {
- sc.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ if c.Flag("tls-verify").Changed {
+ sc.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
- if c.String("cert-dir") != "" {
- sc.DockerCertPath = c.String("cert-dir")
+ if c.CertDir != "" {
+ sc.DockerCertPath = c.CertDir
}
- if c.IsSet("get-login") {
+ if c.Flag("get-login").Changed {
user, err := config.GetUserLoggedIn(sc, server)
if err != nil {
@@ -97,8 +93,26 @@ func loginCmd(c *cli.Context) error {
}
ctx := getContext()
+
+ password := c.Password
+
+ if c.Flag("password-stdin").Changed {
+ var stdinPasswordStrBuilder strings.Builder
+ if c.Password != "" {
+ return errors.Errorf("Can't specify both --password-stdin and --password")
+ }
+ if c.Username == "" {
+ return errors.Errorf("Must provide --username with --password-stdin")
+ }
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ fmt.Fprint(&stdinPasswordStrBuilder, scanner.Text())
+ }
+ password = stdinPasswordStrBuilder.String()
+ }
+
// If no username and no password is specified, try to use existing ones.
- if c.String("username") == "" && c.String("password") == "" {
+ if c.Username == "" && password == "" && userFromAuthFile != "" && passFromAuthFile != "" {
fmt.Println("Authenticating with existing credentials...")
if err := docker.CheckAuth(ctx, sc, userFromAuthFile, passFromAuthFile, server); err == nil {
fmt.Println("Existing credentials are valid. Already logged in to", server)
@@ -107,7 +121,7 @@ func loginCmd(c *cli.Context) error {
fmt.Println("Existing credentials are invalid, please enter valid username and password")
}
- username, password, err := getUserAndPass(c.String("username"), c.String("password"), userFromAuthFile)
+ username, password, err := getUserAndPass(c.Username, password, userFromAuthFile)
if err != nil {
return errors.Wrapf(err, "error getting username and password")
}
diff --git a/cmd/podman/logout.go b/cmd/podman/logout.go
index 3cdb606b5..4108887f0 100644
--- a/cmd/podman/logout.go
+++ b/cmd/podman/logout.go
@@ -4,53 +4,58 @@ import (
"fmt"
"github.com/containers/image/pkg/docker/config"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod/common"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- logoutFlags = []cli.Flag{
- cli.StringFlag{
- Name: "authfile",
- Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override. ",
- },
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Remove the cached credentials for all registries in the auth file",
- },
- }
+ logoutCommand cliconfig.LogoutValues
logoutDescription = "Remove the cached username and password for the registry."
- logoutCommand = cli.Command{
- Name: "logout",
- Usage: "Logout of a container registry",
- Description: logoutDescription,
- Flags: sortFlags(logoutFlags),
- Action: logoutCmd,
- ArgsUsage: "REGISTRY",
- OnUsageError: usageErrorHandler,
+ _logoutCommand = &cobra.Command{
+ Use: "logout",
+ Short: "Logout of a container registry",
+ Long: logoutDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ logoutCommand.InputArgs = args
+ logoutCommand.GlobalFlags = MainGlobalOpts
+ return logoutCmd(&logoutCommand)
+ },
+ Example: `podman logout docker.io
+ podman logout --authfile authdir/myauths.json docker.io
+ podman logout --all`,
}
)
+func init() {
+ logoutCommand.Command = _logoutCommand
+ logoutCommand.SetUsageTemplate(UsageTemplate())
+ flags := logoutCommand.Flags()
+ flags.BoolVarP(&logoutCommand.All, "all", "a", false, "Remove the cached credentials for all registries in the auth file")
+ flags.StringVar(&logoutCommand.Authfile, "authfile", "", "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override")
+
+}
+
// logoutCmd uses the authentication package to remove the authenticated of a registry
// stored in the auth.json file
-func logoutCmd(c *cli.Context) error {
- args := c.Args()
+func logoutCmd(c *cliconfig.LogoutValues) error {
+ args := c.InputArgs
if len(args) > 1 {
return errors.Errorf("too many arguments, logout takes at most 1 argument")
}
- if len(args) == 0 && !c.IsSet("all") {
+ if len(args) == 0 && !c.All {
return errors.Errorf("registry must be given")
}
var server string
if len(args) == 1 {
server = scrubServer(args[0])
}
- authfile := getAuthFile(c.String("authfile"))
+ authfile := getAuthFile(c.Authfile)
sc := common.GetSystemContext("", authfile, false)
- if c.Bool("all") {
+ if c.All {
if err := config.RemoveAllAuthentication(sc); err != nil {
return err
}
diff --git a/cmd/podman/logs.go b/cmd/podman/logs.go
index 75947c34e..97d835d8f 100644
--- a/cmd/podman/logs.go
+++ b/cmd/podman/logs.go
@@ -4,91 +4,85 @@ import (
"os"
"time"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/logs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- logsFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "details",
- Usage: "Show extra details provided to the logs",
- Hidden: true,
- },
- cli.BoolFlag{
- Name: "follow, f",
- Usage: "Follow log output. The default is false",
- },
- cli.StringFlag{
- Name: "since",
- Usage: "Show logs since TIMESTAMP",
- },
- cli.Uint64Flag{
- Name: "tail",
- Usage: "Output the specified number of LINES at the end of the logs. Defaults to 0, which prints all lines",
- },
- cli.BoolFlag{
- Name: "timestamps, t",
- Usage: "Output the timestamps in the log",
- },
- LatestFlag,
- }
+ logsCommand cliconfig.LogsValues
logsDescription = "The podman logs command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution" +
"order when combined with podman run (i.e. your run may not have generated any logs at the time you execute podman logs"
- logsCommand = cli.Command{
- Name: "logs",
- Usage: "Fetch the logs of a container",
- Description: logsDescription,
- Flags: sortFlags(logsFlags),
- Action: logsCmd,
- ArgsUsage: "CONTAINER",
- SkipArgReorder: true,
- OnUsageError: usageErrorHandler,
- UseShortOptionHandling: true,
+ _logsCommand = &cobra.Command{
+ Use: "logs",
+ Short: "Fetch the logs of a container",
+ Long: logsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ logsCommand.InputArgs = args
+ logsCommand.GlobalFlags = MainGlobalOpts
+ return logsCmd(&logsCommand)
+ },
+ Example: `podman logs ctrID
+ podman logs --tail 2 mywebserver
+ podman logs --follow=true --since 10m ctrID`,
}
)
-func logsCmd(c *cli.Context) error {
+func init() {
+ logsCommand.Command = _logsCommand
+ logsCommand.SetUsageTemplate(UsageTemplate())
+ flags := logsCommand.Flags()
+ flags.BoolVar(&logsCommand.Details, "details", false, "Show extra details provided to the logs")
+ flags.BoolVarP(&logsCommand.Follow, "follow", "f", false, "Follow log output. The default is false")
+ flags.BoolVarP(&logsCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.StringVar(&logsCommand.Since, "since", "", "Show logs since TIMESTAMP")
+ flags.Uint64Var(&logsCommand.Tail, "tail", 0, "Output the specified number of LINES at the end of the logs. Defaults to 0, which prints all lines")
+ flags.BoolVarP(&logsCommand.Timestamps, "timestamps", "t", false, "Output the timestamps in the log")
+ flags.MarkHidden("details")
+
+ flags.SetInterspersed(false)
+
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func logsCmd(c *cliconfig.LogsValues) error {
var ctr *libpod.Container
var err error
- if err := validateFlags(c, logsFlags); err != nil {
- return err
- }
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- args := c.Args()
- if len(args) != 1 && !c.Bool("latest") {
+ args := c.InputArgs
+ if len(args) != 1 && !c.Latest {
return errors.Errorf("'podman logs' requires exactly one container name/ID")
}
sinceTime := time.Time{}
- if c.IsSet("since") {
+ if c.Flag("since").Changed {
// parse time, error out if something is wrong
- since, err := parseInputTime(c.String("since"))
+ since, err := parseInputTime(c.Since)
if err != nil {
- return errors.Wrapf(err, "could not parse time: %q", c.String("since"))
+ return errors.Wrapf(err, "could not parse time: %q", c.Since)
}
sinceTime = since
}
opts := &logs.LogOptions{
- Details: c.Bool("details"),
- Follow: c.Bool("follow"),
+ Details: c.Details,
+ Follow: c.Follow,
Since: sinceTime,
- Tail: c.Uint64("tail"),
- Timestamps: c.Bool("timestamps"),
+ Tail: c.Tail,
+ Timestamps: c.Timestamps,
}
- if c.Bool("latest") {
+ if c.Latest {
ctr, err = runtime.GetLatestContainer()
} else {
ctr, err = runtime.LookupContainer(args[0])
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 8c08b2bfb..19bdb40d6 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -1,230 +1,228 @@
package main
import (
- "fmt"
+ "context"
+ "io"
"log/syslog"
"os"
- "os/exec"
"runtime/pprof"
- "sort"
+ "strings"
"syscall"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
_ "github.com/containers/libpod/pkg/hooks/0.1.0"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/tracing"
"github.com/containers/libpod/version"
"github.com/containers/storage/pkg/reexec"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
lsyslog "github.com/sirupsen/logrus/hooks/syslog"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
// This is populated by the Makefile from the VERSION file
// in the repository
var (
exitCode = 125
+ Ctx context.Context
+ span opentracing.Span
+ closer io.Closer
)
-var cmdsNotRequiringRootless = map[string]bool{
- "help": true,
- "version": true,
- "create": true,
- "exec": true,
- "export": true,
- // `info` must be executed in an user namespace.
- // If this change, please also update libpod.refreshRootless()
- "login": true,
- "logout": true,
- "mount": true,
- "kill": true,
- "pause": true,
- "restart": true,
- "run": true,
- "unpause": true,
- "search": true,
- "stats": true,
- "stop": true,
- "top": true,
+// Commands that the remote and local client have
+// implemented.
+var mainCommands = []*cobra.Command{
+ _buildCommand,
+ _exportCommand,
+ _historyCommand,
+ _imagesCommand,
+ _importCommand,
+ _infoCommand,
+ _inspectCommand,
+ _killCommand,
+ _loadCommand,
+ podCommand.Command,
+ _pullCommand,
+ _pushCommand,
+ _rmiCommand,
+ _saveCommand,
+ _tagCommand,
+ _versionCommand,
+ imageCommand.Command,
+ systemCommand.Command,
}
-type commandSorted []cli.Command
-
-func (a commandSorted) Len() int { return len(a) }
-func (a commandSorted) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type commandSortedAlpha struct{ commandSorted }
-
-func (a commandSortedAlpha) Less(i, j int) bool {
- return a.commandSorted[i].Name < a.commandSorted[j].Name
+var cmdsNotRequiringRootless = map[*cobra.Command]bool{
+ _versionCommand: true,
+ _createCommand: true,
+ _execCommand: true,
+ _cpCommand: true,
+ _exportCommand: true,
+ //// `info` must be executed in a user namespace.
+ //// If this changes, please also update libpod.refreshRootless()
+ _loginCommand: true,
+ _logoutCommand: true,
+ _mountCommand: true,
+ _killCommand: true,
+ _pauseCommand: true,
+ _restartCommand: true,
+ _runCommand: true,
+ _unpauseCommand: true,
+ _searchCommand: true,
+ _statsCommand: true,
+ _stopCommand: true,
+ _topCommand: true,
}
-type flagSorted []cli.Flag
-
-func (a flagSorted) Len() int { return len(a) }
-func (a flagSorted) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type flagSortedAlpha struct{ flagSorted }
-
-func (a flagSortedAlpha) Less(i, j int) bool {
- return a.flagSorted[i].GetName() < a.flagSorted[j].GetName()
+var rootCmd = &cobra.Command{
+ Use: "podman",
+ Long: "manage pods and images",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return cmd.Help()
+ },
+ PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+ return before(cmd, args)
+ },
+ PersistentPostRunE: func(cmd *cobra.Command, args []string) error {
+ return after(cmd, args)
+ },
+ SilenceUsage: true,
+ SilenceErrors: true,
}
-func main() {
- debug := false
- cpuProfile := false
-
- if reexec.Init() {
- return
- }
+var MainGlobalOpts cliconfig.MainFlags
+
+func init() {
+ cobra.OnInitialize(initConfig)
+ rootCmd.TraverseChildren = true
+ rootCmd.Version = version.Version
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.CGroupManager, "cgroup-manager", "", "Cgroup manager to use (cgroupfs or systemd, default systemd)")
+ // -c is deprecated due to conflict with -c on subcommands
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.CpuProfile, "cpu-profile", "", "Path for the cpu profiling results")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.Config, "config", "", "Path of a libpod config file detailing container server configuration options")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.ConmonPath, "conmon", "", "Path of the conmon binary")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.CniConfigDir, "cni-config-dir", "", "Path of the configuration directory for CNI networks")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.DefaultMountsFile, "default-mounts-file", "", "Path to default mounts file")
+ rootCmd.PersistentFlags().MarkHidden("default-mounts-file")
+ rootCmd.PersistentFlags().StringSliceVar(&MainGlobalOpts.HooksDir, "hooks-dir", []string{}, "Set the OCI hooks directory path (may be set multiple times)")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.LogLevel, "log-level", "error", "Log messages above specified level: debug, info, warn, error (default), fatal or panic")
+ rootCmd.PersistentFlags().IntVar(&MainGlobalOpts.MaxWorks, "max-workers", 0, "The maximum number of workers for parallel operations")
+ rootCmd.PersistentFlags().MarkHidden("max-workers")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.Namespace, "namespace", "", "Set the libpod namespace, used to create separate views of the containers and pods on the system")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.Root, "root", "", "Path to the root directory in which data, including images, is stored")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.Runroot, "runroot", "", "Path to the 'run directory' where all state information is stored")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.Runtime, "runtime", "", "Path to the OCI-compatible binary used to run containers, default is /usr/bin/runc")
+ // -s is deprecated due to conflict with -s on subcommands
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.StorageDriver, "storage-driver", "", "Select which storage driver is used to manage storage of images and containers (default is overlay)")
+ rootCmd.PersistentFlags().StringSliceVar(&MainGlobalOpts.StorageOpts, "storage-opt", []string{}, "Used to pass an option to the storage driver")
+ rootCmd.PersistentFlags().BoolVar(&MainGlobalOpts.Syslog, "syslog", false, "Output logging information to syslog as well as the console")
+
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.TmpDir, "tmpdir", "", "Path to the tmp directory")
+ rootCmd.PersistentFlags().BoolVar(&MainGlobalOpts.Trace, "trace", false, "Enable opentracing output")
+ rootCmd.AddCommand(mainCommands...)
+ rootCmd.AddCommand(getMainCommands()...)
- app := cli.NewApp()
- app.Name = "podman"
- app.Usage = "manage pods and images"
- app.OnUsageError = usageErrorHandler
- app.CommandNotFound = commandNotFoundHandler
-
- app.Version = version.Version
-
- app.Commands = []cli.Command{
- containerCommand,
- exportCommand,
- historyCommand,
- imageCommand,
- imagesCommand,
- importCommand,
- infoCommand,
- inspectCommand,
- pullCommand,
- rmiCommand,
- systemCommand,
- tagCommand,
- versionCommand,
- }
-
- app.Commands = append(app.Commands, getAppCommands()...)
- sort.Sort(commandSortedAlpha{app.Commands})
+}
+func initConfig() {
+ // we can do more stuff in here.
+}
- if varlinkCommand != nil {
- app.Commands = append(app.Commands, *varlinkCommand)
+func before(cmd *cobra.Command, args []string) error {
+ if err := libpod.SetXdgRuntimeDir(""); err != nil {
+ logrus.Errorf(err.Error())
+ os.Exit(1)
}
-
- app.Before = func(c *cli.Context) error {
- if err := libpod.SetXdgRuntimeDir(""); err != nil {
- logrus.Errorf(err.Error())
- os.Exit(1)
- }
- args := c.Args()
- if args.Present() && rootless.IsRootless() {
- if _, notRequireRootless := cmdsNotRequiringRootless[args.First()]; !notRequireRootless {
- became, ret, err := rootless.BecomeRootInUserNS()
- if err != nil {
- logrus.Errorf(err.Error())
- os.Exit(1)
- }
- if became {
- os.Exit(ret)
- }
+ if rootless.IsRootless() {
+ notRequireRootless := cmdsNotRequiringRootless[cmd]
+ if !notRequireRootless && !strings.HasPrefix(cmd.Use, "help") {
+ became, ret, err := rootless.BecomeRootInUserNS()
+ if err != nil {
+ logrus.Errorf(err.Error())
+ os.Exit(1)
}
- }
- if c.GlobalBool("syslog") {
- hook, err := lsyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
- if err == nil {
- logrus.AddHook(hook)
+ if became {
+ os.Exit(ret)
}
}
- logLevel := c.GlobalString("log-level")
- if logLevel != "" {
- level, err := logrus.ParseLevel(logLevel)
- if err != nil {
- return err
- }
+ }
- logrus.SetLevel(level)
+ if MainGlobalOpts.Syslog {
+ hook, err := lsyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+ if err == nil {
+ logrus.AddHook(hook)
}
+ }
- rlimits := new(syscall.Rlimit)
- rlimits.Cur = 1048576
- rlimits.Max = 1048576
- if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
- if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
- return errors.Wrapf(err, "error getting rlimits")
- }
- rlimits.Cur = rlimits.Max
- if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
- return errors.Wrapf(err, "error setting new rlimits")
- }
+ // Set log level
+ level, err := logrus.ParseLevel(MainGlobalOpts.LogLevel)
+ if err != nil {
+ return err
+ }
+ logrus.SetLevel(level)
+
+ rlimits := new(syscall.Rlimit)
+ rlimits.Cur = 1048576
+ rlimits.Max = 1048576
+ if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
+ if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
+ return errors.Wrapf(err, "error getting rlimits")
}
-
- if rootless.IsRootless() {
- logrus.Info("running as rootless")
+ rlimits.Cur = rlimits.Max
+ if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
+ return errors.Wrapf(err, "error setting new rlimits")
}
+ }
- // Be sure we can create directories with 0755 mode.
- syscall.Umask(0022)
+ if rootless.IsRootless() {
+ logrus.Info("running as rootless")
+ }
- if logLevel == "debug" {
- debug = true
+ // Be sure we can create directories with 0755 mode.
+ syscall.Umask(0022)
+ if cmd.Flag("cpu-profile").Changed {
+ f, err := os.Create(MainGlobalOpts.CpuProfile)
+ if err != nil {
+ return errors.Wrapf(err, "unable to create cpu profiling file %s",
+ MainGlobalOpts.CpuProfile)
}
- if c.GlobalIsSet("cpu-profile") {
- f, err := os.Create(c.GlobalString("cpu-profile"))
- if err != nil {
- return errors.Wrapf(err, "unable to create cpu profiling file %s",
- c.GlobalString("cpu-profile"))
- }
- cpuProfile = true
- pprof.StartCPUProfile(f)
- }
- return nil
- }
- app.After = func(*cli.Context) error {
- // called by Run() when the command handler succeeds
- if cpuProfile {
- pprof.StopCPUProfile()
- }
- return nil
+ pprof.StartCPUProfile(f)
}
- app.Flags = []cli.Flag{
- cli.StringFlag{
- Name: "config, c",
- Usage: "Path of a libpod config file detailing container server configuration options",
- Hidden: true,
- },
- cli.StringFlag{
- Name: "cpu-profile",
- Usage: "Path for the cpu profiling results",
- },
- cli.StringFlag{
- Name: "log-level",
- Usage: "Log messages above specified level: debug, info, warn, error (default), fatal or panic",
- Value: "error",
- },
- cli.StringFlag{
- Name: "tmpdir",
- Usage: "Path to the tmp directory",
- },
+ if cmd.Flag("trace").Changed {
+ var tracer opentracing.Tracer
+ tracer, closer = tracing.Init("podman")
+ opentracing.SetGlobalTracer(tracer)
+
+ span = tracer.StartSpan("before-context")
+
+ Ctx = opentracing.ContextWithSpan(context.Background(), span)
}
+ return nil
+}
- app.Flags = append(app.Flags, getMainAppFlags()...)
- sort.Sort(flagSortedAlpha{app.Flags})
+func after(cmd *cobra.Command, args []string) error {
+ if cmd.Flag("cpu-profile").Changed {
+ pprof.StopCPUProfile()
+ }
+ if cmd.Flag("trace").Changed {
+ span.Finish()
+ closer.Close()
+ }
+ return nil
+}
- // Check if /etc/containers/registries.conf exists when running in
- // in a local environment.
- CheckForRegistries()
+func main() {
+ //debug := false
+ //cpuProfile := false
- if err := app.Run(os.Args); err != nil {
- if debug {
- logrus.Errorf(err.Error())
- } else {
- // Retrieve the exit error from the exec call, if it exists
- if ee, ok := err.(*exec.ExitError); ok {
- if status, ok := ee.Sys().(syscall.WaitStatus); ok {
- exitCode = status.ExitStatus()
- }
- }
- fmt.Fprintln(os.Stderr, err.Error())
- }
+ if reexec.Init() {
+ return
+ }
+ if err := rootCmd.Execute(); err != nil {
+ outputError(err)
} else {
// The exitCode modified from 125, indicates an application
// running inside of a container failed, as opposed to the
@@ -233,6 +231,11 @@ func main() {
if exitCode == 125 {
exitCode = 0
}
+
}
+
+ // Check if /etc/containers/registries.conf exists when running in
+ // in a local environment.
+ CheckForRegistries()
os.Exit(exitCode)
}
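
In the rewritten main.go, the per-invocation setup and teardown that used to live in app.Before and app.After now hang off the root command's PersistentPreRunE and PersistentPostRunE hooks. A stripped-down sketch of that structure — the command names and printed messages are placeholders, and note that cobra runs the post hooks only when RunE returns nil:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

var exampleRoot = &cobra.Command{
	Use: "example",
	// Runs before every subcommand's RunE, like app.Before did.
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		fmt.Fprintln(os.Stderr, "setup: logging, rlimits, profiling ...")
		return nil
	},
	// Runs after a subcommand's RunE succeeds, like app.After did.
	PersistentPostRunE: func(cmd *cobra.Command, args []string) error {
		fmt.Fprintln(os.Stderr, "teardown: stop profiling, flush tracing ...")
		return nil
	},
	SilenceUsage:  true,
	SilenceErrors: true,
}

func main() {
	exampleRoot.AddCommand(&cobra.Command{
		Use: "hello",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("hello")
			return nil
		},
	})
	if err := exampleRoot.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(125)
	}
}
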
diff --git a/cmd/podman/mount.go b/cmd/podman/mount.go
index f71d47434..f4a7bd5ea 100644
--- a/cmd/podman/mount.go
+++ b/cmd/podman/mount.go
@@ -5,15 +5,18 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
of "github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ mountCommand cliconfig.MountValues
+
mountDescription = `
podman mount
Lists all mounted containers mount points
@@ -22,32 +25,33 @@ var (
Mounts the specified container and outputs the mountpoint
`
- mountFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Mount all containers",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Change the output format to Go template",
+ _mountCommand = &cobra.Command{
+ Use: "mount",
+ Short: "Mount a working container's root filesystem",
+ Long: mountDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ mountCommand.InputArgs = args
+ mountCommand.GlobalFlags = MainGlobalOpts
+ return mountCmd(&mountCommand)
},
- cli.BoolFlag{
- Name: "notruncate",
- Usage: "Do not truncate output",
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, true)
},
- LatestFlag,
- }
- mountCommand = cli.Command{
- Name: "mount",
- Usage: "Mount a working container's root filesystem",
- Description: mountDescription,
- Action: mountCmd,
- ArgsUsage: "[CONTAINER-NAME-OR-ID [...]]",
- Flags: sortFlags(mountFlags),
- OnUsageError: usageErrorHandler,
}
)
+func init() {
+ mountCommand.Command = _mountCommand
+ mountCommand.SetUsageTemplate(UsageTemplate())
+ flags := mountCommand.Flags()
+ flags.BoolVarP(&mountCommand.All, "all", "a", false, "Mount all containers")
+ flags.StringVar(&mountCommand.Format, "format", "", "Change the output format to Go template")
+ flags.BoolVarP(&mountCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.BoolVar(&mountCommand.NoTrunc, "notruncate", false, "Do not truncate output")
+
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
// jsonMountPoint stores info about each container
type jsonMountPoint struct {
ID string `json:"id"`
@@ -55,15 +59,12 @@ type jsonMountPoint struct {
MountPoint string `json:"mountpoint"`
}
-func mountCmd(c *cli.Context) error {
- if err := validateFlags(c, mountFlags); err != nil {
- return err
- }
+func mountCmd(c *cliconfig.MountValues) error {
if os.Geteuid() != 0 {
rootless.SetSkipStorageSetup(true)
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -85,11 +86,11 @@ func mountCmd(c *cli.Context) error {
}
}
- if c.Bool("all") && c.Bool("latest") {
+ if c.All && c.Latest {
return errors.Errorf("--all and --latest cannot be used together")
}
- mountContainers, err := getAllOrLatestContainers(c, runtime, -1, "all")
+ mountContainers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
if err != nil {
if len(mountContainers) == 0 {
return err
@@ -102,9 +103,9 @@ func mountCmd(c *cli.Context) error {
of.JSONString: true,
}
- json := c.String("format") == of.JSONString
- if !formats[c.String("format")] {
- return errors.Errorf("%q is not a supported format", c.String("format"))
+ json := c.Format == of.JSONString
+ if !formats[c.Format] {
+ return errors.Errorf("%q is not a supported format", c.Format)
}
var lastError error
@@ -149,7 +150,7 @@ func mountCmd(c *cli.Context) error {
continue
}
- if c.Bool("notruncate") {
+ if c.NoTrunc {
fmt.Printf("%-64s %s\n", container.ID(), mountPoint)
} else {
fmt.Printf("%-12.12s %s\n", container.ID(), mountPoint)
diff --git a/cmd/podman/pause.go b/cmd/podman/pause.go
index 2e7182871..94bb0edfe 100644
--- a/cmd/podman/pause.go
+++ b/cmd/podman/pause.go
@@ -3,38 +3,46 @@ package main
import (
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- pauseFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Pause all running containers",
- },
- }
+ pauseCommand cliconfig.PauseValues
pauseDescription = `
podman pause
Pauses one or more running containers. The container name or ID can be used.
`
- pauseCommand = cli.Command{
- Name: "pause",
- Usage: "Pause all the processes in one or more containers",
- Description: pauseDescription,
- Flags: pauseFlags,
- Action: pauseCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
- OnUsageError: usageErrorHandler,
+ _pauseCommand = &cobra.Command{
+ Use: "pause",
+ Short: "Pause all the processes in one or more containers",
+ Long: pauseDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ pauseCommand.InputArgs = args
+ pauseCommand.GlobalFlags = MainGlobalOpts
+ return pauseCmd(&pauseCommand)
+ },
+ Example: `podman pause mywebserver
+ podman pause 860a4b23
+ podman pause -a`,
}
)
-func pauseCmd(c *cli.Context) error {
+func init() {
+ pauseCommand.Command = _pauseCommand
+ pauseCommand.SetUsageTemplate(UsageTemplate())
+ flags := pauseCommand.Flags()
+ flags.BoolVarP(&pauseCommand.All, "all", "a", false, "Pause all running containers")
+
+}
+
+func pauseCmd(c *cliconfig.PauseValues) error {
var (
pauseContainers []*libpod.Container
pauseFuncs []shared.ParallelWorkerInput
@@ -43,18 +51,18 @@ func pauseCmd(c *cli.Context) error {
return errors.New("pause is not supported for rootless containers")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- args := c.Args()
- if len(args) < 1 && !c.Bool("all") {
+ args := c.InputArgs
+ if len(args) < 1 && !c.All {
return errors.Errorf("you must provide at least one container name or id")
}
- if c.Bool("all") {
- containers, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+ if c.All {
+ containers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateRunning, "running")
if err != nil {
return err
}
@@ -84,7 +92,7 @@ func pauseCmd(c *cli.Context) error {
maxWorkers := shared.Parallelize("pause")
if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
+ maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
diff --git a/cmd/podman/play.go b/cmd/podman/play.go
index 4e09b2689..495a1f170 100644
--- a/cmd/podman/play.go
+++ b/cmd/podman/play.go
@@ -1,23 +1,22 @@
package main
import (
- "github.com/urfave/cli"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
)
var (
- playSubCommands = []cli.Command{
- playKubeCommand,
- }
-
+ playCommand cliconfig.PodmanCommand
playDescription = "Play a pod and its containers from a structured file."
- playCommand = cli.Command{
- Name: "play",
- Usage: "Play a container or pod",
- Description: playDescription,
- ArgsUsage: "",
- Subcommands: playSubCommands,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
- Hidden: true,
+ _playCommand = &cobra.Command{
+ Use: "play",
+ Short: "Play a pod",
+ Long: playDescription,
}
)
+
+func init() {
+ playCommand.Command = _playCommand
+ playCommand.SetUsageTemplate(UsageTemplate())
+ playCommand.AddCommand(getPlaySubCommands()...)
+}
diff --git a/cmd/podman/play_kube.go b/cmd/podman/play_kube.go
index 2d97e0e95..a59460b71 100644
--- a/cmd/podman/play_kube.go
+++ b/cmd/podman/play_kube.go
@@ -8,6 +8,7 @@ import (
"strings"
"github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
@@ -20,51 +21,40 @@ import (
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
"k8s.io/api/core/v1"
)
var (
- playKubeFlags = []cli.Flag{
- cli.StringFlag{
- Name: "authfile",
- Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override. ",
- },
- cli.StringFlag{
- Name: "cert-dir",
- Usage: "`Pathname` of a directory containing TLS certificates and keys",
- },
- cli.StringFlag{
- Name: "creds",
- Usage: "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Suppress output information when pulling images",
- },
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "`Pathname` of signature policy file (not usually used)",
- },
- cli.BoolTFlag{
- Name: "tls-verify",
- Usage: "Require HTTPS and verify certificates when contacting registries (default: true)",
- },
- }
+ playKubeCommand cliconfig.KubePlayValues
playKubeDescription = "Play a Pod and its containers based on a Kubernetes YAML"
- playKubeCommand = cli.Command{
- Name: "kube",
- Usage: "Play a pod based on Kubernetes YAML",
- Description: playKubeDescription,
- Action: playKubeYAMLCmd,
- Flags: sortFlags(playKubeFlags),
- ArgsUsage: "Kubernetes YAML file",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _playKubeCommand = &cobra.Command{
+ Use: "kube",
+ Short: "Play a pod based on Kubernetes YAML",
+ Long: playKubeDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ playKubeCommand.InputArgs = args
+ playKubeCommand.GlobalFlags = MainGlobalOpts
+ return playKubeYAMLCmd(&playKubeCommand)
+ },
+ Example: `podman play kube demo.yml
+ podman play kube --cert-dir /mycertsdir --tls-verify=true --quiet myWebPod`,
}
)
-func playKubeYAMLCmd(c *cli.Context) error {
+func init() {
+ playKubeCommand.Command = _playKubeCommand
+ playKubeCommand.SetUsageTemplate(UsageTemplate())
+ flags := playKubeCommand.Flags()
+ flags.StringVar(&playKubeCommand.Authfile, "authfile", "", "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&playKubeCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
+ flags.StringVar(&playKubeCommand.Creds, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
+ flags.BoolVarP(&playKubeCommand.Quiet, "quiet", "q", false, "Suppress output information when pulling images")
+ flags.StringVar(&playKubeCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
+ flags.BoolVar(&playKubeCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries (default: true)")
+}
+
+func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
var (
podOptions []libpod.PodCreateOption
podYAML v1.Pod
@@ -77,7 +67,7 @@ func playKubeYAMLCmd(c *cli.Context) error {
if rootless.IsRootless() {
return errors.Wrapf(libpod.ErrNotImplemented, "rootless users")
}
- args := c.Args()
+ args := c.InputArgs
if len(args) > 1 {
return errors.New("you can only play one kubernetes file at a time")
}
@@ -85,7 +75,7 @@ func playKubeYAMLCmd(c *cli.Context) error {
return errors.New("you must supply at least one file")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -133,20 +123,20 @@ func playKubeYAMLCmd(c *cli.Context) error {
"ipc": fmt.Sprintf("container:%s", podInfraID),
"uts": fmt.Sprintf("container:%s", podInfraID),
}
- if !c.Bool("quiet") {
+ if !c.Quiet {
writer = os.Stderr
}
dockerRegistryOptions := image2.DockerRegistryOptions{
DockerRegistryCreds: registryCreds,
- DockerCertPath: c.String("cert-dir"),
+ DockerCertPath: c.CertDir,
}
- if c.IsSet("tls-verify") {
- dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ if c.Flag("tls-verify").Changed {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
for _, container := range podYAML.Spec.Containers {
- newImage, err := runtime.ImageRuntime().New(ctx, container.Image, c.String("signature-policy"), c.String("authfile"), writer, &dockerRegistryOptions, image2.SigningOptions{}, false, nil)
+ newImage, err := runtime.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image2.SigningOptions{}, false, nil)
if err != nil {
return err
}
@@ -163,7 +153,7 @@ func playKubeYAMLCmd(c *cli.Context) error {
// start the containers
for _, ctr := range containers {
- if err := ctr.Start(ctx); err != nil {
+ if err := ctr.Start(ctx, false); err != nil {
// Making this a hard failure here to avoid a mess
// the other containers are in created status
return err
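
The play_kube.go hunk also shows the replacement for urfave/cli's BoolTFlag/IsSet pair: a plain bool flag with a true default, plus pflag's change tracking to detect whether the user set it explicitly. A small sketch of that mechanism (the flag name is borrowed from the code above, everything else is illustrative):

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        var tlsVerify bool

        cmd := &cobra.Command{
            Use: "kube [YAML]",
            RunE: func(cmd *cobra.Command, args []string) error {
                // pflag records whether a flag was explicitly given, so a
                // default of true can be told apart from --tls-verify=true.
                if cmd.Flags().Changed("tls-verify") {
                    fmt.Println("user set tls-verify to", tlsVerify)
                } else {
                    fmt.Println("tls-verify left at its default of", tlsVerify)
                }
                return nil
            },
        }
        // BoolVar with a true default replaces urfave/cli's BoolTFlag.
        cmd.Flags().BoolVar(&tlsVerify, "tls-verify", true, "Require HTTPS and verify certificates")

        if err := cmd.Execute(); err != nil {
            fmt.Println(err)
        }
    }
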
diff --git a/cmd/podman/pod.go b/cmd/podman/pod.go
index a30361134..c1350bd4d 100644
--- a/cmd/podman/pod.go
+++ b/cmd/podman/pod.go
@@ -1,35 +1,40 @@
package main
import (
- "github.com/urfave/cli"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
)
var (
podDescription = `Manage container pods.
-Pods are a group of one or more containers sharing the same network, pid and ipc namespaces.
-`
- podSubCommands = []cli.Command{
- podCreateCommand,
- podExistsCommand,
- podInspectCommand,
- podKillCommand,
- podPauseCommand,
- podPsCommand,
- podRestartCommand,
- podRmCommand,
- podStartCommand,
- podStatsCommand,
- podStopCommand,
- podTopCommand,
- podUnpauseCommand,
- }
- podCommand = cli.Command{
- Name: "pod",
- Usage: "Manage pods",
- Description: podDescription,
- UseShortOptionHandling: true,
- Subcommands: podSubCommands,
- OnUsageError: usageErrorHandler,
- }
+Pods are a group of one or more containers sharing the same network, pid and ipc namespaces.`
)
+var podCommand = cliconfig.PodmanCommand{
+ Command: &cobra.Command{
+ Use: "pod",
+ Short: "Manage pods",
+ Long: podDescription,
+ },
+}
+
+//podSubCommands are implemented both in local and remote clients
+var podSubCommands = []*cobra.Command{
+ _podCreateCommand,
+ _podExistsCommand,
+ _podInspectCommand,
+ _podKillCommand,
+ _podPauseCommand,
+ _podPsCommand,
+ _podRestartCommand,
+ _podRmCommand,
+ _podStartCommand,
+ _podStopCommand,
+ _podUnpauseCommand,
+}
+
+func init() {
+ podCommand.AddCommand(podSubCommands...)
+ podCommand.AddCommand(getPodSubCommands()...)
+ podCommand.SetUsageTemplate(UsageTemplate())
+}
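
pod.go now wires subcommands in two batches: one list shared by local and remote clients, and one supplied by getPodSubCommands(), which is defined elsewhere in the tree (presumably differing per client build). A reduced sketch of that wiring with stub subcommands:

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    var podCmd = &cobra.Command{
        Use:   "pod",
        Short: "Manage pods",
    }

    // Subcommands implemented for both local and remote clients.
    var commonPodSubCommands = []*cobra.Command{
        {Use: "create", Short: "Create a new empty pod"},
        {Use: "ps", Short: "List pods"},
    }

    // localOnlyPodSubCommands stands in for getPodSubCommands(); the real
    // helper is assumed to vary between local and remote client builds.
    func localOnlyPodSubCommands() []*cobra.Command {
        return []*cobra.Command{
            {Use: "stats", Short: "Display resource usage for containers in one or more pods"},
        }
    }

    func main() {
        podCmd.AddCommand(commonPodSubCommands...)
        podCmd.AddCommand(localOnlyPodSubCommands()...)
        for _, c := range podCmd.Commands() {
            fmt.Println("registered:", c.Use)
        }
    }
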
diff --git a/cmd/podman/pod_create.go b/cmd/podman/pod_create.go
index 967ce7610..f1bbecb84 100644
--- a/cmd/podman/pod_create.go
+++ b/cmd/podman/pod_create.go
@@ -3,175 +3,107 @@ package main
import (
"fmt"
"os"
- "strings"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
// Kernel namespaces shared by default within a pod
DefaultKernelNamespaces = "cgroup,ipc,net,uts"
+ podCreateCommand cliconfig.PodCreateValues
+
+ podCreateDescription = "Creates a new empty pod. The pod ID is then" +
+ " printed to stdout. You can then start it at any time with the" +
+ " podman pod start <pod_id> command. The pod will be created with the" +
+ " initial state 'created'."
+
+ _podCreateCommand = &cobra.Command{
+ Use: "create",
+ Short: "Create a new empty pod",
+ Long: podCreateDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podCreateCommand.InputArgs = args
+ podCreateCommand.GlobalFlags = MainGlobalOpts
+ return podCreateCmd(&podCreateCommand)
+ },
+ }
)
-var podCreateDescription = "Creates a new empty pod. The pod ID is then" +
- " printed to stdout. You can then start it at any time with the" +
- " podman pod start <pod_id> command. The pod will be created with the" +
- " initial state 'created'."
-
-var podCreateFlags = []cli.Flag{
- cli.StringFlag{
- Name: "cgroup-parent",
- Usage: "Set parent cgroup for the pod",
- },
- cli.BoolTFlag{
- Name: "infra",
- Usage: "Create an infra container associated with the pod to share namespaces with",
- },
- cli.StringFlag{
- Name: "infra-image",
- Usage: "The image of the infra container to associate with the pod",
- Value: libpod.DefaultInfraImage,
- },
- cli.StringFlag{
- Name: "infra-command",
- Usage: "The command to run on the infra container when the pod is started",
- Value: libpod.DefaultInfraCommand,
- },
- cli.StringSliceFlag{
- Name: "label-file",
- Usage: "Read in a line delimited file of labels (default [])",
- },
- cli.StringSliceFlag{
- Name: "label, l",
- Usage: "Set metadata on pod (default [])",
- },
- cli.StringFlag{
- Name: "name, n",
- Usage: "Assign a name to the pod",
- },
- cli.StringFlag{
- Name: "pod-id-file",
- Usage: "Write the pod ID to the file",
- },
- cli.StringSliceFlag{
- Name: "publish, p",
- Usage: "Publish a container's port, or a range of ports, to the host (default [])",
- },
- cli.StringFlag{
- Name: "share",
- Usage: "A comma delimited list of kernel namespaces the pod will share",
- Value: DefaultKernelNamespaces,
- },
-}
+func init() {
+ podCreateCommand.Command = _podCreateCommand
+ podCreateCommand.SetUsageTemplate(UsageTemplate())
+ flags := podCreateCommand.Flags()
+ flags.SetInterspersed(false)
+
+ flags.StringVar(&podCreateCommand.CgroupParent, "cgroup-parent", "", "Set parent cgroup for the pod")
+ flags.BoolVar(&podCreateCommand.Infra, "infra", true, "Create an infra container associated with the pod to share namespaces with")
+ flags.StringVar(&podCreateCommand.InfraImage, "infra-image", libpod.DefaultInfraImage, "The image of the infra container to associate with the pod")
+ flags.StringVar(&podCreateCommand.InfraCommand, "infra-command", libpod.DefaultInfraCommand, "The command to run on the infra container when the pod is started")
+ flags.StringSliceVar(&podCreateCommand.LabelFile, "label-file", []string{}, "Read in a line delimited file of labels")
+ flags.StringSliceVarP(&podCreateCommand.Labels, "label", "l", []string{}, "Set metadata on pod (default [])")
+ flags.StringVarP(&podCreateCommand.Name, "name", "n", "", "Assign a name to the pod")
+ flags.StringVar(&podCreateCommand.PodIDFile, "pod-id-file", "", "Write the pod ID to the file")
+ flags.StringSliceVarP(&podCreateCommand.Publish, "publish", "p", []string{}, "Publish a container's port, or a range of ports, to the host (default [])")
+ flags.StringVar(&podCreateCommand.Share, "share", DefaultKernelNamespaces, "A comma delimited list of kernel namespaces the pod will share")
-var podCreateCommand = cli.Command{
- Name: "create",
- Usage: "Create a new empty pod",
- Description: podCreateDescription,
- Flags: sortFlags(podCreateFlags),
- Action: podCreateCmd,
- SkipArgReorder: true,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
}
-func podCreateCmd(c *cli.Context) error {
- var options []libpod.PodCreateOption
- var err error
+func podCreateCmd(c *cliconfig.PodCreateValues) error {
+ var (
+ err error
+ podIdFile *os.File
+ )
- if err = validateFlags(c, createFlags); err != nil {
- return err
+ if len(c.InputArgs) > 0 {
+ return errors.New("podman pod create does not accept any arguments")
}
-
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- var podIdFile *os.File
- if c.IsSet("pod-id-file") && os.Geteuid() == 0 {
- podIdFile, err = libpod.OpenExclusiveFile(c.String("pod-id-file"))
- if err != nil && os.IsExist(err) {
- return errors.Errorf("pod id file exists. Ensure another pod is not using it or delete %s", c.String("pod-id-file"))
- }
- if err != nil {
- return errors.Errorf("error opening pod-id-file %s", c.String("pod-id-file"))
- }
- defer podIdFile.Close()
- defer podIdFile.Sync()
- }
-
- if len(c.StringSlice("publish")) > 0 {
- if !c.BoolT("infra") {
+ if len(c.Publish) > 0 {
+ if !c.Infra {
return errors.Errorf("you must have an infra container to publish port bindings to the host")
}
- if rootless.IsRootless() {
- return errors.Errorf("rootless networking does not allow port binding to the host")
- }
}
- if !c.BoolT("infra") && c.IsSet("share") && c.String("share") != "none" && c.String("share") != "" {
+ if !c.Infra && c.Flag("share").Changed && c.Share != "none" && c.Share != "" {
return errors.Errorf("You cannot share kernel namespaces on the pod level without an infra container")
}
-
- if c.IsSet("cgroup-parent") {
- options = append(options, libpod.WithPodCgroupParent(c.String("cgroup-parent")))
- }
-
- labels, err := getAllLabels(c.StringSlice("label-file"), c.StringSlice("label"))
- if err != nil {
- return errors.Wrapf(err, "unable to process labels")
- }
- if len(labels) != 0 {
- options = append(options, libpod.WithPodLabels(labels))
- }
-
- if c.IsSet("name") {
- options = append(options, libpod.WithPodName(c.String("name")))
- }
-
- if c.BoolT("infra") {
- options = append(options, libpod.WithInfraContainer())
- nsOptions, err := shared.GetNamespaceOptions(strings.Split(c.String("share"), ","))
- if err != nil {
- return err
+ if c.Flag("pod-id-file").Changed && os.Geteuid() == 0 {
+ podIdFile, err = libpod.OpenExclusiveFile(c.PodIDFile)
+ if err != nil && os.IsExist(err) {
+ return errors.Errorf("pod id file exists. Ensure another pod is not using it or delete %s", c.PodIDFile)
}
- options = append(options, nsOptions...)
- }
-
- if len(c.StringSlice("publish")) > 0 {
- portBindings, err := shared.CreatePortBindings(c.StringSlice("publish"))
if err != nil {
- return err
+ return errors.Errorf("error opening pod-id-file %s", c.PodIDFile)
}
- options = append(options, libpod.WithInfraContainerPorts(portBindings))
-
+ defer podIdFile.Close()
+ defer podIdFile.Sync()
}
- // always have containers use pod cgroups
- // User Opt out is not yet supported
- options = append(options, libpod.WithPodCgroups())
- ctx := getContext()
- pod, err := runtime.NewPod(ctx, options...)
+ labels, err := getAllLabels(c.LabelFile, c.Labels)
if err != nil {
- return err
+ return errors.Wrapf(err, "unable to process labels")
}
+ podID, err := runtime.CreatePod(getContext(), c, labels)
+ if err != nil {
+ return errors.Wrapf(err, "unable to create pod")
+ }
if podIdFile != nil {
- _, err = podIdFile.WriteString(pod.ID())
+ _, err = podIdFile.WriteString(podID)
if err != nil {
logrus.Error(err)
}
}
-
- fmt.Printf("%s\n", pod.ID())
-
+ fmt.Printf("%s\n", podID)
return nil
}
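
The --pod-id-file handling above depends on libpod.OpenExclusiveFile failing with an os.IsExist error when the file already exists. A plausible minimal equivalent, sketched with a plain O_EXCL open (the real helper lives in libpod and may do more):

    package main

    import (
        "fmt"
        "os"
    )

    // openExclusiveFile creates path only if it does not exist yet.
    func openExclusiveFile(path string) (*os.File, error) {
        return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0644)
    }

    func main() {
        f, err := openExclusiveFile("/tmp/mypod.id")
        if err != nil {
            if os.IsExist(err) {
                fmt.Println("pod id file exists; ensure another pod is not using it")
                return
            }
            fmt.Println("error opening pod-id-file:", err)
            return
        }
        defer f.Close()

        // Placeholder pod ID, written the same way podCreateCmd writes podID.
        if _, err := f.WriteString("8d0ad1a967cf"); err != nil {
            fmt.Println(err)
        }
    }
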
diff --git a/cmd/podman/pod_inspect.go b/cmd/podman/pod_inspect.go
index c7bbf31cd..5a32b5c5d 100644
--- a/cmd/podman/pod_inspect.go
+++ b/cmd/podman/pod_inspect.go
@@ -2,46 +2,51 @@ package main
import (
"encoding/json"
-
"fmt"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podInspectFlags = []cli.Flag{
- LatestPodFlag,
- }
+ podInspectCommand cliconfig.PodInspectValues
podInspectDescription = "Display the configuration for a pod by name or id"
- podInspectCommand = cli.Command{
- Name: "inspect",
- Usage: "Displays a pod configuration",
- Description: podInspectDescription,
- Flags: sortFlags(podInspectFlags),
- Action: podInspectCmd,
- UseShortOptionHandling: true,
- ArgsUsage: "[POD_NAME_OR_ID]",
- OnUsageError: usageErrorHandler,
+ _podInspectCommand = &cobra.Command{
+ Use: "inspect",
+ Short: "Displays a pod configuration",
+ Long: podInspectDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podInspectCommand.InputArgs = args
+ podInspectCommand.GlobalFlags = MainGlobalOpts
+ return podInspectCmd(&podInspectCommand)
+ },
+ Example: `podman pod inspect podID`,
}
)
-func podInspectCmd(c *cli.Context) error {
+func init() {
+ podInspectCommand.Command = _podInspectCommand
+ podInspectCommand.SetUsageTemplate(UsageTemplate())
+ flags := podInspectCommand.Flags()
+ flags.BoolVarP(&podInspectCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func podInspectCmd(c *cliconfig.PodInspectValues) error {
var (
- pod *libpod.Pod
+ pod *adapter.Pod
)
- if err := checkMutuallyExclusiveFlags(c); err != nil {
- return err
- }
- args := c.Args()
- runtime, err := libpodruntime.GetRuntime(c)
+ args := c.InputArgs
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- if c.Bool("latest") {
+ if c.Latest {
pod, err = runtime.GetLatestPod()
if err != nil {
return errors.Wrapf(err, "unable to get latest pod")
diff --git a/cmd/podman/pod_kill.go b/cmd/podman/pod_kill.go
index c8029eb46..aaaae0f7d 100644
--- a/cmd/podman/pod_kill.go
+++ b/cmd/podman/pod_kill.go
@@ -4,46 +4,48 @@ import (
"fmt"
"syscall"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/docker/docker/pkg/signal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podKillFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Kill all containers in all pods",
+ podKillCommand cliconfig.PodKillValues
+ podKillDescription = "The main process of each container inside the specified pod will be sent SIGKILL, or any signal specified with option --signal."
+ _podKillCommand = &cobra.Command{
+ Use: "kill",
+ Short: "Send the specified signal or SIGKILL to containers in pod",
+ Long: podKillDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podKillCommand.InputArgs = args
+ podKillCommand.GlobalFlags = MainGlobalOpts
+ return podKillCmd(&podKillCommand)
},
- cli.StringFlag{
- Name: "signal, s",
- Usage: "Signal to send to the containers in the pod",
- Value: "KILL",
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
},
- LatestPodFlag,
- }
- podKillDescription = "The main process of each container inside the specified pod will be sent SIGKILL, or any signal specified with option --signal."
- podKillCommand = cli.Command{
- Name: "kill",
- Usage: "Send the specified signal or SIGKILL to containers in pod",
- Description: podKillDescription,
- Flags: sortFlags(podKillFlags),
- Action: podKillCmd,
- ArgsUsage: "[POD_NAME_OR_ID]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ Example: `podman pod kill podID
+ podman pod kill --signal TERM mywebserver
+ podman pod kill --latest`,
}
)
-// podKillCmd kills one or more pods with a signal
-func podKillCmd(c *cli.Context) error {
- if err := checkMutuallyExclusiveFlags(c); err != nil {
- return err
- }
+func init() {
+ podKillCommand.Command = _podKillCommand
+ podKillCommand.SetUsageTemplate(UsageTemplate())
+ flags := podKillCommand.Flags()
+ flags.BoolVarP(&podKillCommand.All, "all", "a", false, "Kill all containers in all pods")
+ flags.BoolVarP(&podKillCommand.Latest, "latest", "l", false, "Act on the latest pod podman is aware of")
+ flags.StringVarP(&podKillCommand.Signal, "signal", "s", "KILL", "Signal to send to the containers in the pod")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
- runtime, err := libpodruntime.GetRuntime(c)
+// podKillCmd kills one or more pods with a signal
+func podKillCmd(c *cliconfig.PodKillValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -51,40 +53,30 @@ func podKillCmd(c *cli.Context) error {
var killSignal uint = uint(syscall.SIGTERM)
- if c.String("signal") != "" {
+ if c.Signal != "" {
// Check if the signalString provided by the user is valid
// Invalid signals will return err
- sysSignal, err := signal.ParseSignal(c.String("signal"))
+ sysSignal, err := signal.ParseSignal(c.Signal)
if err != nil {
return err
}
killSignal = uint(sysSignal)
}
- // getPodsFromContext returns an error when a requested pod
- // isn't found. The only fatal error scenerio is when there are no pods
- // in which case the following loop will be skipped.
- pods, lastError := getPodsFromContext(c, runtime)
+ podKillIds, podKillErrors := runtime.KillPods(getContext(), c, killSignal)
+ for _, p := range podKillIds {
+ fmt.Println(p)
+ }
+ if len(podKillErrors) == 0 {
+ return nil
+ }
+ // Grab the last error
+ lastError := podKillErrors[len(podKillErrors)-1]
+ // Remove the last error from the error slice
+ podKillErrors = podKillErrors[:len(podKillErrors)-1]
- for _, pod := range pods {
- ctr_errs, err := pod.Kill(killSignal)
- if ctr_errs != nil {
- for ctr, err := range ctr_errs {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to kill container %q in pod %q", ctr, pod.ID())
- }
- continue
- }
- if err != nil {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to kill pod %q", pod.ID())
- continue
- }
- fmt.Println(pod.ID())
+ for _, err := range podKillErrors {
+ logrus.Errorf("%q", err)
}
return lastError
}
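
pod_kill.go, pod_rm.go, pod_start.go and the other bulk pod commands in this commit all end with the same reporting pattern: print the IDs that succeeded, log every error except the last, and return the last one so the command exits non-zero. Factored into a generic helper purely for illustration:

    package main

    import (
        "errors"
        "fmt"

        "github.com/sirupsen/logrus"
    )

    // reportPodResults mirrors the tail of podKillCmd, podRmCmd and podStartCmd:
    // successes are printed, all but the last error are logged, the last is returned.
    func reportPodResults(ids []string, errs []error) error {
        for _, id := range ids {
            fmt.Println(id)
        }
        if len(errs) == 0 {
            return nil
        }
        lastError := errs[len(errs)-1]
        for _, err := range errs[:len(errs)-1] {
            logrus.Errorf("%q", err)
        }
        return lastError
    }

    func main() {
        err := reportPodResults(
            []string{"8d0ad1a967cf"},
            []error{errors.New(`unable to kill pod "badpod"`)},
        )
        fmt.Println("returned:", err)
    }
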
diff --git a/cmd/podman/pod_pause.go b/cmd/podman/pod_pause.go
index f29a0b8d1..284740d22 100644
--- a/cmd/podman/pod_pause.go
+++ b/cmd/podman/pod_pause.go
@@ -2,73 +2,71 @@ package main
import (
"fmt"
-
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podPauseFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Pause all running pods",
+ podPauseCommand cliconfig.PodPauseValues
+ podPauseDescription = `Pauses one or more pods. The pod name or ID can be used.`
+ _podPauseCommand = &cobra.Command{
+ Use: "pause",
+ Short: "Pause one or more pods",
+ Long: podPauseDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podPauseCommand.InputArgs = args
+ podPauseCommand.GlobalFlags = MainGlobalOpts
+ return podPauseCmd(&podPauseCommand)
},
- LatestPodFlag,
- }
- podPauseDescription = `
- Pauses one or more pods. The pod name or ID can be used.
-`
-
- podPauseCommand = cli.Command{
- Name: "pause",
- Usage: "Pause one or more pods",
- Description: podPauseDescription,
- Flags: sortFlags(podPauseFlags),
- Action: podPauseCmd,
- ArgsUsage: "POD-NAME|POD-ID [POD-NAME|POD-ID ...]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman pod pause podID1 podID2
+ podman pod pause --latest
+ podman pod pause --all`,
}
)
-func podPauseCmd(c *cli.Context) error {
- if err := checkMutuallyExclusiveFlags(c); err != nil {
- return err
- }
+func init() {
+ podPauseCommand.Command = _podPauseCommand
+ podPauseCommand.SetUsageTemplate(UsageTemplate())
+ flags := podPauseCommand.Flags()
+ flags.BoolVarP(&podPauseCommand.All, "all", "a", false, "Pause all running pods")
+ flags.BoolVarP(&podPauseCommand.Latest, "latest", "l", false, "Act on the latest pod podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
- runtime, err := libpodruntime.GetRuntime(c)
+func podPauseCmd(c *cliconfig.PodPauseValues) error {
+ var lastError error
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- // getPodsFromContext returns an error when a requested pod
- // isn't found. The only fatal error scenerio is when there are no pods
- // in which case the following loop will be skipped.
- pods, lastError := getPodsFromContext(c, runtime)
+ pauseIDs, conErrors, pauseErrors := runtime.PausePods(c)
- for _, pod := range pods {
- ctr_errs, err := pod.Pause()
- if ctr_errs != nil {
- for ctr, err := range ctr_errs {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to pause container %q on pod %q", ctr, pod.ID())
- }
- continue
- }
- if err != nil {
+ for _, p := range pauseIDs {
+ fmt.Println(p)
+ }
+ if conErrors != nil && len(conErrors) > 0 {
+ for ctr, err := range conErrors {
if lastError != nil {
logrus.Errorf("%q", lastError)
}
- lastError = errors.Wrapf(err, "unable to pause pod %q", pod.ID())
- continue
+ lastError = errors.Wrapf(err, "unable to pause container %s", ctr)
}
- fmt.Println(pod.ID())
}
-
+ if len(pauseErrors) > 0 {
+ lastError = pauseErrors[len(pauseErrors)-1]
+ // Remove the last error from the error slice
+ pauseErrors = pauseErrors[:len(pauseErrors)-1]
+ }
+ for _, err := range pauseErrors {
+ logrus.Errorf("%q", err)
+ }
return lastError
}
diff --git a/cmd/podman/pod_ps.go b/cmd/podman/pod_ps.go
index 2030b9b04..70e077651 100644
--- a/cmd/podman/pod_ps.go
+++ b/cmd/podman/pod_ps.go
@@ -8,14 +8,15 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
"github.com/docker/go-units"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
const (
@@ -28,6 +29,8 @@ const (
NUM_CTR_INFO = 10
)
+type PodFilter func(*adapter.Pod) bool
+
var (
bc_opts shared.PsOptions
)
@@ -112,101 +115,75 @@ func (a podPsSortedStatus) Less(i, j int) bool {
}
var (
- podPsFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "ctr-names",
- Usage: "Display the container names",
- },
- cli.BoolFlag{
- Name: "ctr-ids",
- Usage: "Display the container UUIDs. If no-trunc is not set they will be truncated",
- },
- cli.BoolFlag{
- Name: "ctr-status",
- Usage: "Display the container status",
- },
- cli.StringFlag{
- Name: "filter, f",
- Usage: "Filter output based on conditions given",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Pretty-print pods to JSON or using a Go template",
- },
- cli.BoolFlag{
- Name: "latest, l",
- Usage: "Show the latest pod created",
- },
- cli.BoolFlag{
- Name: "namespace, ns",
- Usage: "Display namespace information of the pod",
- },
- cli.BoolFlag{
- Name: "no-trunc",
- Usage: "Do not truncate pod and container IDs",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Print the numeric IDs of the pods only",
- },
- cli.StringFlag{
- Name: "sort",
- Usage: "Sort output by created, id, name, or number",
- Value: "created",
- },
- }
+ podPsCommand cliconfig.PodPsValues
+
podPsDescription = "List all pods on system including their names, ids and current state."
- podPsCommand = cli.Command{
- Name: "ps",
- Aliases: []string{"ls", "list"},
- Usage: "List pods",
- Description: podPsDescription,
- Flags: sortFlags(podPsFlags),
- Action: podPsCmd,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _podPsCommand = &cobra.Command{
+ Use: "ps",
+ Aliases: []string{"ls", "list"},
+ Short: "List pods",
+ Long: podPsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podPsCommand.InputArgs = args
+ podPsCommand.GlobalFlags = MainGlobalOpts
+ return podPsCmd(&podPsCommand)
+ },
}
)
-func podPsCmd(c *cli.Context) error {
- if err := validateFlags(c, podPsFlags); err != nil {
- return err
- }
+func init() {
+ podPsCommand.Command = _podPsCommand
+ podPsCommand.SetUsageTemplate(UsageTemplate())
+ flags := podPsCommand.Flags()
+ flags.BoolVar(&podPsCommand.CtrNames, "ctr-names", false, "Display the container names")
+ flags.BoolVar(&podPsCommand.CtrIDs, "ctr-ids", false, "Display the container UUIDs. If no-trunc is not set they will be truncated")
+ flags.BoolVar(&podPsCommand.CtrStatus, "ctr-status", false, "Display the container status")
+ flags.StringVarP(&podPsCommand.Filter, "filter", "f", "", "Filter output based on conditions given")
+ flags.StringVar(&podPsCommand.Format, "format", "", "Pretty-print pods to JSON or using a Go template")
+ flags.BoolVarP(&podPsCommand.Latest, "latest", "l", false, "Act on the latest pod podman is aware of")
+ flags.BoolVar(&podPsCommand.Namespace, "namespace", false, "Display namespace information of the pod")
+ flags.BoolVar(&podPsCommand.Namespace, "ns", false, "Display namespace information of the pod")
+ flags.BoolVar(&podPsCommand.NoTrunc, "no-trunc", false, "Do not truncate pod and container IDs")
+ flags.BoolVarP(&podPsCommand.Quiet, "quiet", "q", false, "Print the numeric IDs of the pods only")
+ flags.StringVar(&podPsCommand.Sort, "sort", "created", "Sort output by created, id, name, or number")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+func podPsCmd(c *cliconfig.PodPsValues) error {
if err := podPsCheckFlagsPassed(c); err != nil {
return errors.Wrapf(err, "error with flags passed")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- if len(c.Args()) > 0 {
+ if len(c.InputArgs) > 0 {
return errors.Errorf("too many arguments, ps takes no arguments")
}
opts := podPsOptions{
- NoTrunc: c.Bool("no-trunc"),
- Quiet: c.Bool("quiet"),
- Sort: c.String("sort"),
- IdsOfContainers: c.Bool("ctr-ids"),
- NamesOfContainers: c.Bool("ctr-names"),
- StatusOfContainers: c.Bool("ctr-status"),
+ NoTrunc: c.NoTrunc,
+ Quiet: c.Quiet,
+ Sort: c.Sort,
+ IdsOfContainers: c.CtrIDs,
+ NamesOfContainers: c.CtrNames,
+ StatusOfContainers: c.CtrStatus,
}
opts.Format = genPodPsFormat(c)
- var filterFuncs []libpod.PodFilter
- if c.String("filter") != "" {
- filters := strings.Split(c.String("filter"), ",")
+ var filterFuncs []PodFilter
+ if c.Filter != "" {
+ filters := strings.Split(c.Filter, ",")
for _, f := range filters {
filterSplit := strings.Split(f, "=")
if len(filterSplit) < 2 {
return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
}
- generatedFunc, err := generatePodFilterFuncs(filterSplit[0], filterSplit[1], runtime)
+ generatedFunc, err := generatePodFilterFuncs(filterSplit[0], filterSplit[1])
if err != nil {
return errors.Wrapf(err, "invalid filter")
}
@@ -214,8 +191,8 @@ func podPsCmd(c *cli.Context) error {
}
}
- var pods []*libpod.Pod
- if c.IsSet("latest") {
+ var pods []*adapter.Pod
+ if c.Latest {
pod, err := runtime.GetLatestPod()
if err != nil {
return err
@@ -228,7 +205,7 @@ func podPsCmd(c *cli.Context) error {
}
}
- podsFiltered := make([]*libpod.Pod, 0, len(pods))
+ podsFiltered := make([]*adapter.Pod, 0, len(pods))
for _, pod := range pods {
include := true
for _, filter := range filterFuncs {
@@ -240,17 +217,17 @@ func podPsCmd(c *cli.Context) error {
}
}
- return generatePodPsOutput(podsFiltered, opts, runtime)
+ return generatePodPsOutput(podsFiltered, opts)
}
// podPsCheckFlagsPassed checks if mutually exclusive flags are passed together
-func podPsCheckFlagsPassed(c *cli.Context) error {
+func podPsCheckFlagsPassed(c *cliconfig.PodPsValues) error {
// quiet, and format with Go template are mutually exclusive
flags := 0
- if c.Bool("quiet") {
+ if c.Quiet {
flags++
}
- if c.IsSet("format") && c.String("format") != formats.JSONString {
+ if c.Flag("format").Changed && c.Format != formats.JSONString {
flags++
}
if flags > 1 {
@@ -259,10 +236,10 @@ func podPsCheckFlagsPassed(c *cli.Context) error {
return nil
}
-func generatePodFilterFuncs(filter, filterValue string, runtime *libpod.Runtime) (func(pod *libpod.Pod) bool, error) {
+func generatePodFilterFuncs(filter, filterValue string) (func(pod *adapter.Pod) bool, error) {
switch filter {
case "ctr-ids":
- return func(p *libpod.Pod) bool {
+ return func(p *adapter.Pod) bool {
ctrIds, err := p.AllContainersByID()
if err != nil {
return false
@@ -270,7 +247,7 @@ func generatePodFilterFuncs(filter, filterValue string, runtime *libpod.Runtime)
return util.StringInSlice(filterValue, ctrIds)
}, nil
case "ctr-names":
- return func(p *libpod.Pod) bool {
+ return func(p *adapter.Pod) bool {
ctrs, err := p.AllContainers()
if err != nil {
return false
@@ -283,7 +260,7 @@ func generatePodFilterFuncs(filter, filterValue string, runtime *libpod.Runtime)
return false
}, nil
case "ctr-number":
- return func(p *libpod.Pod) bool {
+ return func(p *adapter.Pod) bool {
ctrIds, err := p.AllContainersByID()
if err != nil {
return false
@@ -299,7 +276,7 @@ func generatePodFilterFuncs(filter, filterValue string, runtime *libpod.Runtime)
if !util.StringInSlice(filterValue, []string{"created", "restarting", "running", "paused", "exited", "unknown"}) {
return nil, errors.Errorf("%s is not a valid status", filterValue)
}
- return func(p *libpod.Pod) bool {
+ return func(p *adapter.Pod) bool {
ctr_statuses, err := p.Status()
if err != nil {
return false
@@ -316,19 +293,19 @@ func generatePodFilterFuncs(filter, filterValue string, runtime *libpod.Runtime)
return false
}, nil
case "id":
- return func(p *libpod.Pod) bool {
+ return func(p *adapter.Pod) bool {
return strings.Contains(p.ID(), filterValue)
}, nil
case "name":
- return func(p *libpod.Pod) bool {
+ return func(p *adapter.Pod) bool {
return strings.Contains(p.Name(), filterValue)
}, nil
case "status":
if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created"}) {
return nil, errors.Errorf("%s is not a valid pod status", filterValue)
}
- return func(p *libpod.Pod) bool {
- status, err := shared.GetPodStatus(p)
+ return func(p *adapter.Pod) bool {
+ status, err := p.GetPodStatus()
if err != nil {
return false
}
@@ -342,20 +319,20 @@ func generatePodFilterFuncs(filter, filterValue string, runtime *libpod.Runtime)
}
// generate the template based on conditions given
-func genPodPsFormat(c *cli.Context) string {
+func genPodPsFormat(c *cliconfig.PodPsValues) string {
format := ""
- if c.String("format") != "" {
+ if c.Format != "" {
// "\t" from the command line is not being recognized as a tab
// replacing the string "\t" to a tab character if the user passes in "\t"
- format = strings.Replace(c.String("format"), `\t`, "\t", -1)
- } else if c.Bool("quiet") {
+ format = strings.Replace(c.Format, `\t`, "\t", -1)
+ } else if c.Quiet {
format = formats.IDString
} else {
format = "table {{.ID}}\t{{.Name}}\t{{.Status}}\t{{.Created}}"
if c.Bool("namespace") {
format += "\t{{.Cgroup}}\t{{.Namespaces}}"
}
- if c.Bool("ctr-names") || c.Bool("ctr-ids") || c.Bool("ctr-status") {
+ if c.CtrNames || c.CtrIDs || c.CtrStatus {
format += "\t{{.ContainerInfo}}"
} else {
format += "\t{{.NumberOfContainers}}"
@@ -473,7 +450,7 @@ func getPodTemplateOutput(psParams []podPsJSONParams, opts podPsOptions) ([]podP
return psOutput, nil
}
-func getNamespaces(pod *libpod.Pod) []string {
+func getNamespaces(pod *adapter.Pod) []string {
var shared []string
if pod.SharesPID() {
shared = append(shared, "pid")
@@ -500,7 +477,7 @@ func getNamespaces(pod *libpod.Pod) []string {
}
// getAndSortPodJSONOutput returns the container info in its raw, sorted form
-func getAndSortPodJSONParams(pods []*libpod.Pod, opts podPsOptions, runtime *libpod.Runtime) ([]podPsJSONParams, error) {
+func getAndSortPodJSONParams(pods []*adapter.Pod, opts podPsOptions) ([]podPsJSONParams, error) {
var (
psOutput []podPsJSONParams
)
@@ -512,7 +489,7 @@ func getAndSortPodJSONParams(pods []*libpod.Pod, opts podPsOptions, runtime *lib
return nil, err
}
ctrNum := len(ctrs)
- status, err := shared.GetPodStatus(pod)
+ status, err := pod.GetPodStatus()
if err != nil {
return nil, err
}
@@ -522,7 +499,7 @@ func getAndSortPodJSONParams(pods []*libpod.Pod, opts podPsOptions, runtime *lib
return nil, err
}
for _, ctr := range ctrs {
- batchInfo, err := shared.BatchContainerOp(ctr, bc_opts)
+ batchInfo, err := adapter.BatchContainerOp(ctr, bc_opts)
if err != nil {
return nil, err
}
@@ -564,11 +541,11 @@ func getAndSortPodJSONParams(pods []*libpod.Pod, opts podPsOptions, runtime *lib
return sortPodPsOutput(opts.Sort, psOutput)
}
-func generatePodPsOutput(pods []*libpod.Pod, opts podPsOptions, runtime *libpod.Runtime) error {
+func generatePodPsOutput(pods []*adapter.Pod, opts podPsOptions) error {
if len(pods) == 0 && opts.Format != formats.JSONString {
return nil
}
- psOutput, err := getAndSortPodJSONParams(pods, opts, runtime)
+ psOutput, err := getAndSortPodJSONParams(pods, opts)
if err != nil {
return err
}
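
The --filter rework in pod_ps.go keeps the same overall design: each key=value term is turned into a closure over the pod type, and a pod is listed only if every closure returns true. The same shape, shrunk to a toy pod type for illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    type pod struct {
        id, name string
    }

    func (p *pod) ID() string   { return p.id }
    func (p *pod) Name() string { return p.name }

    type podFilter func(*pod) bool

    // generatePodFilterFunc returns a predicate for one "key=value" filter term.
    func generatePodFilterFunc(filter, filterValue string) (podFilter, error) {
        switch filter {
        case "id":
            return func(p *pod) bool { return strings.Contains(p.ID(), filterValue) }, nil
        case "name":
            return func(p *pod) bool { return strings.Contains(p.Name(), filterValue) }, nil
        }
        return nil, fmt.Errorf("%s is an invalid filter", filter)
    }

    func main() {
        pods := []*pod{{"8d0ad1a967cf", "web"}, {"f3c9dd8a01e2", "db"}}

        f, err := generatePodFilterFunc("name", "web")
        if err != nil {
            fmt.Println(err)
            return
        }
        for _, p := range pods {
            if f(p) {
                fmt.Println(p.ID(), p.Name())
            }
        }
    }
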
diff --git a/cmd/podman/pod_restart.go b/cmd/podman/pod_restart.go
index d9800cbf7..741fce588 100644
--- a/cmd/podman/pod_restart.go
+++ b/cmd/podman/pod_restart.go
@@ -3,70 +3,72 @@ package main
import (
"fmt"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podRestartFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Restart all pods",
- },
- LatestPodFlag,
- }
+ podRestartCommand cliconfig.PodRestartValues
podRestartDescription = `Restarts one or more pods. The pod ID or name can be used.`
-
- podRestartCommand = cli.Command{
- Name: "restart",
- Usage: "Restart one or more pods",
- Description: podRestartDescription,
- Flags: sortFlags(podRestartFlags),
- Action: podRestartCmd,
- ArgsUsage: "POD-NAME|POD-ID [POD-NAME|POD-ID ...]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _podRestartCommand = &cobra.Command{
+ Use: "restart",
+ Short: "Restart one or more pods",
+ Long: podRestartDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podRestartCommand.InputArgs = args
+ podRestartCommand.GlobalFlags = MainGlobalOpts
+ return podRestartCmd(&podRestartCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman pod restart podID1 podID2
+ podman pod restart --latest
+ podman pod restart --all`,
}
)
-func podRestartCmd(c *cli.Context) error {
- if err := checkMutuallyExclusiveFlags(c); err != nil {
- return err
- }
+func init() {
+ podRestartCommand.Command = _podRestartCommand
+ podRestartCommand.SetUsageTemplate(UsageTemplate())
+ flags := podRestartCommand.Flags()
+ flags.BoolVarP(&podRestartCommand.All, "all", "a", false, "Restart all running pods")
+ flags.BoolVarP(&podRestartCommand.Latest, "latest", "l", false, "Restart the latest pod podman is aware of")
- runtime, err := libpodruntime.GetRuntime(c)
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func podRestartCmd(c *cliconfig.PodRestartValues) error {
+ var lastError error
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- // getPodsFromContext returns an error when a requested pod
- // isn't found. The only fatal error scenerio is when there are no pods
- // in which case the following loop will be skipped.
- pods, lastError := getPodsFromContext(c, runtime)
+ restartIDs, conErrors, restartErrors := runtime.RestartPods(getContext(), c)
- ctx := getContext()
- for _, pod := range pods {
- ctr_errs, err := pod.Restart(ctx)
- if ctr_errs != nil {
- for ctr, err := range ctr_errs {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to restart container %q on pod %q", ctr, pod.ID())
- }
- continue
- }
- if err != nil {
+ for _, p := range restartIDs {
+ fmt.Println(p)
+ }
+ if conErrors != nil && len(conErrors) > 0 {
+ for ctr, err := range conErrors {
if lastError != nil {
logrus.Errorf("%q", lastError)
}
- lastError = errors.Wrapf(err, "unable to restart pod %q", pod.ID())
- continue
+ lastError = errors.Wrapf(err, "unable to restart container %s", ctr)
}
- fmt.Println(pod.ID())
+ }
+ if len(restartErrors) > 0 {
+ lastError = restartErrors[len(restartErrors)-1]
+ // Remove the last error from the error slice
+ restartErrors = restartErrors[:len(restartErrors)-1]
+ }
+ for _, err := range restartErrors {
+ logrus.Errorf("%q", err)
}
return lastError
}
diff --git a/cmd/podman/pod_rm.go b/cmd/podman/pod_rm.go
index 49f2104cf..ba16d03c7 100644
--- a/cmd/podman/pod_rm.go
+++ b/cmd/podman/pod_rm.go
@@ -3,71 +3,69 @@ package main
import (
"fmt"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podRmFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Remove all pods",
- },
- cli.BoolFlag{
- Name: "force, f",
- Usage: "Force removal of a running pod by first stopping all containers, then removing all containers in the pod. The default is false",
- },
- LatestPodFlag,
- }
+ podRmCommand cliconfig.PodRmValues
podRmDescription = fmt.Sprintf(`
podman pod rm will remove one or more pods from the host. The pod name or ID can
be used. A pod with containers will not be removed without --force.
If --force is specified, all containers will be stopped, then removed.
`)
- podRmCommand = cli.Command{
- Name: "rm",
- Usage: "Remove one or more pods",
- Description: podRmDescription,
- Flags: sortFlags(podRmFlags),
- Action: podRmCmd,
- ArgsUsage: "[POD ...]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _podRmCommand = &cobra.Command{
+ Use: "rm",
+ Short: "Remove one or more pods",
+ Long: podRmDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podRmCommand.InputArgs = args
+ podRmCommand.GlobalFlags = MainGlobalOpts
+ return podRmCmd(&podRmCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman pod rm mywebserverpod
+ podman pod rm -f 860a4b23
+ podman pod rm -f -a`,
}
)
-// saveCmd saves the image to either docker-archive or oci
-func podRmCmd(c *cli.Context) error {
- if err := checkMutuallyExclusiveFlags(c); err != nil {
- return err
- }
+func init() {
+ podRmCommand.Command = _podRmCommand
+ podRmCommand.SetUsageTemplate(UsageTemplate())
+ flags := podRmCommand.Flags()
+ flags.BoolVarP(&podRmCommand.All, "all", "a", false, "Remove all running pods")
+ flags.BoolVarP(&podRmCommand.Force, "force", "f", false, "Force removal of a running pod by first stopping all containers, then removing all containers in the pod. The default is false")
+ flags.BoolVarP(&podRmCommand.Latest, "latest", "l", false, "Remove the latest pod podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
- runtime, err := libpodruntime.GetRuntime(c)
+// podRmCmd deletes pods
+func podRmCmd(c *cliconfig.PodRmValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
+ podRmIds, podRmErrors := runtime.RemovePods(getContext(), c)
+ for _, p := range podRmIds {
+ fmt.Println(p)
+ }
+ if len(podRmErrors) == 0 {
+ return nil
+ }
+ // Grab the last error
+ lastError := podRmErrors[len(podRmErrors)-1]
+ // Remove the last error from the error slice
+ podRmErrors = podRmErrors[:len(podRmErrors)-1]
- ctx := getContext()
- force := c.Bool("force")
-
- // getPodsFromContext returns an error when a requested pod
- // isn't found. The only fatal error scenerio is when there are no pods
- // in which case the following loop will be skipped.
- pods, lastError := getPodsFromContext(c, runtime)
-
- for _, pod := range pods {
- err = runtime.RemovePod(ctx, pod, force, force)
- if err != nil {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "failed to delete pod %v", pod.ID())
- } else {
- fmt.Println(pod.ID())
- }
+ for _, err := range podRmErrors {
+ logrus.Errorf("%q", err)
}
return lastError
}
diff --git a/cmd/podman/pod_start.go b/cmd/podman/pod_start.go
index 2178340a4..5761afd52 100644
--- a/cmd/podman/pod_start.go
+++ b/cmd/podman/pod_start.go
@@ -3,75 +3,68 @@ package main
import (
"fmt"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podStartFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Start all running pods",
- },
- LatestPodFlag,
- }
+ podStartCommand cliconfig.PodStartValues
podStartDescription = `
podman pod start
Starts one or more pods. The pod name or ID can be used.
`
-
- podStartCommand = cli.Command{
- Name: "start",
- Usage: "Start one or more pods",
- Description: podStartDescription,
- Flags: sortFlags(podStartFlags),
- Action: podStartCmd,
- ArgsUsage: "POD-NAME [POD-NAME ...]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _podStartCommand = &cobra.Command{
+ Use: "start",
+ Short: "Start one or more pods",
+ Long: podStartDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podStartCommand.InputArgs = args
+ podStartCommand.GlobalFlags = MainGlobalOpts
+ return podStartCmd(&podStartCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman pod start podID
+ podman pod start --latest
+ podman pod start --all`,
}
)
-func podStartCmd(c *cli.Context) error {
- if err := checkMutuallyExclusiveFlags(c); err != nil {
- return err
- }
+func init() {
+ podStartCommand.Command = _podStartCommand
+ podStartCommand.SetUsageTemplate(UsageTemplate())
+ flags := podStartCommand.Flags()
+ flags.BoolVarP(&podStartCommand.All, "all", "a", false, "Start all pods")
+ flags.BoolVarP(&podStartCommand.Latest, "latest", "l", false, "Start the latest pod podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
- runtime, err := libpodruntime.GetRuntime(c)
+func podStartCmd(c *cliconfig.PodStartValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- // getPodsFromContext returns an error when a requested pod
- // isn't found. The only fatal error scenerio is when there are no pods
- // in which case the following loop will be skipped.
- pods, lastError := getPodsFromContext(c, runtime)
-
- ctx := getContext()
- for _, pod := range pods {
- ctr_errs, err := pod.Start(ctx)
- if ctr_errs != nil {
- for ctr, err := range ctr_errs {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to start container %q on pod %q", ctr, pod.ID())
- }
- continue
- }
- if err != nil {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to start pod %q", pod.ID())
- continue
- }
- fmt.Println(pod.ID())
+ podStartIDs, podStartErrors := runtime.StartPods(getContext(), c)
+ for _, p := range podStartIDs {
+ fmt.Println(p)
}
+ if len(podStartErrors) == 0 {
+ return nil
+ }
+ // Grab the last error
+ lastError := podStartErrors[len(podStartErrors)-1]
+ // Remove the last error from the error slice
+ podStartErrors = podStartErrors[:len(podStartErrors)-1]
+ for _, err := range podStartErrors {
+ logrus.Errorf("%q", err)
+ }
return lastError
}
diff --git a/cmd/podman/pod_stats.go b/cmd/podman/pod_stats.go
index 0f0e215e6..907d6a547 100644
--- a/cmd/podman/pod_stats.go
+++ b/cmd/podman/pod_stats.go
@@ -2,59 +2,62 @@ package main
import (
"fmt"
+ "html/template"
+ "os"
+ "reflect"
"strings"
+ "text/tabwriter"
"time"
"encoding/json"
tm "github.com/buger/goterm"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/pkg/errors"
+ "github.com/spf13/cobra"
"github.com/ulule/deepcopier"
- "github.com/urfave/cli"
)
var (
- podStatsFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Show stats for all pods. Only running pods are shown by default.",
- },
- cli.BoolFlag{
- Name: "no-stream",
- Usage: "Disable streaming stats and only pull the first result, default setting is false",
- },
- cli.BoolFlag{
- Name: "no-reset",
- Usage: "Disable resetting the screen between intervals",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Pretty-print container statistics to JSON or using a Go template",
- }, LatestPodFlag,
- }
+ podStatsCommand cliconfig.PodStatsValues
podStatsDescription = "Display a live stream of resource usage statistics for the containers in or more pods"
- podStatsCommand = cli.Command{
- Name: "stats",
- Usage: "Display percentage of CPU, memory, network I/O, block I/O and PIDs for containers in one or more pods",
- Description: podStatsDescription,
- Flags: sortFlags(podStatsFlags),
- Action: podStatsCmd,
- ArgsUsage: "[POD_NAME_OR_ID]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _podStatsCommand = &cobra.Command{
+ Use: "stats",
+ Short: "Display percentage of CPU, memory, network I/O, block I/O and PIDs for containers in one or more pods",
+ Long: podStatsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podStatsCommand.InputArgs = args
+ podStatsCommand.GlobalFlags = MainGlobalOpts
+ return podStatsCmd(&podStatsCommand)
+ },
+ Example: `podman pod stats -a --no-stream
+ podman pod stats --no-reset podID
+ podman pod stats --no-stream --format "table {{.ID}} {{.Name}} {{.MemUsage}}" podID`,
}
)
-func podStatsCmd(c *cli.Context) error {
+func init() {
+ podStatsCommand.Command = _podStatsCommand
+ podStatsCommand.SetUsageTemplate(UsageTemplate())
+ flags := podStatsCommand.Flags()
+ flags.BoolVarP(&podStatsCommand.All, "all", "a", false, "Provide stats for all running pods")
+ flags.StringVar(&podStatsCommand.Format, "format", "", "Pretty-print container statistics to JSON or using a Go template")
+ flags.BoolVarP(&podStatsCommand.Latest, "latest", "l", false, "Provide stats on the latest pod podman is aware of")
+ flags.BoolVar(&podStatsCommand.NoStream, "no-stream", false, "Disable streaming stats and only pull the first result, default setting is false")
+ flags.BoolVar(&podStatsCommand.NoReset, "no-reset", false, "Disable resetting the screen between intervals")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func podStatsCmd(c *cliconfig.PodStatsValues) error {
var (
podFunc func() ([]*libpod.Pod, error)
)
- format := c.String("format")
- all := c.Bool("all")
- latest := c.Bool("latest")
+ format := c.Format
+ all := c.All
+ latest := c.Latest
ctr := 0
if all {
ctr += 1
@@ -62,7 +65,7 @@ func podStatsCmd(c *cli.Context) error {
if latest {
ctr += 1
}
- if len(c.Args()) > 0 {
+ if len(c.InputArgs) > 0 {
ctr += 1
}
@@ -73,19 +76,19 @@ func podStatsCmd(c *cli.Context) error {
all = true
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
times := -1
- if c.Bool("no-stream") {
+ if c.NoStream {
times = 1
}
- if len(c.Args()) > 0 {
- podFunc = func() ([]*libpod.Pod, error) { return getPodsByList(c.Args(), runtime) }
+ if len(c.InputArgs) > 0 {
+ podFunc = func() ([]*libpod.Pod, error) { return getPodsByList(c.InputArgs, runtime) }
} else if latest {
podFunc = func() ([]*libpod.Pod, error) {
latestPod, err := runtime.GetLatestPod()
@@ -141,6 +144,25 @@ func podStatsCmd(c *cli.Context) error {
step = 0
}
+ headerNames := make(map[string]string)
+ if c.Format != "" {
+ // Make a map of the field names for the headers
+ v := reflect.ValueOf(podStatOut{})
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ value := strings.ToUpper(splitCamelCase(t.Field(i).Name))
+ switch value {
+ case "CPU":
+ value = value + " %"
+ case "MEM":
+ value = value + " %"
+ case "MEM USAGE":
+ value = "MEM USAGE / LIMIT"
+ }
+ headerNames[t.Field(i).Name] = value
+ }
+ }
+
for i := 0; i < times; i += step {
var newStats []*libpod.PodContainerStats
for _, p := range pods {
@@ -159,7 +181,7 @@ func podStatsCmd(c *cli.Context) error {
newStats = append(newStats, &newPod)
}
//Output
- if strings.ToLower(format) != formats.JSONString && !c.Bool("no-reset") {
+ if strings.ToLower(format) != formats.JSONString && !c.NoReset {
tm.Clear()
tm.MoveCursor(1, 1)
tm.Flush()
@@ -168,7 +190,14 @@ func podStatsCmd(c *cli.Context) error {
outputJson(newStats)
} else {
- outputToStdOut(newStats)
+ results := podContainerStatsToPodStatOut(newStats)
+ if len(format) == 0 {
+ outputToStdOut(results)
+ } else {
+ if err := printPSFormat(c.Format, results, headerNames); err != nil {
+ return err
+ }
+ }
}
time.Sleep(time.Second)
previousPodStats := new([]*libpod.PodContainerStats)
@@ -182,28 +211,88 @@ func podStatsCmd(c *cli.Context) error {
return nil
}
-func outputToStdOut(stats []*libpod.PodContainerStats) {
- outFormat := ("%-14s %-14s %-12s %-6s %-19s %-6s %-19s %-19s %-4s\n")
- fmt.Printf(outFormat, "POD", "CID", "NAME", "CPU %", "MEM USAGE/ LIMIT", "MEM %", "NET IO", "BLOCK IO", "PIDS")
- for _, i := range stats {
- if len(i.ContainerStats) == 0 {
- fmt.Printf(outFormat, i.Pod.ID()[:12], "--", "--", "--", "--", "--", "--", "--", "--")
- }
- for _, c := range i.ContainerStats {
- cpu := floatToPercentString(c.CPU)
- memUsage := combineHumanValues(c.MemUsage, c.MemLimit)
- memPerc := floatToPercentString(c.MemPerc)
- netIO := combineHumanValues(c.NetInput, c.NetOutput)
- blockIO := combineHumanValues(c.BlockInput, c.BlockOutput)
- pids := pidsToString(c.PIDs)
- containerName := c.Name
- if len(c.Name) > 10 {
- containerName = containerName[:10]
+func podContainerStatsToPodStatOut(stats []*libpod.PodContainerStats) []*podStatOut {
+ var out []*podStatOut
+ for _, p := range stats {
+ for _, c := range p.ContainerStats {
+ o := podStatOut{
+ CPU: floatToPercentString(c.CPU),
+ MemUsage: combineHumanValues(c.MemUsage, c.MemLimit),
+ Mem: floatToPercentString(c.MemPerc),
+ NetIO: combineHumanValues(c.NetInput, c.NetOutput),
+ BlockIO: combineHumanValues(c.BlockInput, c.BlockOutput),
+ PIDS: pidsToString(c.PIDs),
+ CID: c.ContainerID[:12],
+ Name: c.Name,
+ Pod: p.Pod.ID()[:12],
}
- fmt.Printf(outFormat, i.Pod.ID()[:12], c.ContainerID[:12], containerName, cpu, memUsage, memPerc, netIO, blockIO, pids)
+ out = append(out, &o)
+ }
+ }
+ return out
+}
+
+type podStatOut struct {
+ CPU string
+ MemUsage string
+ Mem string
+ NetIO string
+ BlockIO string
+ PIDS string
+ Pod string
+ CID string
+ Name string
+}
+
+func printPSFormat(format string, stats []*podStatOut, headerNames map[string]string) error {
+ if len(stats) == 0 {
+ return nil
+ }
+
+ // Use a tabwriter to align column format
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
+ // Spit out the header if "table" is present in the format
+ if strings.HasPrefix(format, "table") {
+ hformat := strings.Replace(strings.TrimSpace(format[5:]), " ", "\t", -1)
+ format = hformat
+ headerTmpl, err := template.New("header").Parse(hformat)
+ if err != nil {
+ return err
+ }
+ if err := headerTmpl.Execute(w, headerNames); err != nil {
+ return err
+ }
+ fmt.Fprintln(w, "")
+ }
+
+ // Spit out the data rows now
+ dataTmpl, err := template.New("data").Parse(format)
+ if err != nil {
+ return err
+ }
+ for _, container := range stats {
+ if err := dataTmpl.Execute(w, container); err != nil {
+ return err
+ }
+ fmt.Fprintln(w, "")
+ }
+ // Flush the writer
+ return w.Flush()
+
+}
+
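+// outputToStdOut prints the default, tab-aligned pod stats table.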
+func outputToStdOut(stats []*podStatOut) {
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
+ outFormat := ("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n")
+ fmt.Fprintf(w, outFormat, "POD", "CID", "NAME", "CPU %", "MEM USAGE/ LIMIT", "MEM %", "NET IO", "BLOCK IO", "PIDS")
+ for _, i := range stats {
+ if len(stats) == 0 {
+ fmt.Fprintf(w, outFormat, i.Pod, "--", "--", "--", "--", "--", "--", "--", "--")
+ } else {
+ fmt.Fprintf(w, outFormat, i.Pod, i.CID, i.Name, i.CPU, i.MemUsage, i.Mem, i.NetIO, i.BlockIO, i.PIDS)
}
}
- fmt.Println()
+ w.Flush()
}
func getPreviousPodContainerStats(podID string, prev []*libpod.PodContainerStats) map[string]*libpod.ContainerStats {
diff --git a/cmd/podman/pod_stop.go b/cmd/podman/pod_stop.go
index 148b4d518..62d0d4aa5 100644
--- a/cmd/podman/pod_stop.go
+++ b/cmd/podman/pod_stop.go
@@ -2,83 +2,71 @@ package main
import (
"fmt"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podStopFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Stop all running pods",
- },
- LatestPodFlag,
- cli.UintFlag{
- Name: "timeout, time, t",
- Usage: "Seconds to wait for pod stop before killing the container",
- },
- }
+ podStopCommand cliconfig.PodStopValues
podStopDescription = `
podman pod stop
Stops one or more running pods. The pod name or ID can be used.
`
- podStopCommand = cli.Command{
- Name: "stop",
- Usage: "Stop one or more pods",
- Description: podStopDescription,
- Flags: sortFlags(podStopFlags),
- Action: podStopCmd,
- ArgsUsage: "POD-NAME [POD-NAME ...]",
- OnUsageError: usageErrorHandler,
+ _podStopCommand = &cobra.Command{
+ Use: "stop",
+ Short: "Stop one or more pods",
+ Long: podStopDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podStopCommand.InputArgs = args
+ podStopCommand.GlobalFlags = MainGlobalOpts
+ return podStopCmd(&podStopCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman pod stop mywebserverpod
+ podman pod stop --latest
+ podman pod stop --timeout 0 490eb 3557fb`,
}
)
-func podStopCmd(c *cli.Context) error {
- timeout := -1
- if err := checkMutuallyExclusiveFlags(c); err != nil {
- return err
- }
+func init() {
+ podStopCommand.Command = _podStopCommand
+ podStopCommand.SetUsageTemplate(UsageTemplate())
+ flags := podStopCommand.Flags()
+ flags.BoolVarP(&podStopCommand.All, "all", "a", false, "Stop all running pods")
+ flags.BoolVarP(&podStopCommand.Latest, "latest", "l", false, "Stop the latest pod podman is aware of")
+ flags.UintVarP(&podStopCommand.Timeout, "timeout", "t", 0, "Seconds to wait for pod stop before killing the container")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
- runtime, err := libpodruntime.GetRuntime(c)
+func podStopCmd(c *cliconfig.PodStopValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- // getPodsFromContext returns an error when a requested pod
- // isn't found. The only fatal error scenerio is when there are no pods
- // in which case the following loop will be skipped.
- pods, lastError := getPodsFromContext(c, runtime)
-
- ctx := getContext()
-
- if c.IsSet("timeout") {
- timeout = int(c.Uint("timeout"))
+ podStopIds, podStopErrors := runtime.StopPods(getContext(), c)
+ for _, p := range podStopIds {
+ fmt.Println(p)
+ }
+ if len(podStopErrors) == 0 {
+ return nil
}
- for _, pod := range pods {
- // set cleanup to true to clean mounts and namespaces
- ctr_errs, err := pod.StopWithTimeout(ctx, true, timeout)
- if ctr_errs != nil {
- for ctr, err := range ctr_errs {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to stop container %q on pod %q", ctr, pod.ID())
- }
- continue
- }
- if err != nil {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to stop pod %q", pod.ID())
- continue
- }
- fmt.Println(pod.ID())
+ // Grab the last error
+ lastError := podStopErrors[len(podStopErrors)-1]
+ // Remove the last error from the error slice
+ podStopErrors = podStopErrors[:len(podStopErrors)-1]
+
+ for _, err := range podStopErrors {
+ logrus.Errorf("%q", err)
}
return lastError
}
diff --git a/cmd/podman/pod_top.go b/cmd/podman/pod_top.go
index 1bd1287db..790118496 100644
--- a/cmd/podman/pod_top.go
+++ b/cmd/podman/pod_top.go
@@ -6,21 +6,17 @@ import (
"strings"
"text/tabwriter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podTopFlags = []cli.Flag{
- LatestFlag,
- cli.BoolFlag{
- Name: "list-descriptors",
- Hidden: true,
- },
- }
+ podTopCommand cliconfig.PodTopValues
+
podTopDescription = fmt.Sprintf(`Display the running processes of containers in a pod. Specify format descriptors
to alter the output. You may run "podman pod top -l pid pcpu seccomp" to print
the process ID, the CPU percentage and the seccomp mode of each process of
@@ -28,24 +24,37 @@ the latest pod.
%s
`, getDescriptorString())
- podTopCommand = cli.Command{
- Name: "top",
- Usage: "Display the running processes of containers in a pod",
- Description: podTopDescription,
- Flags: sortFlags(podTopFlags),
- Action: podTopCmd,
- ArgsUsage: "POD-NAME [format descriptors]",
- SkipArgReorder: true,
- OnUsageError: usageErrorHandler,
+ _podTopCommand = &cobra.Command{
+ Use: "top",
+ Short: "Display the running processes of containers in a pod",
+ Long: podTopDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podTopCommand.InputArgs = args
+ podTopCommand.GlobalFlags = MainGlobalOpts
+ return podTopCmd(&podTopCommand)
+ },
+ Example: `podman pod top podID
+ podman pod top --latest
+ podman pod top --latest pid seccomp args %C`,
}
)
-func podTopCmd(c *cli.Context) error {
+func init() {
+ podTopCommand.Command = _podTopCommand
+ podTopCommand.SetUsageTemplate(UsageTemplate())
+ flags := podTopCommand.Flags()
+ flags.BoolVarP(&podTopCommand.Latest, "latest,", "l", false, "Act on the latest pod podman is aware of")
+ flags.BoolVar(&podTopCommand.ListDescriptors, "list-descriptors", false, "")
+ flags.MarkHidden("list-descriptors")
+
+}
+
+func podTopCmd(c *cliconfig.PodTopValues) error {
var pod *libpod.Pod
var err error
- args := c.Args()
+ args := c.InputArgs
- if c.Bool("list-descriptors") {
+ if c.ListDescriptors {
descriptors, err := libpod.GetContainerPidInformationDescriptors()
if err != nil {
return err
@@ -54,21 +63,18 @@ func podTopCmd(c *cli.Context) error {
return nil
}
- if len(args) < 1 && !c.Bool("latest") {
+ if len(args) < 1 && !c.Latest {
return errors.Errorf("you must provide the name or id of a running pod")
}
- if err := validateFlags(c, podTopFlags); err != nil {
- return err
- }
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
var descriptors []string
- if c.Bool("latest") {
+ if c.Latest {
descriptors = args
pod, err = runtime.GetLatestPod()
} else {
diff --git a/cmd/podman/pod_unpause.go b/cmd/podman/pod_unpause.go
index ed1a00cf8..16481d0e2 100644
--- a/cmd/podman/pod_unpause.go
+++ b/cmd/podman/pod_unpause.go
@@ -3,72 +3,71 @@ package main
import (
"fmt"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- podUnpauseFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Unpause all paused pods",
+ podUnpauseCommand cliconfig.PodUnpauseValues
+ podUnpauseDescription = `Unpauses one or more pods. The pod name or ID can be used.`
+ _podUnpauseCommand = &cobra.Command{
+ Use: "unpause",
+ Short: "Unpause one or more pods",
+ Long: podUnpauseDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ podUnpauseCommand.InputArgs = args
+ podUnpauseCommand.GlobalFlags = MainGlobalOpts
+ return podUnpauseCmd(&podUnpauseCommand)
},
- LatestPodFlag,
- }
- podUnpauseDescription = `
- Unpauses one or more pods. The pod name or ID can be used.
-`
-
- podUnpauseCommand = cli.Command{
- Name: "unpause",
- Usage: "Unpause one or more pods",
- Description: podUnpauseDescription,
- Flags: sortFlags(podUnpauseFlags),
- Action: podUnpauseCmd,
- ArgsUsage: "POD-NAME|POD-ID [POD-NAME|POD-ID ...]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman pod unpause podID1 podID2
+ podman pod unpause --all
+ podman pod unpause --latest`,
}
)
-func podUnpauseCmd(c *cli.Context) error {
- if err := checkMutuallyExclusiveFlags(c); err != nil {
- return err
- }
+func init() {
+ podUnpauseCommand.Command = _podUnpauseCommand
+ podUnpauseCommand.SetUsageTemplate(UsageTemplate())
+ flags := podUnpauseCommand.Flags()
+ flags.BoolVarP(&podUnpauseCommand.All, "all", "a", false, "Unpause all paused pods")
+ flags.BoolVarP(&podUnpauseCommand.Latest, "latest", "l", false, "Unpause the latest pod podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
- runtime, err := libpodruntime.GetRuntime(c)
+func podUnpauseCmd(c *cliconfig.PodUnpauseValues) error {
+ var lastError error
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- // getPodsFromContext returns an error when a requested pod
- // isn't found. The only fatal error scenerio is when there are no pods
- // in which case the following loop will be skipped.
- pods, lastError := getPodsFromContext(c, runtime)
+ unpauseIDs, conErrors, unpauseErrors := runtime.UnpausePods(c)
- for _, pod := range pods {
- ctr_errs, err := pod.Unpause()
- if ctr_errs != nil {
- for ctr, err := range ctr_errs {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "unable to unpause container %q on pod %q", ctr, pod.ID())
- }
- continue
- }
- if err != nil {
+ for _, p := range unpauseIDs {
+ fmt.Println(p)
+ }
+ if conErrors != nil && len(conErrors) > 0 {
+ for ctr, err := range conErrors {
if lastError != nil {
logrus.Errorf("%q", lastError)
}
- lastError = errors.Wrapf(err, "unable to unpause pod %q", pod.ID())
- continue
+ lastError = errors.Wrapf(err, "unable to unpause container %s", ctr)
}
- fmt.Println(pod.ID())
}
-
+ if len(unpauseErrors) > 0 {
+ lastError = unpauseErrors[len(unpauseErrors)-1]
+ // Remove the last error from the error slice
+ unpauseErrors = unpauseErrors[:len(unpauseErrors)-1]
+ }
+ for _, err := range unpauseErrors {
+ logrus.Errorf("%q", err)
+ }
return lastError
}
diff --git a/cmd/podman/port.go b/cmd/podman/port.go
index 6875c648a..bcf372a51 100644
--- a/cmd/podman/port.go
+++ b/cmd/podman/port.go
@@ -5,38 +5,50 @@ import (
"strconv"
"strings"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- portFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Display port information for all containers",
- },
- LatestFlag,
- }
+ portCommand cliconfig.PortValues
portDescription = `
podman port
List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT
`
-
- portCommand = cli.Command{
- Name: "port",
- Usage: "List port mappings or a specific mapping for the container",
- Description: portDescription,
- Flags: sortFlags(portFlags),
- Action: portCmd,
- ArgsUsage: "CONTAINER-NAME [mapping]",
- OnUsageError: usageErrorHandler,
+ _portCommand = &cobra.Command{
+ Use: "port",
+ Short: "List port mappings or a specific mapping for the container",
+ Long: portDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ portCommand.InputArgs = args
+ portCommand.GlobalFlags = MainGlobalOpts
+ return portCmd(&portCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, true)
+ },
+ Example: `podman port --all
+ podman port ctrID 80/tcp
+ podman port --latest 80`,
}
)
-func portCmd(c *cli.Context) error {
+func init() {
+ portCommand.Command = _portCommand
+ portCommand.SetUsageTemplate(UsageTemplate())
+ flags := portCommand.Flags()
+
+ flags.BoolVarP(&portCommand.All, "all", "a", false, "Display port information for all containers")
+ flags.BoolVarP(&portCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func portCmd(c *cliconfig.PortValues) error {
var (
userProto, containerName string
userPort int
@@ -44,29 +56,26 @@ func portCmd(c *cli.Context) error {
containers []*libpod.Container
)
- args := c.Args()
- if err := validateFlags(c, portFlags); err != nil {
- return err
- }
+ args := c.InputArgs
- if c.Bool("latest") && c.Bool("all") {
+ if c.Latest && c.All {
return errors.Errorf("the 'all' and 'latest' options cannot be used together")
}
- if c.Bool("all") && len(args) > 0 {
+ if c.All && len(args) > 0 {
return errors.Errorf("no additional arguments can be used with 'all'")
}
- if len(args) == 0 && !c.Bool("latest") && !c.Bool("all") {
+ if len(args) == 0 && !c.Latest && !c.All {
return errors.Errorf("you must supply a running container name or id")
}
- if !c.Bool("latest") && !c.Bool("all") {
+ if !c.Latest && !c.All {
containerName = args[0]
}
port := ""
- if len(args) > 1 && !c.Bool("latest") {
+ if len(args) > 1 && !c.Latest {
port = args[1]
}
- if len(args) == 1 && c.Bool("latest") {
+ if len(args) == 1 && c.Latest {
port = args[0]
}
if port != "" {
@@ -90,19 +99,19 @@ func portCmd(c *cli.Context) error {
}
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- if !c.Bool("latest") && !c.Bool("all") {
+ if !c.Latest && !c.All {
container, err = runtime.LookupContainer(containerName)
if err != nil {
return errors.Wrapf(err, "unable to find container %s", containerName)
}
containers = append(containers, container)
- } else if c.Bool("latest") {
+ } else if c.Latest {
container, err = runtime.GetLatestContainer()
if err != nil {
return errors.Wrapf(err, "unable to get last created container")
@@ -119,11 +128,16 @@ func portCmd(c *cli.Context) error {
if state, _ := con.State(); state != libpod.ContainerStateRunning {
continue
}
- if c.Bool("all") {
+ if c.All {
fmt.Println(con.ID())
}
+
+ portmappings, err := con.PortMappings()
+ if err != nil {
+ return err
+ }
// Iterate mappings
- for _, v := range con.Config().PortMappings {
+ for _, v := range portmappings {
hostIP := v.HostIP
// Set host IP to 0.0.0.0 if blank
if hostIP == "" {
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index 1708c671c..9c165b836 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -12,6 +12,7 @@ import (
"text/tabwriter"
"time"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
@@ -19,9 +20,10 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-units"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/fields"
)
@@ -153,112 +155,86 @@ func (a psSortedSize) Less(i, j int) bool {
}
var (
- psFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Show all the containers, default is only running containers",
- },
- cli.StringSliceFlag{
- Name: "filter, f",
- Usage: "Filter output based on conditions given",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Pretty-print containers to JSON or using a Go template",
- },
- cli.IntFlag{
- Name: "last, n",
- Usage: "Print the n last created containers (all states)",
- Value: -1,
- },
- cli.BoolFlag{
- Name: "latest, l",
- Usage: "Show the latest container created (all states)",
- },
- cli.BoolFlag{
- Name: "namespace, ns",
- Usage: "Display namespace information",
- },
- cli.BoolFlag{
- Name: "no-trunc",
- Usage: "Display the extended information",
- },
- cli.BoolFlag{
- Name: "pod, p",
- Usage: "Print the ID and name of the pod the containers are associated with",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Print the numeric IDs of the containers only",
- },
- cli.BoolFlag{
- Name: "size, s",
- Usage: "Display the total file sizes",
- },
- cli.StringFlag{
- Name: "sort",
- Usage: "Sort output by command, created, id, image, names, runningfor, size, or status",
- Value: "created",
- },
- cli.BoolFlag{
- Name: "sync",
- Usage: "Sync container state with OCI runtime",
- },
- }
+ psCommand cliconfig.PsValues
psDescription = "Prints out information about the containers"
- psCommand = cli.Command{
- Name: "list",
- Aliases: []string{"ls", "ps"},
- Usage: "List containers",
- Description: psDescription,
- Flags: sortFlags(psFlags),
- Action: psCmd,
- ArgsUsage: "",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _psCommand = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls", "ps"},
+ Short: "List containers",
+ Long: psDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ psCommand.InputArgs = args
+ psCommand.GlobalFlags = MainGlobalOpts
+ return psCmd(&psCommand)
+ },
+ Example: `podman ps -a
+ podman ps -a --format "{{.ID}} {{.Image}} {{.Labels}} {{.Mounts}}"
+ podman ps --size --sort names`,
}
)
-func psCmd(c *cli.Context) error {
+func init() {
+ psCommand.Command = _psCommand
+ psCommand.SetUsageTemplate(UsageTemplate())
+ flags := psCommand.Flags()
+ flags.BoolVarP(&psCommand.All, "all", "a", false, "Show all the containers, default is only running containers")
+ flags.StringSliceVarP(&psCommand.Filter, "filter", "f", []string{}, "Filter output based on conditions given")
+ flags.StringVar(&psCommand.Format, "format", "", "Pretty-print containers to JSON or using a Go template")
+ flags.IntVarP(&psCommand.Last, "last", "n", -1, "Print the n last created containers (all states)")
+ flags.BoolVarP(&psCommand.Latest, "latest", "l", false, "Show the latest container created (all states)")
+ flags.BoolVar(&psCommand.Namespace, "namespace", false, "Display namespace information")
+ flags.BoolVar(&psCommand.Namespace, "ns", false, "Display namespace information")
+ flags.BoolVar(&psCommand.NoTrunct, "no-trunc", false, "Display the extended information")
+ flags.BoolVarP(&psCommand.Pod, "pod", "p", false, "Print the ID and name of the pod the containers are associated with")
+ flags.BoolVarP(&psCommand.Quiet, "quiet", "q", false, "Print the numeric IDs of the containers only")
+ flags.BoolVarP(&psCommand.Size, "size", "s", false, "Display the total file sizes")
+ flags.StringVar(&psCommand.Sort, "sort", "created", "Sort output by command, created, id, image, names, runningfor, size, or status")
+ flags.BoolVar(&psCommand.Sync, "sync", false, "Sync container state with OCI runtime")
+
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func psCmd(c *cliconfig.PsValues) error {
+ if c.Bool("trace") {
+ span, _ := opentracing.StartSpanFromContext(Ctx, "psCmd")
+ defer span.Finish()
+ }
+
var (
filterFuncs []libpod.ContainerFilter
outputContainers []*libpod.Container
)
- if err := validateFlags(c, psFlags); err != nil {
- return err
- }
-
if err := checkFlagsPassed(c); err != nil {
return errors.Wrapf(err, "error with flags passed")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- if len(c.Args()) > 0 {
+ if len(c.InputArgs) > 0 {
return errors.Errorf("too many arguments, ps takes no arguments")
}
opts := shared.PsOptions{
- All: c.Bool("all"),
- Format: c.String("format"),
- Last: c.Int("last"),
- Latest: c.Bool("latest"),
- NoTrunc: c.Bool("no-trunc"),
- Pod: c.Bool("pod"),
- Quiet: c.Bool("quiet"),
- Size: c.Bool("size"),
- Namespace: c.Bool("namespace"),
- Sort: c.String("sort"),
- Sync: c.Bool("sync"),
- }
-
- filters := c.StringSlice("filter")
+ All: c.All,
+ Format: c.Format,
+ Last: c.Last,
+ Latest: c.Latest,
+ NoTrunc: c.NoTrunct,
+ Pod: c.Pod,
+ Quiet: c.Quiet,
+ Size: c.Size,
+ Namespace: c.Namespace,
+ Sort: c.Sort,
+ Sync: c.Sync,
+ }
+
+ filters := c.Filter
if len(filters) > 0 {
for _, f := range filters {
filterSplit := strings.SplitN(f, "=", 2)
@@ -299,7 +275,7 @@ func psCmd(c *cli.Context) error {
maxWorkers := shared.Parallelize("ps")
if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
+ maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
@@ -384,20 +360,20 @@ func printQuiet(containers []shared.PsContainerOutput) error {
}
// checkFlagsPassed checks if mutually exclusive flags are passed together
-func checkFlagsPassed(c *cli.Context) error {
+func checkFlagsPassed(c *cliconfig.PsValues) error {
// latest, and last are mutually exclusive.
- if c.Int("last") >= 0 && c.Bool("latest") {
+ if c.Last >= 0 && c.Latest {
return errors.Errorf("last and latest are mutually exclusive")
}
// Quiet conflicts with size, namespace, and format with a Go template
- if c.Bool("quiet") {
- if c.Bool("size") || c.Bool("namespace") || (c.IsSet("format") &&
- c.String("format") != formats.JSONString) {
+ if c.Quiet {
+ if c.Size || c.Namespace || (c.Flag("format").Changed &&
+ c.Format != formats.JSONString) {
return errors.Errorf("quiet conflicts with size, namespace, and format with go template")
}
}
// Size and namespace conflict with each other
- if c.Bool("size") && c.Bool("namespace") {
+ if c.Size && c.Namespace {
return errors.Errorf("size and namespace options conflict")
}
return nil
diff --git a/cmd/podman/pull.go b/cmd/podman/pull.go
index 2349265d0..71f555162 100644
--- a/cmd/podman/pull.go
+++ b/cmd/podman/pull.go
@@ -6,88 +6,93 @@ import (
"os"
"strings"
+ "github.com/containers/image/docker"
dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/libpod/common"
image2 "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- pullFlags = []cli.Flag{
- cli.StringFlag{
- Name: "authfile",
- Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override. ",
- },
- cli.StringFlag{
- Name: "cert-dir",
- Usage: "`pathname` of a directory containing TLS certificates and keys",
- },
- cli.StringFlag{
- Name: "creds",
- Usage: "`credentials` (USERNAME:PASSWORD) to use for authenticating to a registry",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Suppress output information when pulling images",
- },
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "`pathname` of signature policy file (not usually used)",
- },
- cli.BoolTFlag{
- Name: "tls-verify",
- Usage: "Require HTTPS and verify certificates when contacting registries (default: true)",
- },
- }
-
+ pullCommand cliconfig.PullValues
pullDescription = `
Pulls an image from a registry and stores it locally.
An image can be pulled using its tag or digest. If a tag is not
specified, the image with the 'latest' tag (if it exists) is pulled
`
- pullCommand = cli.Command{
- Name: "pull",
- Usage: "Pull an image from a registry",
- Description: pullDescription,
- Flags: sortFlags(pullFlags),
- Action: pullCmd,
- ArgsUsage: "",
- OnUsageError: usageErrorHandler,
+ _pullCommand = &cobra.Command{
+ Use: "pull",
+ Short: "Pull an image from a registry",
+ Long: pullDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ pullCommand.InputArgs = args
+ pullCommand.GlobalFlags = MainGlobalOpts
+ return pullCmd(&pullCommand)
+ },
+ Example: `podman pull imageName
+ podman pull --cert-dir image/certs --authfile temp-auths/myauths.json docker://docker.io/myrepo/finaltest
+ podman pull fedora:latest`,
}
)
+func init() {
+ pullCommand.Command = _pullCommand
+ pullCommand.SetUsageTemplate(UsageTemplate())
+ flags := pullCommand.Flags()
+ flags.BoolVar(&pullCommand.AllTags, "all-tags", false, "All tagged images in the repository will be pulled")
+ flags.StringVar(&pullCommand.Authfile, "authfile", "", "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&pullCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
+ flags.StringVar(&pullCommand.Creds, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
+ flags.BoolVarP(&pullCommand.Quiet, "quiet", "q", false, "Suppress output information when pulling images")
+ flags.StringVar(&pullCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
+ flags.BoolVar(&pullCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries (default: true)")
+
+}
+
// pullCmd gets the data from the command line and calls pullImage
// to copy an image from a registry to a local machine
-func pullCmd(c *cli.Context) error {
- runtime, err := adapter.GetRuntime(c)
+func pullCmd(c *cliconfig.PullValues) error {
+ if c.Bool("trace") {
+ span, _ := opentracing.StartSpanFromContext(Ctx, "pullCmd")
+ defer span.Finish()
+ }
+
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
+
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- args := c.Args()
+ args := c.InputArgs
if len(args) == 0 {
- logrus.Errorf("an image name must be specified")
- return nil
+ return errors.Errorf("an image name must be specified")
}
if len(args) > 1 {
- logrus.Errorf("too many arguments. Requires exactly 1")
- return nil
+ return errors.Errorf("too many arguments. Requires exactly 1")
}
- if err := validateFlags(c, pullFlags); err != nil {
- return err
+
+ arr := strings.SplitN(args[0], ":", 2)
+ if len(arr) == 2 {
+ if c.Bool("all-tags") {
+ return errors.Errorf("tag can't be used with --all-tags")
+ }
}
+ ctx := getContext()
image := args[0]
var registryCreds *types.DockerAuthConfig
- if c.IsSet("creds") {
- creds, err := util.ParseRegistryCreds(c.String("creds"))
+ if c.Flag("creds").Changed {
+ creds, err := util.ParseRegistryCreds(c.Creds)
if err != nil {
return err
}
@@ -96,18 +101,17 @@ func pullCmd(c *cli.Context) error {
var (
writer io.Writer
- imgID string
)
- if !c.Bool("quiet") {
+ if !c.Quiet {
writer = os.Stderr
}
dockerRegistryOptions := image2.DockerRegistryOptions{
DockerRegistryCreds: registryCreds,
- DockerCertPath: c.String("cert-dir"),
+ DockerCertPath: c.CertDir,
}
- if c.IsSet("tls-verify") {
- dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ if c.Flag("tls-verify").Changed {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
// Possible for docker-archive to have multiple tags, so use LoadFromArchiveReference instead
@@ -116,22 +120,62 @@ func pullCmd(c *cli.Context) error {
if err != nil {
return errors.Wrapf(err, "error parsing %q", image)
}
- newImage, err := runtime.LoadFromArchiveReference(getContext(), srcRef, c.String("signature-policy"), writer)
+ newImage, err := runtime.LoadFromArchiveReference(getContext(), srcRef, c.SignaturePolicy, writer)
if err != nil {
return errors.Wrapf(err, "error pulling image from %q", image)
}
- imgID = newImage[0].ID()
+ fmt.Println(newImage[0].ID())
} else {
authfile := getAuthFile(c.String("authfile"))
- newImage, err := runtime.New(getContext(), image, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image2.SigningOptions{}, true, nil)
+ spec := image
+ systemContext := common.GetSystemContext("", authfile, false)
+ srcRef, err := alltransports.ParseImageName(spec)
if err != nil {
+ dockerTransport := "docker://"
+ logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, dockerTransport, err)
+ spec = dockerTransport + spec
+ srcRef2, err2 := alltransports.ParseImageName(spec)
+ if err2 != nil {
+ return errors.Wrapf(err2, "error parsing image name %q", image)
+ }
+ srcRef = srcRef2
+ }
+ var names []string
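+ // With --all-tags, enumerate every tag in the repository and pull each of them.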
+ if c.Bool("all-tags") {
+ if srcRef.DockerReference() == nil {
+ return errors.New("Non-docker transport is currently not supported")
+ }
+ tags, err := docker.GetRepositoryTags(ctx, systemContext, srcRef)
+ if err != nil {
+ return errors.Wrapf(err, "error getting repository tags")
+ }
+ for _, tag := range tags {
+ name := spec + ":" + tag
+ names = append(names, name)
+ }
+ } else {
+ names = append(names, spec)
+ }
+ var foundIDs []string
+ foundImage := true
+ for _, name := range names {
+ newImage, err := runtime.New(getContext(), name, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image2.SigningOptions{}, true, nil)
+ if err != nil {
+ println(errors.Wrapf(err, "error pulling image %q", name))
+ foundImage = false
+ continue
+ }
+ foundIDs = append(foundIDs, newImage.ID())
+ }
+ if len(names) == 1 && !foundImage {
return errors.Wrapf(err, "error pulling image %q", image)
}
- imgID = newImage.ID()
- }
-
- // Intentionally choosing to ignore if there is an error because
- // outputting the image ID is a NTH and not integral to the pull
- fmt.Println(imgID)
+ if len(names) > 1 {
+ fmt.Println("Pulled Images:")
+ }
+ for _, id := range foundIDs {
+ fmt.Println(id)
+ }
+ } // end else if strings.HasPrefix(image, dockerarchive.Transport.Name()+":")
return nil
}
diff --git a/cmd/podman/push.go b/cmd/podman/push.go
index 361a25e35..56261a8d3 100644
--- a/cmd/podman/push.go
+++ b/cmd/podman/push.go
@@ -9,81 +9,61 @@ import (
"github.com/containers/image/directory"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- pushFlags = []cli.Flag{
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "`Pathname` of signature policy file (not usually used)",
- Hidden: true,
- },
- cli.StringFlag{
- Name: "creds",
- Usage: "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry",
- },
- cli.StringFlag{
- Name: "cert-dir",
- Usage: "`Pathname` of a directory containing TLS certificates and keys",
- },
- cli.BoolFlag{
- Name: "compress",
- Usage: "Compress tarball image layers when pushing to a directory using the 'dir' transport. (default is same compression type as source)",
- },
- cli.StringFlag{
- Name: "format, f",
- Usage: "Manifest type (oci, v2s1, or v2s2) to use when pushing an image using the 'dir:' transport (default is manifest type of source)",
- },
- cli.BoolTFlag{
- Name: "tls-verify",
- Usage: "Require HTTPS and verify certificates when contacting registries (default: true)",
- },
- cli.BoolFlag{
- Name: "remove-signatures",
- Usage: "Discard any pre-existing signatures in the image",
- },
- cli.StringFlag{
- Name: "sign-by",
- Usage: "Add a signature at the destination using the specified key",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Don't output progress information when pushing images",
- },
- cli.StringFlag{
- Name: "authfile",
- Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override. ",
- },
- }
+ pushCommand cliconfig.PushValues
pushDescription = fmt.Sprintf(`
Pushes an image to a specified location.
The Image "DESTINATION" uses a "transport":"details" format.
See podman-push(1) section "DESTINATION" for the expected format`)
- pushCommand = cli.Command{
- Name: "push",
- Usage: "Push an image to a specified destination",
- Description: pushDescription,
- Flags: sortFlags(pushFlags),
- Action: pushCmd,
- ArgsUsage: "IMAGE DESTINATION",
- OnUsageError: usageErrorHandler,
+ _pushCommand = &cobra.Command{
+ Use: "push",
+ Short: "Push an image to a specified destination",
+ Long: pushDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ pushCommand.InputArgs = args
+ pushCommand.GlobalFlags = MainGlobalOpts
+ return pushCmd(&pushCommand)
+ },
+ Example: `podman push imageID docker://registry.example.com/repository:tag
+ podman push imageID oci-archive:/path/to/layout:image:tag
+ podman push --authfile temp-auths/myauths.json alpine docker://docker.io/myrepo/alpine`,
}
)
-func pushCmd(c *cli.Context) error {
+func init() {
+ pushCommand.Command = _pushCommand
+ pushCommand.SetUsageTemplate(UsageTemplate())
+ flags := pushCommand.Flags()
+ flags.MarkHidden("signature-policy")
+ flags.StringVar(&pushCommand.Authfile, "authfile", "", "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&pushCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
+ flags.BoolVar(&pushCommand.Compress, "compress", false, "Compress tarball image layers when pushing to a directory using the 'dir' transport. (default is same compression type as source)")
+ flags.StringVar(&pushCommand.Creds, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
+ flags.StringVarP(&pushCommand.Format, "format", "f", "", "Manifest type (oci, v2s1, or v2s2) to use when pushing an image using the 'dir:' transport (default is manifest type of source)")
+ flags.BoolVarP(&pushCommand.Quiet, "quiet", "q", false, "Don't output progress information when pushing images")
+ flags.BoolVar(&pushCommand.RemoveSignatures, "remove-signatures", false, "Discard any pre-existing signatures in the image")
+ flags.StringVar(&pushCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
+ flags.StringVar(&pushCommand.SignBy, "sign-by", "", "Add a signature at the destination using the specified key")
+ flags.BoolVar(&pushCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries (default: true)")
+}
+
+func pushCmd(c *cliconfig.PushValues) error {
var (
registryCreds *types.DockerAuthConfig
destName string
)
- args := c.Args()
+ args := c.InputArgs
if len(args) == 0 || len(args) > 2 {
return errors.New("podman push requires at least one image name, and optionally a second to specify a different destination name")
}
@@ -94,43 +74,40 @@ func pushCmd(c *cli.Context) error {
case 2:
destName = args[1]
}
- if err := validateFlags(c, pushFlags); err != nil {
- return err
- }
// --compress and --format can only be used for the "dir" transport
splitArg := strings.SplitN(destName, ":", 2)
- if c.IsSet("compress") || c.IsSet("format") {
+ if c.Flag("compress").Changed || c.Flag("format").Changed {
if splitArg[0] != directory.Transport.Name() {
return errors.Errorf("--compress and --format can be set only when pushing to a directory using the 'dir' transport")
}
}
- certPath := c.String("cert-dir")
- removeSignatures := c.Bool("remove-signatures")
- signBy := c.String("sign-by")
+ certPath := c.CertDir
+ removeSignatures := c.RemoveSignatures
+ signBy := c.SignBy
- if c.IsSet("creds") {
- creds, err := util.ParseRegistryCreds(c.String("creds"))
+ if c.Flag("creds").Changed {
+ creds, err := util.ParseRegistryCreds(c.Creds)
if err != nil {
return err
}
registryCreds = creds
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not create runtime")
}
defer runtime.Shutdown(false)
var writer io.Writer
- if !c.Bool("quiet") {
+ if !c.Quiet {
writer = os.Stderr
}
var manifestType string
- if c.IsSet("format") {
+ if c.Flag("format").Changed {
switch c.String("format") {
case "oci":
manifestType = imgspecv1.MediaTypeImageManifest
@@ -147,8 +124,8 @@ func pushCmd(c *cli.Context) error {
DockerRegistryCreds: registryCreds,
DockerCertPath: certPath,
}
- if c.IsSet("tls-verify") {
- dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ if c.Flag("tls-verify").Changed {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
so := image.SigningOptions{
@@ -156,12 +133,7 @@ func pushCmd(c *cli.Context) error {
SignBy: signBy,
}
- newImage, err := runtime.ImageRuntime().NewFromLocal(srcName)
- if err != nil {
- return err
- }
-
- authfile := getAuthFile(c.String("authfile"))
+ authfile := getAuthFile(c.Authfile)
- return newImage.PushImageToHeuristicDestination(getContext(), destName, manifestType, authfile, c.String("signature-policy"), writer, c.Bool("compress"), so, &dockerRegistryOptions, nil)
+ return runtime.Push(getContext(), srcName, destName, manifestType, authfile, c.SignaturePolicy, writer, c.Compress, so, &dockerRegistryOptions, nil)
}
diff --git a/cmd/podman/refresh.go b/cmd/podman/refresh.go
index b07376170..641748452 100644
--- a/cmd/podman/refresh.go
+++ b/cmd/podman/refresh.go
@@ -4,37 +4,38 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- refreshFlags = []cli.Flag{}
-
+ refreshCommand cliconfig.RefreshValues
refreshDescription = "The refresh command resets the state of all containers to handle database changes after a Podman upgrade. All running containers will be restarted."
-
- refreshCommand = cli.Command{
- Name: "refresh",
- Usage: "Refresh container state",
- Description: refreshDescription,
- Flags: sortFlags(refreshFlags),
- Action: refreshCmd,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _refreshCommand = &cobra.Command{
+ Use: "refresh",
+ Short: "Refresh container state",
+ Long: refreshDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ refreshCommand.InputArgs = args
+ refreshCommand.GlobalFlags = MainGlobalOpts
+ return refreshCmd(&refreshCommand)
+ },
}
)
-func refreshCmd(c *cli.Context) error {
- if len(c.Args()) > 0 {
- return errors.Errorf("refresh does not accept any arguments")
- }
+func init() {
+ refreshCommand.Command = _refreshCommand
+ refreshCommand.SetUsageTemplate(UsageTemplate())
+}
- if err := validateFlags(c, refreshFlags); err != nil {
- return err
+func refreshCmd(c *cliconfig.RefreshValues) error {
+ if len(c.InputArgs) > 0 {
+ return errors.Errorf("refresh does not accept any arguments")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
diff --git a/cmd/podman/restart.go b/cmd/podman/restart.go
index 2e52ce5e4..58fb38874 100644
--- a/cmd/podman/restart.go
+++ b/cmd/podman/restart.go
@@ -4,47 +4,51 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- restartFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Restart all non-running containers",
- },
- cli.BoolFlag{
- Name: "running",
- Usage: "Restart only running containers when --all is used",
+ restartCommand cliconfig.RestartValues
+ restartDescription = `Restarts one or more running containers. The container ID or name can be used. A timeout before forcibly stopping can be set, but defaults to 10 seconds`
+ _restartCommand = &cobra.Command{
+ Use: "restart",
+ Short: "Restart one or more containers",
+ Long: restartDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ restartCommand.InputArgs = args
+ restartCommand.GlobalFlags = MainGlobalOpts
+ return restartCmd(&restartCommand)
},
- cli.UintFlag{
- Name: "timeout, time, t",
- Usage: "Seconds to wait for stop before killing the container",
- Value: libpod.CtrRemoveTimeout,
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
},
- LatestFlag,
- }
- restartDescription = `Restarts one or more running containers. The container ID or name can be used. A timeout before forcibly stopping can be set, but defaults to 10 seconds`
-
- restartCommand = cli.Command{
- Name: "restart",
- Usage: "Restart one or more containers",
- Description: restartDescription,
- Flags: sortFlags(restartFlags),
- Action: restartCmd,
- ArgsUsage: "CONTAINER [CONTAINER ...]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ Example: `podman restart ctrID
+ podman restart --latest
+ podman restart ctrID1 ctrID2`,
}
)
-func restartCmd(c *cli.Context) error {
+func init() {
+ restartCommand.Command = _restartCommand
+ restartCommand.SetUsageTemplate(UsageTemplate())
+ flags := restartCommand.Flags()
+ flags.BoolVarP(&restartCommand.All, "all", "a", false, "Restart all non-running containers")
+ flags.BoolVarP(&restartCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.BoolVar(&restartCommand.Running, "running", false, "Restart only running containers when --all is used")
+ flags.UintVarP(&restartCommand.Timeout, "timeout", "t", libpod.CtrRemoveTimeout, "Seconds to wait for stop before killing the container")
+ flags.UintVar(&restartCommand.Timeout, "time", libpod.CtrRemoveTimeout, "Seconds to wait for stop before killing the container")
+
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func restartCmd(c *cliconfig.RestartValues) error {
var (
restartFuncs []shared.ParallelWorkerInput
containers []*libpod.Container
@@ -55,34 +59,31 @@ func restartCmd(c *cli.Context) error {
rootless.SetSkipStorageSetup(true)
}
- args := c.Args()
- runOnly := c.Bool("running")
- all := c.Bool("all")
- if len(args) < 1 && !c.Bool("latest") && !all {
+ args := c.InputArgs
+ runOnly := c.Running
+ all := c.All
+ if len(args) < 1 && !c.Latest && !all {
return errors.Wrapf(libpod.ErrInvalidArg, "you must provide at least one container name or ID")
}
- if err := validateFlags(c, restartFlags); err != nil {
- return err
- }
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- timeout := c.Uint("timeout")
- useTimeout := c.IsSet("timeout")
+ timeout := c.Timeout
+ useTimeout := c.Flag("timeout").Changed
// Handle --latest
- if c.Bool("latest") {
+ if c.Latest {
lastCtr, err := runtime.GetLatestContainer()
if err != nil {
return errors.Wrapf(err, "unable to get latest container")
}
restartContainers = append(restartContainers, lastCtr)
} else if runOnly {
- containers, err = getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+ containers, err = getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateRunning, "running")
if err != nil {
return err
}
@@ -105,7 +106,7 @@ func restartCmd(c *cli.Context) error {
maxWorkers := shared.Parallelize("restart")
if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
+ maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go
index 664475e22..5f6e7b892 100644
--- a/cmd/podman/restore.go
+++ b/cmd/podman/restore.go
@@ -5,68 +5,69 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ restoreCommand cliconfig.RestoreValues
restoreDescription = `
podman container restore
Restores a container from a checkpoint. The container name or ID can be used.
`
- restoreFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "keep, k",
- Usage: "Keep all temporary checkpoint files",
+ _restoreCommand = &cobra.Command{
+ Use: "restore",
+ Short: "Restores one or more containers from a checkpoint",
+ Long: restoreDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ restoreCommand.InputArgs = args
+ restoreCommand.GlobalFlags = MainGlobalOpts
+ return restoreCmd(&restoreCommand)
},
- // restore --all would make more sense if there would be
- // dedicated state for container which are checkpointed.
- // TODO: add ContainerStateCheckpointed
- cli.BoolFlag{
- Name: "tcp-established",
- Usage: "Checkpoint a container with established TCP connections",
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
},
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Restore all checkpointed containers",
- },
- LatestFlag,
- }
- restoreCommand = cli.Command{
- Name: "restore",
- Usage: "Restores one or more containers from a checkpoint",
- Description: restoreDescription,
- Flags: sortFlags(restoreFlags),
- Action: restoreCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
+ Example: `podman container restore ctrID
+ podman container restore --latest
+ podman container restore --all`,
}
)
-func restoreCmd(c *cli.Context) error {
+func init() {
+ restoreCommand.Command = _restoreCommand
+ restoreCommand.SetUsageTemplate(UsageTemplate())
+ flags := restoreCommand.Flags()
+ flags.BoolVarP(&restoreCommand.All, "all", "a", false, "Restore all checkpointed containers")
+ flags.BoolVarP(&restoreCommand.Keep, "keep", "k", false, "Keep all temporary checkpoint files")
+ flags.BoolVarP(&restoreCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ // TODO: add ContainerStateCheckpointed
+ flags.BoolVar(&restoreCommand.TcpEstablished, "tcp-established", false, "Checkpoint a container with established TCP connections")
+
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func restoreCmd(c *cliconfig.RestoreValues) error {
if rootless.IsRootless() {
return errors.New("restoring a container requires root")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
options := libpod.ContainerCheckpointOptions{
- Keep: c.Bool("keep"),
- TCPEstablished: c.Bool("tcp-established"),
- }
-
- if err := checkAllAndLatest(c); err != nil {
- return err
+ Keep: c.Keep,
+ TCPEstablished: c.TcpEstablished,
}
- containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateExited, "checkpointed")
+ containers, lastError := getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateExited, "checkpointed")
for _, ctr := range containers {
if err = ctr.Restore(context.TODO(), options); err != nil {
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index 7c0569b78..2dcb491d7 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -2,78 +2,92 @@ package main
import (
"fmt"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- rmFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Remove all containers",
- },
- cli.BoolFlag{
- Name: "force, f",
- Usage: "Force removal of a running container. The default is false",
- },
- LatestFlag,
- cli.BoolFlag{
- Name: "volumes, v",
- Usage: "Remove the volumes associated with the container (Not implemented yet)",
- },
- }
+ rmCommand cliconfig.RmValues
rmDescription = fmt.Sprintf(`
Podman rm will remove one or more containers from the host.
The container name or ID can be used. This does not remove images.
Running containers will not be removed without the -f option.
`)
- rmCommand = cli.Command{
- Name: "rm",
- Usage: "Remove one or more containers",
- Description: rmDescription,
- Flags: sortFlags(rmFlags),
- Action: rmCmd,
- ArgsUsage: "",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _rmCommand = &cobra.Command{
+ Use: "rm",
+ Short: "Remove one or more containers",
+ Long: rmDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ rmCommand.InputArgs = args
+ rmCommand.GlobalFlags = MainGlobalOpts
+ return rmCmd(&rmCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman rm ctrID
+ podman rm mywebserver myflaskserver 860a4b23
+ podman rm --force --all`,
}
)
+func init() {
+ rmCommand.Command = _rmCommand
+ rmCommand.SetUsageTemplate(UsageTemplate())
+ flags := rmCommand.Flags()
+ flags.BoolVarP(&rmCommand.All, "all", "a", false, "Remove all containers")
+ flags.BoolVarP(&rmCommand.Force, "force", "f", false, "Force removal of a running container. The default is false")
+ flags.BoolVarP(&rmCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove the volumes associated with the container")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
// rmCmd removes one or more containers from the host
-func rmCmd(c *cli.Context) error {
+func rmCmd(c *cliconfig.RmValues) error {
var (
deleteFuncs []shared.ParallelWorkerInput
)
ctx := getContext()
- if err := validateFlags(c, rmFlags); err != nil {
- return err
- }
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- if err := checkAllAndLatest(c); err != nil {
- return err
- }
-
- delContainers, err := getAllOrLatestContainers(c, runtime, -1, "all")
+ failureCnt := 0
+ delContainers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
if err != nil {
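+ // With --force, remove the named containers directly from storage even if libpod no longer tracks them.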
+ if c.Force && len(c.InputArgs) > 0 {
+ if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ err = nil
+ } else {
+ failureCnt++
+ }
+ runtime.RemoveContainersFromStorage(c.InputArgs)
+ }
if len(delContainers) == 0 {
+ if err != nil && failureCnt == 0 {
+ exitCode = 1
+ }
return err
}
- fmt.Println(err.Error())
+ if err != nil {
+ fmt.Println(err.Error())
+ }
}
for _, container := range delContainers {
con := container
f := func() error {
- return runtime.RemoveContainer(ctx, con, c.Bool("force"))
+ return runtime.RemoveContainer(ctx, con, c.Force, c.Volumes)
}
deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
@@ -83,11 +97,22 @@ func rmCmd(c *cli.Context) error {
}
maxWorkers := shared.Parallelize("rm")
if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
+ maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
// Run the parallel funcs
deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
- return printParallelOutput(deleteErrors, errCount)
+ err = printParallelOutput(deleteErrors, errCount)
+ if err != nil {
+ for _, result := range deleteErrors {
+ if result != nil && errors.Cause(result) != image.ErrNoSuchCtr {
+ failureCnt++
+ }
+ }
+ if failureCnt == 0 {
+ exitCode = 1
+ }
+ }
+ return err
}
diff --git a/cmd/podman/rmi.go b/cmd/podman/rmi.go
index 39757272e..709ed14e0 100644
--- a/cmd/podman/rmi.go
+++ b/cmd/podman/rmi.go
@@ -4,66 +4,67 @@ import (
"fmt"
"os"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/containers/storage"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ rmiCommand cliconfig.RmiValues
rmiDescription = "Removes one or more locally stored images."
- rmiFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Remove all images",
- },
- cli.BoolFlag{
- Name: "force, f",
- Usage: "Force removal of the image",
+ _rmiCommand = &cobra.Command{
+ Use: "rmi",
+ Short: "Removes one or more images from local storage",
+ Long: rmiDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ rmiCommand.InputArgs = args
+ rmiCommand.GlobalFlags = MainGlobalOpts
+ return rmiCmd(&rmiCommand)
},
+ Example: `podman rmi imageID
+ podman rmi --force alpine
+ podman rmi c4dfb1609ee2 93fd78260bd1 c0ed59d05ff7`,
}
- rmiCommand = cli.Command{
- Name: "rmi",
- Usage: "Remove one or more images from local storage",
- Description: rmiDescription,
- Action: rmiCmd,
- ArgsUsage: "IMAGE-NAME-OR-ID [...]",
- Flags: sortFlags(rmiFlags),
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+)
+
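+// imageNotFound reports whether err means the requested image does not exist, locally or over the varlink API.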
+func imageNotFound(err error) bool {
+ if errors.Cause(err) == image.ErrNoSuchImage {
+ return true
}
- rmImageCommand = cli.Command{
- Name: "rm",
- Usage: "Removes one or more images from local storage",
- Description: rmiDescription,
- Action: rmiCmd,
- ArgsUsage: "IMAGE-NAME-OR-ID [...]",
- Flags: rmiFlags,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ switch err.(type) {
+ case *iopodman.ImageNotFound:
+ return true
}
-)
+ return false
+}
+
+func init() {
+ rmiCommand.Command = _rmiCommand
+ rmiCommand.SetUsageTemplate(UsageTemplate())
+ flags := rmiCommand.Flags()
+ flags.BoolVarP(&rmiCommand.All, "all", "a", false, "Remove all images")
+ flags.BoolVarP(&rmiCommand.Force, "force", "f", false, "Force removal of the image")
+}
-func rmiCmd(c *cli.Context) error {
+func rmiCmd(c *cliconfig.RmiValues) error {
var (
- lastError error
- deleted bool
- deleteErr error
- msg string
+ lastError error
+ failureCnt int
)
ctx := getContext()
- if err := validateFlags(c, rmiFlags); err != nil {
- return err
- }
- removeAll := c.Bool("all")
- runtime, err := adapter.GetRuntime(c)
+ removeAll := c.All
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- args := c.Args()
+ args := c.InputArgs
if len(args) == 0 && !removeAll {
return errors.Errorf("image name or ID must be specified")
}
@@ -74,19 +75,21 @@ func rmiCmd(c *cli.Context) error {
images := args[:]
removeImage := func(img *adapter.ContainerImage) {
- deleted = true
- msg, deleteErr = runtime.RemoveImage(ctx, img, c.Bool("force"))
- if deleteErr != nil {
- if errors.Cause(deleteErr) == storage.ErrImageUsedByContainer {
+ msg, err := runtime.RemoveImage(ctx, img, c.Force)
+ if err != nil {
+ if errors.Cause(err) == storage.ErrImageUsedByContainer {
fmt.Printf("A container associated with containers/storage, i.e. via Buildah, CRI-O, etc., may be associated with this image: %-12.12s\n", img.ID())
}
+ if !imageNotFound(err) {
+ failureCnt++
+ }
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
- lastError = deleteErr
- } else {
- fmt.Println(msg)
+ lastError = err
+ return
}
+ fmt.Println(msg)
}
if removeAll {
@@ -131,22 +134,21 @@ func rmiCmd(c *cli.Context) error {
for _, i := range images {
newImage, err := runtime.NewImageFromLocal(i)
if err != nil {
- fmt.Fprintln(os.Stderr, err)
+ if lastError != nil {
+ if !imageNotFound(lastError) {
+ failureCnt++
+ }
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = err
continue
}
removeImage(newImage)
}
}
- // If the user calls remove all and there are none, it should not be a
- // non-zero exit
- if !deleted && removeAll {
- return nil
- }
- // the user tries to remove images that do not exist, that should be a
- // non-zero exit
- if !deleted {
- return errors.Errorf("no valid images to delete")
+ if imageNotFound(lastError) && failureCnt == 0 {
+ exitCode = 1
}
return lastError
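For reference, a minimal standalone sketch of the urfave/cli-to-cobra pattern applied in rmi.go above and throughout this series: a typed options struct replaces c.Bool()/c.String() lookups, the cobra.Command lives in a package-level variable, and flags are bound in init(). The names below are illustrative, not taken from the podman tree.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// exampleValues stands in for the cliconfig.*Values structs: each flag is
// bound to a typed field, so the command body never looks flags up by name.
type exampleValues struct {
	Force bool
	All   bool
}

var exampleOpts exampleValues

var exampleCmd = &cobra.Command{
	Use:   "example IMAGE [IMAGE...]",
	Short: "Demonstrate the typed flag binding used in this refactor",
	RunE: func(cmd *cobra.Command, args []string) error {
		// args plays the role of InputArgs in the podman commands above.
		fmt.Printf("force=%v all=%v args=%v\n", exampleOpts.Force, exampleOpts.All, args)
		return nil
	},
}

func init() {
	flags := exampleCmd.Flags()
	flags.BoolVarP(&exampleOpts.Force, "force", "f", false, "Force removal")
	flags.BoolVarP(&exampleOpts.All, "all", "a", false, "Act on all images")
}

func main() {
	if err := exampleCmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}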
diff --git a/cmd/podman/run.go b/cmd/podman/run.go
index 3ef546940..bea9b1743 100644
--- a/cmd/podman/run.go
+++ b/cmd/podman/run.go
@@ -8,49 +8,64 @@ import (
"strconv"
"strings"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
-var runDescription = "Runs a command in a new container from the given image"
-
-var runFlags []cli.Flag = append(createFlags, cli.BoolTFlag{
- Name: "sig-proxy",
- Usage: "Proxy received signals to the process (default true)",
-})
-
-var runCommand = cli.Command{
- Name: "run",
- Usage: "Run a command in a new container",
- Description: runDescription,
- Flags: sortFlags(runFlags),
- Action: runCmd,
- ArgsUsage: "IMAGE [COMMAND [ARG...]]",
- HideHelp: true,
- SkipArgReorder: true,
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+var (
+ runCommand cliconfig.RunValues
+
+ runDescription = "Runs a command in a new container from the given image"
+ _runCommand = &cobra.Command{
+ Use: "run",
+ Short: "Run a command in a new container",
+ Long: runDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ runCommand.InputArgs = args
+ runCommand.GlobalFlags = MainGlobalOpts
+ return runCmd(&runCommand)
+ },
+ Example: `podman run imageID ls -alF /etc
+ podman run --net=host imageID dnf -y install java
+ podman run --volume /var/hostdir:/var/ctrdir -i -t fedora /bin/bash`,
+ }
+)
+
+func init() {
+ runCommand.Command = _runCommand
+ runCommand.SetUsageTemplate(UsageTemplate())
+ flags := runCommand.Flags()
+ flags.SetInterspersed(false)
+ flags.Bool("sig-proxy", true, "Proxy received signals to the process (default true)")
+ getCreateFlags(&runCommand.PodmanCommand)
}
-func runCmd(c *cli.Context) error {
- if err := createInit(c); err != nil {
+func runCmd(c *cliconfig.RunValues) error {
+ if c.Bool("trace") {
+ span, _ := opentracing.StartSpanFromContext(Ctx, "runCmd")
+ defer span.Finish()
+ }
+
+ if err := createInit(&c.PodmanCommand); err != nil {
return err
}
if os.Geteuid() != 0 {
rootless.SetSkipStorageSetup(true)
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- ctr, createConfig, err := createContainer(c, runtime)
+ ctr, createConfig, err := createContainer(&c.PodmanCommand, runtime)
if err != nil {
return err
}
@@ -65,7 +80,8 @@ func runCmd(c *cli.Context) error {
ctx := getContext()
// Handle detached start
if createConfig.Detach {
- if err := ctr.Start(ctx); err != nil {
+ // if the container was created as part of a pod, also start its dependencies, if any.
+ if err := ctr.Start(ctx, c.IsSet("pod")); err != nil {
// This means the command did not exist
exitCode = 127
if strings.Index(err.Error(), "permission denied") > -1 {
@@ -110,14 +126,22 @@ func runCmd(c *cli.Context) error {
}
}
}
- if err := startAttachCtr(ctr, outputStream, errorStream, inputStream, c.String("detach-keys"), c.BoolT("sig-proxy"), true); err != nil {
+ // if the container was created as part of a pod, also start its dependencies, if any.
+ if err := startAttachCtr(ctr, outputStream, errorStream, inputStream, c.String("detach-keys"), c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
+ // We've manually detached from the container
+ // Do not perform cleanup, or wait for container exit code
+ // Just exit immediately
+ if errors.Cause(err) == libpod.ErrDetach {
+ exitCode = 0
+ return nil
+ }
// This means the command did not exist
exitCode = 127
if strings.Index(err.Error(), "permission denied") > -1 {
exitCode = 126
}
if c.IsSet("rm") {
- if deleteError := runtime.RemoveContainer(ctx, ctr, true); deleteError != nil {
+ if deleteError := runtime.RemoveContainer(ctx, ctr, true, false); deleteError != nil {
logrus.Errorf("unable to remove container %s after failing to start and attach to it", ctr.ID())
}
}
@@ -140,28 +164,12 @@ func runCmd(c *cli.Context) error {
exitCode = int(ecode)
}
- if createConfig.Rm {
- return runtime.RemoveContainer(ctx, ctr, true)
- }
-
- if err := ctr.Cleanup(ctx); err != nil {
- // If the container has been removed already, no need to error on cleanup
- // Also, if it was restarted, don't error either
- if errors.Cause(err) == libpod.ErrNoSuchCtr ||
- errors.Cause(err) == libpod.ErrCtrRemoved ||
- errors.Cause(err) == libpod.ErrCtrStateInvalid {
- return nil
- }
-
- return err
- }
-
return nil
}
// Read a container's exit file
func readExitFile(runtimeTmp, ctrID string) (int, error) {
- exitFile := filepath.Join(runtimeTmp, "exits", ctrID)
+ exitFile := filepath.Join(runtimeTmp, "exits", fmt.Sprintf("%s-old", ctrID))
logrus.Debugf("Attempting to read container %s exit code from file %s", ctrID, exitFile)
diff --git a/cmd/podman/run_test.go b/cmd/podman/run_test.go
index 33c0a4bfe..5ea39e457 100644
--- a/cmd/podman/run_test.go
+++ b/cmd/podman/run_test.go
@@ -4,24 +4,19 @@ import (
"runtime"
"testing"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/pkg/inspect"
cc "github.com/containers/libpod/pkg/spec"
- units "github.com/docker/go-units"
+ "github.com/docker/go-units"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
- "github.com/urfave/cli"
)
var (
- cmd = []string{"podman", "test", "alpine"}
- CLI *cli.Context
- testCommand = cli.Command{
- Name: "test",
- Flags: sortFlags(createFlags),
- Action: testCmd,
- HideHelp: true,
- }
+ cmd = []string{"podman", "test", "alpine"}
+ CLI *cliconfig.PodmanCommand
)
// generates a mocked ImageData structure based on alpine
@@ -53,23 +48,29 @@ func generateAlpineImageData() *inspect.ImageData {
}
// sets a global CLI
-func testCmd(c *cli.Context) error {
- CLI = c
+func testCmd(c *cobra.Command) error {
+ CLI = &cliconfig.PodmanCommand{Command: c}
return nil
}
// creates the mocked cli pointing to our create flags
// global flags like log-level are not implemented
-func createCLI() cli.App {
- a := cli.App{
- Commands: []cli.Command{
- testCommand,
+func createCLI(args []string) *cliconfig.PodmanCommand {
+ var testCommand = &cliconfig.PodmanCommand{
+ Command: &cobra.Command{
+ Use: "test",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return testCmd(cmd)
+ },
},
}
- return a
+ rootCmd := testCommand
+ getCreateFlags(rootCmd)
+ rootCmd.ParseFlags(args)
+ return rootCmd
}
-func getRuntimeSpec(c *cli.Context) (*spec.Spec, error) {
+func getRuntimeSpec(c *cliconfig.PodmanCommand) (*spec.Spec, error) {
/*
TODO: This test has never worked. Need to install content
runtime, err := getRuntime(c)
@@ -98,10 +99,11 @@ func TestPIDsLimit(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip("seccomp, which is enabled by default, is only supported on Linux")
}
- a := createCLI()
args := []string{"--pids-limit", "22"}
- a.Run(append(cmd, args...))
- runtimeSpec, err := getRuntimeSpec(CLI)
+ a := createCLI(args)
+ a.InputArgs = args
+ //a.Run(append(cmd, args...))
+ runtimeSpec, err := getRuntimeSpec(a)
if err != nil {
t.Fatalf(err.Error())
}
@@ -116,10 +118,10 @@ func TestBLKIOWeightDevice(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip("seccomp, which is enabled by default, is only supported on Linux")
}
- a := createCLI()
args := []string{"--blkio-weight-device", "/dev/zero:100"}
- a.Run(append(cmd, args...))
- runtimeSpec, err := getRuntimeSpec(CLI)
+ a := createCLI(args)
+ a.InputArgs = args
+ runtimeSpec, err := getRuntimeSpec(a)
if err != nil {
t.Fatalf(err.Error())
}
@@ -134,10 +136,11 @@ func TestMemorySwap(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip("seccomp, which is enabled by default, is only supported on Linux")
}
- a := createCLI()
args := []string{"--memory-swap", "45m", "--memory", "40m"}
- a.Run(append(cmd, args...))
- runtimeSpec, err := getRuntimeSpec(CLI)
+ a := createCLI(args)
+ a.InputArgs = args
+ //a.Run(append(cmd, args...))
+ runtimeSpec, err := getRuntimeSpec(a)
if err != nil {
t.Fatalf(err.Error())
}
diff --git a/cmd/podman/runlabel.go b/cmd/podman/runlabel.go
index 38905b5ca..d466651f3 100644
--- a/cmd/podman/runlabel.go
+++ b/cmd/podman/runlabel.go
@@ -7,88 +7,61 @@ import (
"strings"
"github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/utils"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- runlabelFlags = []cli.Flag{
- cli.StringFlag{
- Name: "authfile",
- Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override. ",
- },
- cli.BoolFlag{
- Name: "display",
- Usage: "Preview the command that the label would run",
- },
- cli.StringFlag{
- Name: "cert-dir",
- Usage: "`Pathname` of a directory containing TLS certificates and keys",
- },
- cli.StringFlag{
- Name: "creds",
- Usage: "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry",
- },
- cli.StringFlag{
- Name: "name",
- Usage: "Assign a name to the container",
- },
- cli.StringFlag{
- Name: "opt1",
- Usage: "Optional parameter to pass for install",
- Hidden: true,
- },
- cli.StringFlag{
- Name: "opt2",
- Usage: "Optional parameter to pass for install",
- Hidden: true,
- },
- cli.StringFlag{
- Name: "opt3",
- Usage: "Optional parameter to pass for install",
- Hidden: true,
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Suppress output information when installing images",
- },
- cli.BoolFlag{
- Name: "pull, p",
- Usage: "Pull the image if it does not exist locally prior to executing the label contents",
- },
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "`Pathname` of signature policy file (not usually used)",
- },
- cli.BoolTFlag{
- Name: "tls-verify",
- Usage: "Require HTTPS and verify certificates when contacting registries (default: true)",
- },
- }
-
+ runlabelCommand cliconfig.RunlabelValues
runlabelDescription = `
Executes a command as described by a container image label.
`
- runlabelCommand = cli.Command{
- Name: "runlabel",
- Usage: "Execute the command described by an image label",
- Description: runlabelDescription,
- Flags: sortFlags(runlabelFlags),
- Action: runlabelCmd,
- ArgsUsage: "",
- SkipArgReorder: true,
- OnUsageError: usageErrorHandler,
+ _runlabelCommand = &cobra.Command{
+ Use: "runlabel",
+ Short: "Execute the command described by an image label",
+ Long: runlabelDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ runlabelCommand.InputArgs = args
+ runlabelCommand.GlobalFlags = MainGlobalOpts
+ return runlabelCmd(&runlabelCommand)
+ },
+ Example: `podman container runlabel run imageID
+ podman container runlabel --pull install imageID arg1 arg2
+ podman container runlabel --display run myImage`,
}
)
+func init() {
+ runlabelCommand.Command = _runlabelCommand
+ runlabelCommand.SetUsageTemplate(UsageTemplate())
+ flags := runlabelCommand.Flags()
+ flags.StringVar(&runlabelCommand.Authfile, "authfile", "", "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&runlabelCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
+ flags.StringVar(&runlabelCommand.Creds, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
+ flags.BoolVar(&runlabelCommand.Display, "display", false, "Preview the command that the label would run")
+ flags.StringVar(&runlabelCommand.Name, "name", "", "Assign a name to the container")
+
+ flags.StringVar(&runlabelCommand.Opt1, "opt1", "", "Optional parameter to pass for install")
+ flags.StringVar(&runlabelCommand.Opt2, "opt2", "", "Optional parameter to pass for install")
+ flags.StringVar(&runlabelCommand.Opt3, "opt3", "", "Optional parameter to pass for install")
+ flags.MarkHidden("opt1")
+ flags.MarkHidden("opt2")
+ flags.MarkHidden("opt3")
+
+ flags.BoolVarP(&runlabelCommand.Pull, "pull", "p", false, "Pull the image if it does not exist locally prior to executing the label contents")
+ flags.BoolVarP(&runlabelCommand.Quiet, "quiet", "q", false, "Suppress output information when installing images")
+ flags.StringVar(&runlabelCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
+ flags.BoolVar(&runlabelCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries (default: true)")
+}
+
// installCmd gets the data from the command line and calls installImage
// to copy an image from a registry to a local machine
-func runlabelCmd(c *cli.Context) error {
+func runlabelCmd(c *cliconfig.RunlabelValues) error {
var (
imageName string
stdErr, stdOut io.Writer
@@ -105,40 +78,37 @@ func runlabelCmd(c *cli.Context) error {
}
opts := make(map[string]string)
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- args := c.Args()
+ args := c.InputArgs
if len(args) < 2 {
- logrus.Errorf("the runlabel command requires at least 2 arguments: LABEL IMAGE")
- return nil
+ return errors.Errorf("the runlabel command requires at least 2 arguments: LABEL IMAGE")
}
- if err := validateFlags(c, runlabelFlags); err != nil {
- return err
- }
- if c.Bool("display") && c.Bool("quiet") {
+ if c.Display && c.Quiet {
return errors.Errorf("the display and quiet flags cannot be used together.")
}
if len(args) > 2 {
extraArgs = args[2:]
}
- pull := c.Bool("pull")
+ pull := c.Pull
label := args[0]
runlabelImage := args[1]
- if c.IsSet("opt1") {
- opts["opt1"] = c.String("opt1")
+ if c.Flag("opt1").Changed {
+ opts["opt1"] = c.Opt1
}
- if c.IsSet("opt2") {
- opts["opt2"] = c.String("opt2")
+
+ if c.Flag("opt2").Changed {
+ opts["opt2"] = c.Opt2
}
- if c.IsSet("opt3") {
- opts["opt3"] = c.String("opt3")
+ if c.Flag("opt3").Changed {
+ opts["opt3"] = c.Opt3
}
ctx := getContext()
@@ -147,21 +117,21 @@ func runlabelCmd(c *cli.Context) error {
stdOut = os.Stdout
stdIn = os.Stdin
- if c.Bool("quiet") {
+ if c.Quiet {
stdErr = nil
stdOut = nil
stdIn = nil
}
dockerRegistryOptions := image.DockerRegistryOptions{
- DockerCertPath: c.String("cert-dir"),
+ DockerCertPath: c.CertDir,
}
- if c.IsSet("tls-verify") {
- dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ if c.Flag("tls-verify").Changed {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
- authfile := getAuthFile(c.String("authfile"))
- runLabel, imageName, err := shared.GetRunlabel(label, runlabelImage, ctx, runtime, pull, c.String("creds"), dockerRegistryOptions, authfile, c.String("signature-policy"), stdOut)
+ authfile := getAuthFile(c.Authfile)
+ runLabel, imageName, err := shared.GetRunlabel(label, runlabelImage, ctx, runtime, pull, c.Creds, dockerRegistryOptions, authfile, c.SignaturePolicy, stdOut)
if err != nil {
return err
}
@@ -169,13 +139,13 @@ func runlabelCmd(c *cli.Context) error {
return errors.Errorf("%s does not have a label of %s", runlabelImage, label)
}
- cmd, env, err := shared.GenerateRunlabelCommand(runLabel, imageName, c.String("name"), opts, extraArgs)
+ cmd, env, err := shared.GenerateRunlabelCommand(runLabel, imageName, c.Name, opts, extraArgs)
if err != nil {
return err
}
- if !c.Bool("quiet") {
+ if !c.Quiet {
fmt.Printf("Command: %s\n", strings.Join(cmd, " "))
- if c.Bool("display") {
+ if c.Display {
return nil
}
}
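runlabel.go above replaces urfave/cli's c.IsSet with pflag's Flag(name).Changed and hides the opt1/opt2/opt3 flags with MarkHidden. A small sketch of both mechanisms, with illustrative names:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

var opt1 string

var runlabelLike = &cobra.Command{
	Use: "runlabel LABEL IMAGE",
	RunE: func(cmd *cobra.Command, args []string) error {
		opts := map[string]string{}
		// Changed is true only when the user passed the flag explicitly,
		// which is the cobra/pflag equivalent of urfave/cli's c.IsSet.
		if cmd.Flag("opt1").Changed {
			opts["opt1"] = opt1
		}
		fmt.Println("opts:", opts)
		return nil
	},
}

func main() {
	flags := runlabelLike.Flags()
	flags.StringVar(&opt1, "opt1", "", "Optional parameter to pass for install")
	flags.MarkHidden("opt1") // still accepted, but omitted from --help output
	if err := runlabelLike.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}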
diff --git a/cmd/podman/save.go b/cmd/podman/save.go
index 325140b76..161540deb 100644
--- a/cmd/podman/save.go
+++ b/cmd/podman/save.go
@@ -1,92 +1,85 @@
package main
import (
- "fmt"
- "io"
"os"
"strings"
- "github.com/containers/image/directory"
- dockerarchive "github.com/containers/image/docker/archive"
- "github.com/containers/image/docker/reference"
- "github.com/containers/image/manifest"
- ociarchive "github.com/containers/image/oci/archive"
- "github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- libpodImage "github.com/containers/libpod/libpod/image"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
+ "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
const (
ociManifestDir = "oci-dir"
+ ociArchive = "oci-archive"
v2s2ManifestDir = "docker-dir"
+ v2s2Archive = "docker-archive"
)
+var validFormats = []string{ociManifestDir, ociArchive, v2s2ManifestDir, v2s2Archive}
+
var (
- saveFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "compress",
- Usage: "Compress tarball image layers when saving to a directory using the 'dir' transport. (default is same compression type as source)",
- },
- cli.StringFlag{
- Name: "output, o",
- Usage: "Write to a file, default is STDOUT",
- Value: "/dev/stdout",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Suppress the output",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Save image to oci-archive, oci-dir (directory with oci manifest type), docker-dir (directory with v2s2 manifest type)",
- },
- }
+ saveCommand cliconfig.SaveValues
saveDescription = `
Save an image to docker-archive or oci-archive on the local machine.
Default is docker-archive`
- saveCommand = cli.Command{
- Name: "save",
- Usage: "Save image to an archive",
- Description: saveDescription,
- Flags: sortFlags(saveFlags),
- Action: saveCmd,
- ArgsUsage: "",
- SkipArgReorder: true,
- OnUsageError: usageErrorHandler,
+ _saveCommand = &cobra.Command{
+ Use: "save",
+ Short: "Save image to an archive",
+ Long: saveDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ saveCommand.InputArgs = args
+ saveCommand.GlobalFlags = MainGlobalOpts
+ return saveCmd(&saveCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ format, err := cmd.Flags().GetString("format")
+ if err != nil {
+ return err
+ }
+ if !util.StringInSlice(format, validFormats) {
+ return errors.Errorf("format value must be one of %s", strings.Join(validFormats, " "))
+ }
+ return nil
+ },
+ Example: `podman save --quiet -o myimage.tar imageID
+ podman save --format docker-dir -o ubuntu-dir ubuntu
+ podman save > alpine-all.tar alpine:latest`,
}
)
+func init() {
+ saveCommand.Command = _saveCommand
+ saveCommand.SetUsageTemplate(UsageTemplate())
+ flags := saveCommand.Flags()
+ flags.BoolVar(&saveCommand.Compress, "compress", false, "Compress tarball image layers when saving to a directory using the 'dir' transport. (default is same compression type as source)")
+ flags.StringVar(&saveCommand.Format, "format", v2s2Archive, "Save image to oci-archive, oci-dir (directory with oci manifest type), docker-archive, docker-dir (directory with v2s2 manifest type)")
+ flags.StringVarP(&saveCommand.Output, "output", "o", "/dev/stdout", "Write to a file, default is STDOUT")
+ flags.BoolVarP(&saveCommand.Quiet, "quiet", "q", false, "Suppress the output")
+}
+
// saveCmd saves the image to either docker-archive or oci
-func saveCmd(c *cli.Context) error {
- args := c.Args()
+func saveCmd(c *cliconfig.SaveValues) error {
+ args := c.InputArgs
if len(args) == 0 {
return errors.Errorf("need at least 1 argument")
}
- if err := validateFlags(c, saveFlags); err != nil {
- return err
- }
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not create runtime")
}
defer runtime.Shutdown(false)
- if c.IsSet("compress") && (c.String("format") != ociManifestDir && c.String("format") != v2s2ManifestDir && c.String("format") == "") {
+ if c.Flag("compress").Changed && (c.Format != ociManifestDir && c.Format != v2s2ManifestDir && c.Format == "") {
return errors.Errorf("--compress can only be set when --format is either 'oci-dir' or 'docker-dir'")
}
- var writer io.Writer
- if !c.Bool("quiet") {
- writer = os.Stderr
- }
-
- output := c.String("output")
+ output := c.Output
if output == "/dev/stdout" {
fi := os.Stdout
if logrus.IsTerminal(fi) {
@@ -96,87 +89,5 @@ func saveCmd(c *cli.Context) error {
if err := validateFileName(output); err != nil {
return err
}
-
- source := args[0]
- newImage, err := runtime.ImageRuntime().NewFromLocal(source)
- if err != nil {
- return err
- }
-
- var destRef types.ImageReference
- var manifestType string
- switch c.String("format") {
- case "oci-archive":
- destImageName := imageNameForSaveDestination(newImage, source)
- destRef, err = ociarchive.NewReference(output, destImageName) // destImageName may be ""
- if err != nil {
- return errors.Wrapf(err, "error getting OCI archive ImageReference for (%q, %q)", output, destImageName)
- }
- case "oci-dir":
- destRef, err = directory.NewReference(output)
- if err != nil {
- return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
- }
- manifestType = imgspecv1.MediaTypeImageManifest
- case "docker-dir":
- destRef, err = directory.NewReference(output)
- if err != nil {
- return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
- }
- manifestType = manifest.DockerV2Schema2MediaType
- case "docker-archive", "":
- dst := output
- destImageName := imageNameForSaveDestination(newImage, source)
- if destImageName != "" {
- dst = fmt.Sprintf("%s:%s", dst, destImageName)
- }
- destRef, err = dockerarchive.ParseReference(dst) // FIXME? Add dockerarchive.NewReference
- if err != nil {
- return errors.Wrapf(err, "error getting Docker archive ImageReference for %q", dst)
- }
- default:
- return errors.Errorf("unknown format option %q", c.String("format"))
- }
-
- // supports saving multiple tags to the same tar archive
- var additionaltags []reference.NamedTagged
- if len(args) > 1 {
- additionaltags, err = libpodImage.GetAdditionalTags(args[1:])
- if err != nil {
- return err
- }
- }
- if err := newImage.PushImageToReference(getContext(), destRef, manifestType, "", "", writer, c.Bool("compress"), libpodImage.SigningOptions{}, &libpodImage.DockerRegistryOptions{}, additionaltags); err != nil {
- if err2 := os.Remove(output); err2 != nil {
- logrus.Errorf("error deleting %q: %v", output, err)
- }
- return errors.Wrapf(err, "unable to save %q", args)
- }
-
- return nil
-}
-
-// imageNameForSaveDestination returns a Docker-like reference appropriate for saving img,
-// which the user referred to as imgUserInput; or an empty string, if there is no appropriate
-// reference.
-func imageNameForSaveDestination(img *libpodImage.Image, imgUserInput string) string {
- if strings.Contains(img.ID(), imgUserInput) {
- return ""
- }
-
- prepend := ""
- localRegistryPrefix := fmt.Sprintf("%s/", libpodImage.DefaultLocalRegistry)
- if !strings.HasPrefix(imgUserInput, localRegistryPrefix) {
- // we need to check if localhost was added to the image name in NewFromLocal
- for _, name := range img.Names() {
- // If the user is saving an image in the localhost registry, getLocalImage need
- // a name that matches the format localhost/<tag1>:<tag2> or localhost/<tag>:latest to correctly
- // set up the manifest and save.
- if strings.HasPrefix(name, localRegistryPrefix) && (strings.HasSuffix(name, imgUserInput) || strings.HasSuffix(name, fmt.Sprintf("%s:latest", imgUserInput))) {
- prepend = localRegistryPrefix
- break
- }
- }
- }
- return fmt.Sprintf("%s%s", prepend, imgUserInput)
+ return runtime.SaveImage(getContext(), c)
}
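save.go above moves the --format check into cobra's Args hook, which runs after flag parsing and before RunE. A minimal sketch of that validation pattern (flag values and command name are illustrative):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"
)

var validFormats = []string{"oci-archive", "oci-dir", "docker-archive", "docker-dir"}

func stringInSlice(s string, list []string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

var saveLike = &cobra.Command{
	Use: "save IMAGE",
	Args: func(cmd *cobra.Command, args []string) error {
		format, err := cmd.Flags().GetString("format")
		if err != nil {
			return err
		}
		if !stringInSlice(format, validFormats) {
			return fmt.Errorf("format value must be one of %s", strings.Join(validFormats, " "))
		}
		return cobra.MinimumNArgs(1)(cmd, args)
	},
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Println("would save", args[0])
		return nil
	},
}

func main() {
	saveLike.Flags().String("format", "docker-archive", "Save image to oci-archive, oci-dir, docker-archive, or docker-dir")
	if err := saveLike.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}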
diff --git a/cmd/podman/search.go b/cmd/podman/search.go
index 81469a0f8..f63131c84 100644
--- a/cmd/podman/search.go
+++ b/cmd/podman/search.go
@@ -1,20 +1,14 @@
package main
import (
- "context"
- "reflect"
- "strconv"
"strings"
- "github.com/containers/image/docker"
"github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
- "github.com/containers/libpod/libpod/common"
- sysreg "github.com/containers/libpod/pkg/registries"
- "github.com/docker/distribution/reference"
+ "github.com/containers/libpod/libpod/image"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
const (
@@ -23,72 +17,39 @@ const (
)
var (
- searchFlags = []cli.Flag{
- cli.StringFlag{
- Name: "authfile",
- Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override. ",
- },
- cli.StringSliceFlag{
- Name: "filter, f",
- Usage: "Filter output based on conditions provided (default [])",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Change the output format to a Go template",
- },
- cli.IntFlag{
- Name: "limit",
- Usage: "Limit the number of results",
- },
- cli.BoolFlag{
- Name: "no-trunc",
- Usage: "Do not truncate the output",
- },
- cli.BoolTFlag{
- Name: "tls-verify",
- Usage: "Require HTTPS and verify certificates when contacting registries (default: true)",
- },
- }
+ searchCommand cliconfig.SearchValues
searchDescription = `
Search registries for a given image. Can search all the default registries or a specific registry.
Can limit the number of results, and filter the output based on certain conditions.`
- searchCommand = cli.Command{
- Name: "search",
- Usage: "Search registry for image",
- Description: searchDescription,
- Flags: sortFlags(searchFlags),
- Action: searchCmd,
- ArgsUsage: "TERM",
- OnUsageError: usageErrorHandler,
+ _searchCommand = &cobra.Command{
+ Use: "search",
+ Short: "Search registry for image",
+ Long: searchDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ searchCommand.InputArgs = args
+ searchCommand.GlobalFlags = MainGlobalOpts
+ return searchCmd(&searchCommand)
+ },
+ Example: `podman search --filter=is-official --limit 3 alpine
+ podman search registry.fedoraproject.org/ # only works with v2 registries
+ podman search --format "table {{.Index}} {{.Name}}" registry.fedoraproject.org/fedora`,
}
)
-type searchParams struct {
- Index string
- Name string
- Description string
- Stars int
- Official string
- Automated string
-}
-
-type searchOpts struct {
- filter []string
- limit int
- noTrunc bool
- format string
- authfile string
- insecureSkipTLSVerify types.OptionalBool
+func init() {
+ searchCommand.Command = _searchCommand
+ searchCommand.SetUsageTemplate(UsageTemplate())
+ flags := searchCommand.Flags()
+ flags.StringVar(&searchCommand.Authfile, "authfile", "", "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringSliceVarP(&searchCommand.Filter, "filter", "f", []string{}, "Filter output based on conditions provided (default [])")
+ flags.StringVar(&searchCommand.Format, "format", "", "Change the output format to a Go template")
+ flags.IntVar(&searchCommand.Limit, "limit", 0, "Limit the number of results")
+ flags.BoolVar(&searchCommand.NoTrunc, "no-trunc", false, "Do not truncate the output")
+ flags.BoolVar(&searchCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries (default: true)")
}
-type searchFilterParams struct {
- stars int
- isAutomated *bool
- isOfficial *bool
-}
-
-func searchCmd(c *cli.Context) error {
- args := c.Args()
+func searchCmd(c *cliconfig.SearchValues) error {
+ args := c.InputArgs
if len(args) > 1 {
return errors.Errorf("too many arguments. Requires exactly 1")
}
@@ -97,41 +58,29 @@ func searchCmd(c *cli.Context) error {
}
term := args[0]
- // Check if search term has a registry in it
- registry, err := getRegistry(term)
+ filter, err := image.ParseSearchFilter(c.Filter)
if err != nil {
- return errors.Wrapf(err, "error getting registry from %q", term)
- }
- if registry != "" {
- term = term[len(registry)+1:]
- }
-
- if err := validateFlags(c, searchFlags); err != nil {
return err
}
- format := genSearchFormat(c.String("format"))
- opts := searchOpts{
- format: format,
- noTrunc: c.Bool("no-trunc"),
- limit: c.Int("limit"),
- filter: c.StringSlice("filter"),
- authfile: getAuthFile(c.String("authfile")),
- }
- if c.IsSet("tls-verify") {
- opts.insecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ searchOptions := image.SearchOptions{
+ NoTrunc: c.NoTrunc,
+ Limit: c.Limit,
+ Filter: *filter,
+ Authfile: getAuthFile(c.Authfile),
}
- registries, err := getRegistries(registry)
- if err != nil {
- return err
+ if c.Flag("tls-verify").Changed {
+ searchOptions.InsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
- filter, err := parseSearchFilter(&opts)
+ results, err := image.SearchImages(term, searchOptions)
if err != nil {
return err
}
-
- return generateSearchOutput(term, registries, opts, *filter)
+ format := genSearchFormat(c.Format)
+ out := formats.StdoutTemplateArray{Output: searchToGeneric(results), Template: format, Fields: results[0].HeaderMap()}
+ formats.Writer(out).Out()
+ return nil
}
func genSearchFormat(format string) string {
@@ -143,188 +92,9 @@ func genSearchFormat(format string) string {
return "table {{.Index}}\t{{.Name}}\t{{.Description}}\t{{.Stars}}\t{{.Official}}\t{{.Automated}}\t"
}
-func searchToGeneric(params []searchParams) (genericParams []interface{}) {
+func searchToGeneric(params []image.SearchResult) (genericParams []interface{}) {
for _, v := range params {
genericParams = append(genericParams, interface{}(v))
}
return genericParams
}
-
-func (s *searchParams) headerMap() map[string]string {
- v := reflect.Indirect(reflect.ValueOf(s))
- values := make(map[string]string, v.NumField())
-
- for i := 0; i < v.NumField(); i++ {
- key := v.Type().Field(i).Name
- value := key
- values[key] = strings.ToUpper(splitCamelCase(value))
- }
- return values
-}
-
-// getRegistries returns the list of registries to search, depending on an optional registry specification
-func getRegistries(registry string) ([]string, error) {
- var registries []string
- if registry != "" {
- registries = append(registries, registry)
- } else {
- var err error
- registries, err = sysreg.GetRegistries()
- if err != nil {
- return nil, errors.Wrapf(err, "error getting registries to search")
- }
- }
- return registries, nil
-}
-
-func getSearchOutput(term string, registries []string, opts searchOpts, filter searchFilterParams) ([]searchParams, error) {
- // Max number of queries by default is 25
- limit := maxQueries
- if opts.limit != 0 {
- limit = opts.limit
- }
-
- sc := common.GetSystemContext("", opts.authfile, false)
- sc.DockerInsecureSkipTLSVerify = opts.insecureSkipTLSVerify
- sc.SystemRegistriesConfPath = sysreg.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
- var paramsArr []searchParams
- for _, reg := range registries {
- results, err := docker.SearchRegistry(context.TODO(), sc, reg, term, limit)
- if err != nil {
- logrus.Errorf("error searching registry %q: %v", reg, err)
- continue
- }
- index := reg
- arr := strings.Split(reg, ".")
- if len(arr) > 2 {
- index = strings.Join(arr[len(arr)-2:], ".")
- }
-
- // limit is the number of results to output
- // if the total number of results is less than the limit, output all
- // if the limit has been set by the user, output those number of queries
- limit := maxQueries
- if len(results) < limit {
- limit = len(results)
- }
- if opts.limit != 0 && opts.limit < len(results) {
- limit = opts.limit
- }
-
- for i := 0; i < limit; i++ {
- if len(opts.filter) > 0 {
- // Check whether query matches filters
- if !(matchesAutomatedFilter(filter, results[i]) && matchesOfficialFilter(filter, results[i]) && matchesStarFilter(filter, results[i])) {
- continue
- }
- }
- official := ""
- if results[i].IsOfficial {
- official = "[OK]"
- }
- automated := ""
- if results[i].IsAutomated {
- automated = "[OK]"
- }
- description := strings.Replace(results[i].Description, "\n", " ", -1)
- if len(description) > 44 && !opts.noTrunc {
- description = description[:descriptionTruncLength] + "..."
- }
- name := reg + "/" + results[i].Name
- if index == "docker.io" && !strings.Contains(results[i].Name, "/") {
- name = index + "/library/" + results[i].Name
- }
- params := searchParams{
- Index: index,
- Name: name,
- Description: description,
- Official: official,
- Automated: automated,
- Stars: results[i].StarCount,
- }
- paramsArr = append(paramsArr, params)
- }
- }
- return paramsArr, nil
-}
-
-func generateSearchOutput(term string, registries []string, opts searchOpts, filter searchFilterParams) error {
- searchOutput, err := getSearchOutput(term, registries, opts, filter)
- if err != nil {
- return err
- }
- if len(searchOutput) == 0 {
- return nil
- }
- out := formats.StdoutTemplateArray{Output: searchToGeneric(searchOutput), Template: opts.format, Fields: searchOutput[0].headerMap()}
- return formats.Writer(out).Out()
-}
-
-func parseSearchFilter(opts *searchOpts) (*searchFilterParams, error) {
- filterParams := &searchFilterParams{}
- ptrTrue := true
- ptrFalse := false
- for _, filter := range opts.filter {
- arr := strings.Split(filter, "=")
- switch arr[0] {
- case "stars":
- if len(arr) < 2 {
- return nil, errors.Errorf("invalid `stars` filter %q, should be stars=<value>", filter)
- }
- stars, err := strconv.Atoi(arr[1])
- if err != nil {
- return nil, errors.Wrapf(err, "incorrect value type for stars filter")
- }
- filterParams.stars = stars
- break
- case "is-automated":
- if len(arr) == 2 && arr[1] == "false" {
- filterParams.isAutomated = &ptrFalse
- } else {
- filterParams.isAutomated = &ptrTrue
- }
- break
- case "is-official":
- if len(arr) == 2 && arr[1] == "false" {
- filterParams.isOfficial = &ptrFalse
- } else {
- filterParams.isOfficial = &ptrTrue
- }
- break
- default:
- return nil, errors.Errorf("invalid filter type %q", filter)
- }
- }
- return filterParams, nil
-}
-
-func matchesStarFilter(filter searchFilterParams, result docker.SearchResult) bool {
- return result.StarCount >= filter.stars
-}
-
-func matchesAutomatedFilter(filter searchFilterParams, result docker.SearchResult) bool {
- if filter.isAutomated != nil {
- return result.IsAutomated == *filter.isAutomated
- }
- return true
-}
-
-func matchesOfficialFilter(filter searchFilterParams, result docker.SearchResult) bool {
- if filter.isOfficial != nil {
- return result.IsOfficial == *filter.isOfficial
- }
- return true
-}
-
-func getRegistry(image string) (string, error) {
- // It is possible to only have the registry name in the format "myregistry/"
- // if so, just trim the "/" from the end and return the registry name
- if strings.HasSuffix(image, "/") {
- return strings.TrimSuffix(image, "/"), nil
- }
- imgRef, err := reference.Parse(image)
- if err != nil {
- return "", err
- }
- return reference.Domain(imgRef.(reference.Named)), nil
-}
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index f84fb8261..81811e0f2 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -88,6 +88,7 @@ type PsContainerOutput struct {
PIDNS string
User string
UTS string
+ Mounts string
}
// Namespace describes output for ps namespace
@@ -212,11 +213,16 @@ func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput
}
}
+ ports, err := ctr.PortMappings()
+ if err != nil {
+ logrus.Errorf("unable to lookup namespace container for %s", ctr.ID())
+ }
+
pso.ID = cid
pso.Image = imageName
pso.Command = command
pso.Created = created
- pso.Ports = portsToString(ctr.PortMappings())
+ pso.Ports = portsToString(ports)
pso.Names = ctr.Name()
pso.IsInfra = ctr.IsInfra()
pso.Status = status
@@ -228,6 +234,7 @@ func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput
pso.CreatedAt = ctr.CreatedTime()
pso.StartedAt = startedAt
pso.Labels = ctr.Labels()
+ pso.Mounts = strings.Join(ctr.UserVolumes(), " ")
if opts.Namespace {
pso.Cgroup = ns.Cgroup
diff --git a/cmd/podman/shared/pod.go b/cmd/podman/shared/pod.go
index 30dd14845..5f65c40ac 100644
--- a/cmd/podman/shared/pod.go
+++ b/cmd/podman/shared/pod.go
@@ -26,6 +26,10 @@ func GetPodStatus(pod *libpod.Pod) (string, error) {
if err != nil {
return errored, err
}
+ return CreatePodStatusResults(ctrStatuses)
+}
+
+func CreatePodStatusResults(ctrStatuses map[string]libpod.ContainerStatus) (string, error) {
ctrNum := len(ctrStatuses)
if ctrNum == 0 {
return created, nil
diff --git a/cmd/podman/sign.go b/cmd/podman/sign.go
index 22aa07230..6e8f9ee95 100644
--- a/cmd/podman/sign.go
+++ b/cmd/podman/sign.go
@@ -11,60 +11,63 @@ import (
"github.com/containers/image/signature"
"github.com/containers/image/transports"
"github.com/containers/image/transports/alltransports"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/trust"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- signFlags = []cli.Flag{
- cli.StringFlag{
- Name: "sign-by",
- Usage: "Name of the signing key",
- },
- cli.StringFlag{
- Name: "directory, d",
- Usage: "Define an alternate directory to store signatures",
- },
- }
-
+ signCommand cliconfig.SignValues
signDescription = "Create a signature file that can be used later to verify the image"
- signCommand = cli.Command{
- Name: "sign",
- Usage: "Sign an image",
- Description: signDescription,
- Flags: sortFlags(signFlags),
- Action: signCmd,
- ArgsUsage: "IMAGE-NAME [IMAGE-NAME ...]",
- OnUsageError: usageErrorHandler,
+ _signCommand = &cobra.Command{
+ Use: "sign",
+ Short: "Sign an image",
+ Long: signDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ signCommand.InputArgs = args
+ signCommand.GlobalFlags = MainGlobalOpts
+ return signCmd(&signCommand)
+ },
+ Example: `podman sign --sign-by mykey imageID
+ podman sign --sign-by mykey --directory ./mykeydir imageID`,
}
)
+func init() {
+ signCommand.Command = _signCommand
+ signCommand.SetUsageTemplate(UsageTemplate())
+ flags := signCommand.Flags()
+ flags.StringVarP(&signCommand.Directory, "directory", "d", "", "Define an alternate directory to store signatures")
+ flags.StringVar(&signCommand.SignBy, "sign-by", "", "Name of the signing key")
+
+}
+
// SignatureStoreDir defines default directory to store signatures
const SignatureStoreDir = "/var/lib/containers/sigstore"
-func signCmd(c *cli.Context) error {
- args := c.Args()
+func signCmd(c *cliconfig.SignValues) error {
+ args := c.InputArgs
if len(args) < 1 {
return errors.Errorf("at least one image name must be specified")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not create runtime")
}
defer runtime.Shutdown(false)
- signby := c.String("sign-by")
+ signby := c.SignBy
if signby == "" {
return errors.Errorf("please provide an identity")
}
var sigStoreDir string
- if c.IsSet("directory") {
- sigStoreDir = c.String("directory")
+ if c.Flag("directory").Changed {
+ sigStoreDir = c.Directory
if _, err := os.Stat(sigStoreDir); err != nil {
return errors.Wrapf(err, "invalid directory %s", sigStoreDir)
}
diff --git a/cmd/podman/start.go b/cmd/podman/start.go
index f6e1d9882..c645a35c4 100644
--- a/cmd/podman/start.go
+++ b/cmd/podman/start.go
@@ -1,88 +1,85 @@
package main
import (
- "encoding/json"
"fmt"
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
- cc "github.com/containers/libpod/pkg/spec"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- startFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "attach, a",
- Usage: "Attach container's STDOUT and STDERR",
- },
- cli.StringFlag{
- Name: "detach-keys",
- Usage: "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _.",
- },
- cli.BoolFlag{
- Name: "interactive, i",
- Usage: "Keep STDIN open even if not attached",
- },
- cli.BoolTFlag{
- Name: "sig-proxy",
- Usage: "Proxy received signals to the process (default true if attaching, false otherwise)",
- },
- LatestFlag,
- }
+ startCommand cliconfig.StartValues
startDescription = `
podman start
Starts one or more containers. The container name or ID can be used.
`
-
- startCommand = cli.Command{
- Name: "start",
- Usage: "Start one or more containers",
- Description: startDescription,
- Flags: sortFlags(startFlags),
- Action: startCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+ _startCommand = &cobra.Command{
+ Use: "start",
+ Short: "Start one or more containers",
+ Long: startDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ startCommand.InputArgs = args
+ startCommand.GlobalFlags = MainGlobalOpts
+ return startCmd(&startCommand)
+ },
+ Example: `podman start --latest
+ podman start 860a4b231279 5421ab43b45
+ podman start --interactive --attach imageID`,
}
)
-func startCmd(c *cli.Context) error {
- args := c.Args()
- if len(args) < 1 && !c.Bool("latest") {
+func init() {
+ startCommand.Command = _startCommand
+ startCommand.SetUsageTemplate(UsageTemplate())
+ flags := startCommand.Flags()
+ flags.BoolVarP(&startCommand.Attach, "attach", "a", false, "Attach container's STDOUT and STDERR")
+ flags.StringVar(&startCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
+ flags.BoolVarP(&startCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached")
+ flags.BoolVarP(&startCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.BoolVar(&startCommand.SigProxy, "sig-proxy", true, "Proxy received signals to the process (default true if attaching, false otherwise)")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func startCmd(c *cliconfig.StartValues) error {
+ if c.Bool("trace") {
+ span, _ := opentracing.StartSpanFromContext(Ctx, "startCmd")
+ defer span.Finish()
+ }
+
+ args := c.InputArgs
+ if len(args) < 1 && !c.Latest {
return errors.Errorf("you must provide at least one container name or id")
}
- attach := c.Bool("attach")
+ attach := c.Attach
if len(args) > 1 && attach {
return errors.Errorf("you cannot start and attach multiple containers at once")
}
- if err := validateFlags(c, startFlags); err != nil {
- return err
- }
-
- sigProxy := c.BoolT("sig-proxy")
+ sigProxy := c.SigProxy
if sigProxy && !attach {
- if c.IsSet("sig-proxy") {
+ if c.Flag("sig-proxy").Changed {
return errors.Wrapf(libpod.ErrInvalidArg, "you cannot use sig-proxy without --attach")
} else {
sigProxy = false
}
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- if c.Bool("latest") {
+ if c.Latest {
lastCtr, err := runtime.GetLatestContainer()
if err != nil {
return errors.Wrapf(err, "unable to get latest container")
@@ -112,12 +109,20 @@ func startCmd(c *cli.Context) error {
if attach {
inputStream := os.Stdin
- if !c.Bool("interactive") {
+ if !c.Interactive {
inputStream = nil
}
// attach to the container and also start it not already running
- err = startAttachCtr(ctr, os.Stdout, os.Stderr, inputStream, c.String("detach-keys"), sigProxy, !ctrRunning)
+ // If the container is in a pod, also set to recursively start dependencies
+ err = startAttachCtr(ctr, os.Stdout, os.Stderr, inputStream, c.DetachKeys, sigProxy, !ctrRunning, ctr.PodID() != "")
+ if errors.Cause(err) == libpod.ErrDetach {
+ // User manually detached
+ // Exit cleanly immediately
+ exitCode = 0
+ return nil
+ }
+
if ctrRunning {
return err
}
@@ -127,31 +132,30 @@ func startCmd(c *cli.Context) error {
}
if ecode, err := ctr.Wait(); err != nil {
- logrus.Errorf("unable to get exit code of container %s: %q", ctr.ID(), err)
+ if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ // The container may have been removed
+ // Go looking for an exit file
+ ctrExitCode, err := readExitFile(runtime.GetConfig().TmpDir, ctr.ID())
+ if err != nil {
+ logrus.Errorf("Cannot get exit code: %v", err)
+ exitCode = 127
+ } else {
+ exitCode = ctrExitCode
+ }
+ }
} else {
exitCode = int(ecode)
}
- return ctr.Cleanup(ctx)
+ return nil
}
if ctrRunning {
fmt.Println(ctr.ID())
continue
}
// Handle non-attach start
- if err := ctr.Start(ctx); err != nil {
- var createArtifact cc.CreateConfig
- artifact, artifactErr := ctr.GetArtifact("create-config")
- if artifactErr == nil {
- if jsonErr := json.Unmarshal(artifact, &createArtifact); jsonErr != nil {
- logrus.Errorf("unable to detect if container %s should be deleted", ctr.ID())
- }
- if createArtifact.Rm {
- if rmErr := runtime.RemoveContainer(ctx, ctr, true); rmErr != nil {
- logrus.Errorf("unable to remove container %s after it failed to start", ctr.ID())
- }
- }
- }
+ // If the container is in a pod, also set to recursively start dependencies
+ if err := ctr.Start(ctx, ctr.PodID() != ""); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
diff --git a/cmd/podman/stats.go b/cmd/podman/stats.go
index 769354b23..2bbcd0a17 100644
--- a/cmd/podman/stats.go
+++ b/cmd/podman/stats.go
@@ -8,12 +8,13 @@ import (
"time"
tm "github.com/buger/goterm"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/docker/go-units"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
type statsOutputParams struct {
@@ -28,48 +29,46 @@ type statsOutputParams struct {
}
var (
- statsFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Show all containers. Only running containers are shown by default. The default is false",
- },
- cli.BoolFlag{
- Name: "no-stream",
- Usage: "Disable streaming stats and only pull the first result, default setting is false",
+ statsCommand cliconfig.StatsValues
+
+ statsDescription = "display a live stream of one or more containers' resource usage statistics"
+ _statsCommand = &cobra.Command{
+ Use: "stats",
+ Short: "Display percentage of CPU, memory, network I/O, block I/O and PIDs for one or more containers",
+ Long: statsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ statsCommand.InputArgs = args
+ statsCommand.GlobalFlags = MainGlobalOpts
+ return statsCmd(&statsCommand)
},
- cli.StringFlag{
- Name: "format",
- Usage: "Pretty-print container statistics to JSON or using a Go template",
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
},
- cli.BoolFlag{
- Name: "no-reset",
- Usage: "Disable resetting the screen between intervals",
- }, LatestFlag,
- }
-
- statsDescription = "Display a live stream of one or more containers' resource usage statistics"
- statsCommand = cli.Command{
- Name: "stats",
- Usage: "Display percentage of CPU, memory, network I/O, block I/O and PIDs for one or more containers",
- Description: statsDescription,
- Flags: sortFlags(statsFlags),
- Action: statsCmd,
- ArgsUsage: "",
- OnUsageError: usageErrorHandler,
+ Example: `podman stats --all --no-stream
+ podman stats ctrID
+ podman stats --no-stream --format "table {{.ID}} {{.Name}} {{.MemUsage}}" ctrID`,
}
)
-func statsCmd(c *cli.Context) error {
- if err := validateFlags(c, statsFlags); err != nil {
- return err
- }
+func init() {
+ statsCommand.Command = _statsCommand
+ statsCommand.SetUsageTemplate(UsageTemplate())
+ flags := statsCommand.Flags()
+ flags.BoolVarP(&statsCommand.All, "all", "a", false, "Show all containers. Only running containers are shown by default. The default is false")
+ flags.StringVar(&statsCommand.Format, "format", "", "Pretty-print container statistics to JSON or using a Go template")
+ flags.BoolVarP(&statsCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.BoolVar(&statsCommand.NoReset, "no-reset", false, "Disable resetting the screen between intervals")
+ flags.BoolVar(&statsCommand.NoStream, "no-stream", false, "Disable streaming stats and only pull the first result, default setting is false")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+func statsCmd(c *cliconfig.StatsValues) error {
if os.Geteuid() != 0 {
return errors.New("stats is not supported for rootless containers")
}
- all := c.Bool("all")
- latest := c.Bool("latest")
+ all := c.All
+ latest := c.Latest
ctr := 0
if all {
ctr += 1
@@ -77,7 +76,7 @@ func statsCmd(c *cli.Context) error {
if latest {
ctr += 1
}
- if len(c.Args()) > 0 {
+ if len(c.InputArgs) > 0 {
ctr += 1
}
@@ -87,14 +86,14 @@ func statsCmd(c *cli.Context) error {
return errors.Errorf("you must specify --all, --latest, or at least one container")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
times := -1
- if c.Bool("no-stream") {
+ if c.NoStream {
times = 1
}
@@ -102,8 +101,8 @@ func statsCmd(c *cli.Context) error {
var containerFunc func() ([]*libpod.Container, error)
containerFunc = runtime.GetRunningContainers
- if len(c.Args()) > 0 {
- containerFunc = func() ([]*libpod.Container, error) { return runtime.GetContainersByList(c.Args()) }
+ if len(c.InputArgs) > 0 {
+ containerFunc = func() ([]*libpod.Container, error) { return runtime.GetContainersByList(c.InputArgs) }
} else if latest {
containerFunc = func() ([]*libpod.Container, error) {
lastCtr, err := runtime.GetLatestContainer()
@@ -126,7 +125,7 @@ func statsCmd(c *cli.Context) error {
initialStats, err := ctr.GetContainerStats(&libpod.ContainerStats{})
if err != nil {
// when doing "all", dont worry about containers that are not running
- if c.Bool("all") && errors.Cause(err) == libpod.ErrCtrRemoved || errors.Cause(err) == libpod.ErrNoSuchCtr || errors.Cause(err) == libpod.ErrCtrStateInvalid {
+ if c.All && errors.Cause(err) == libpod.ErrCtrRemoved || errors.Cause(err) == libpod.ErrNoSuchCtr || errors.Cause(err) == libpod.ErrCtrStateInvalid {
continue
}
return err
@@ -134,7 +133,7 @@ func statsCmd(c *cli.Context) error {
containerStats[ctr.ID()] = initialStats
}
- format := genStatsFormat(c.String("format"))
+ format := genStatsFormat(c.Format)
step := 1
if times == -1 {
@@ -168,7 +167,7 @@ func statsCmd(c *cli.Context) error {
if err != nil {
return err
}
- if strings.ToLower(format) != formats.JSONString && !c.Bool("no-reset") {
+ if strings.ToLower(format) != formats.JSONString && !c.NoReset {
tm.Clear()
tm.MoveCursor(1, 1)
tm.Flush()
diff --git a/cmd/podman/stop.go b/cmd/podman/stop.go
index 204515c70..67c15b2a8 100644
--- a/cmd/podman/stop.go
+++ b/cmd/podman/stop.go
@@ -3,27 +3,19 @@ package main
import (
"fmt"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- stopFlags = []cli.Flag{
- cli.UintFlag{
- Name: "timeout, time, t",
- Usage: "Seconds to wait for stop before killing the container",
- Value: libpod.CtrRemoveTimeout,
- },
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Stop all running containers",
- }, LatestFlag,
- }
+ stopCommand cliconfig.StopValues
stopDescription = `
podman stop
@@ -31,36 +23,49 @@ var (
A timeout to forcibly stop the container can also be set but defaults to 10
seconds otherwise.
`
-
- stopCommand = cli.Command{
- Name: "stop",
- Usage: "Stop one or more containers",
- Description: stopDescription,
- Flags: sortFlags(stopFlags),
- Action: stopCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
- OnUsageError: usageErrorHandler,
+ _stopCommand = &cobra.Command{
+ Use: "stop",
+ Short: "Stop one or more containers",
+ Long: stopDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ stopCommand.InputArgs = args
+ stopCommand.GlobalFlags = MainGlobalOpts
+ return stopCmd(&stopCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, false)
+ },
+ Example: `podman stop ctrID
+ podman stop --latest
+ podman stop --timeout 2 mywebserver 6e534f14da9d`,
}
)
-func stopCmd(c *cli.Context) error {
-
- if err := checkAllAndLatest(c); err != nil {
- return err
- }
+func init() {
+ stopCommand.Command = _stopCommand
+ stopCommand.SetUsageTemplate(UsageTemplate())
+ flags := stopCommand.Flags()
+ flags.BoolVarP(&stopCommand.All, "all", "a", false, "Stop all running containers")
+ flags.BoolVarP(&stopCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ flags.UintVar(&stopCommand.Timeout, "time", libpod.CtrRemoveTimeout, "Seconds to wait for stop before killing the container")
+ flags.UintVarP(&stopCommand.Timeout, "timeout", "t", libpod.CtrRemoveTimeout, "Seconds to wait for stop before killing the container")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
- if err := validateFlags(c, stopFlags); err != nil {
- return err
+func stopCmd(c *cliconfig.StopValues) error {
+ if c.Bool("trace") {
+ span, _ := opentracing.StartSpanFromContext(Ctx, "stopCmd")
+ defer span.Finish()
}
rootless.SetSkipStorageSetup(true)
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- containers, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+ containers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateRunning, "running")
if err != nil {
if len(containers) == 0 {
return err
@@ -72,8 +77,8 @@ func stopCmd(c *cli.Context) error {
for _, ctr := range containers {
con := ctr
var stopTimeout uint
- if c.IsSet("timeout") {
- stopTimeout = c.Uint("timeout")
+ if c.Flag("timeout").Changed {
+ stopTimeout = c.Timeout
} else {
stopTimeout = ctr.StopTimeout()
}
@@ -92,7 +97,7 @@ func stopCmd(c *cli.Context) error {
maxWorkers := shared.Parallelize("stop")
if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
+ maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
diff --git a/cmd/podman/system.go b/cmd/podman/system.go
index 9596252ad..741b79da5 100644
--- a/cmd/podman/system.go
+++ b/cmd/podman/system.go
@@ -1,29 +1,28 @@
package main
import (
- "sort"
-
- "github.com/urfave/cli"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
)
var (
- systemSubCommands = []cli.Command{
- pruneSystemCommand,
- }
systemDescription = "Manage podman"
- systemCommand = cli.Command{
- Name: "system",
- Usage: "Manage podman",
- Description: systemDescription,
- ArgsUsage: "",
- Subcommands: getSystemSubCommandsSorted(),
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
+
+ systemCommand = cliconfig.PodmanCommand{
+ Command: &cobra.Command{
+ Use: "system",
+ Short: "Manage podman",
+ Long: systemDescription,
+ },
}
)
-func getSystemSubCommandsSorted() []cli.Command {
- systemSubCommands = append(systemSubCommands, getSystemSubCommands()...)
- sort.Sort(commandSortedAlpha{systemSubCommands})
- return systemSubCommands
+var systemCommands = []*cobra.Command{
+ _infoCommand,
+}
+
+func init() {
+ systemCommand.AddCommand(systemCommands...)
+ systemCommand.AddCommand(getSystemSubCommands()...)
+ systemCommand.SetUsageTemplate(UsageTemplate())
}
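The system command above is now just a parent that carries usage text; its children are attached with AddCommand in init(). A small sketch of that parent/child grouping (illustrative names, not podman's):

package main

import "github.com/spf13/cobra"

var (
	systemCmd = &cobra.Command{
		Use:   "system",
		Short: "Manage the tool",
	}
	infoCmd = &cobra.Command{
		Use:   "info",
		Short: "Display system information",
		RunE: func(cmd *cobra.Command, args []string) error {
			cmd.Println("system info goes here")
			return nil
		},
	}
)

func init() {
	// Attach subcommands to the parent, as podman's init() does with AddCommand.
	systemCmd.AddCommand(infoCmd)
}

func main() {
	systemCmd.Execute()
}
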
diff --git a/cmd/podman/system_prune.go b/cmd/podman/system_prune.go
index 64d291560..a823dcad1 100644
--- a/cmd/podman/system_prune.go
+++ b/cmd/podman/system_prune.go
@@ -6,50 +6,50 @@ import (
"os"
"strings"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ pruneSystemCommand cliconfig.SystemPruneValues
pruneSystemDescription = `
podman system prune
Remove unused data
`
- pruneSystemFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Remove all unused data",
+ _pruneSystemCommand = &cobra.Command{
+ Use: "prune",
+ Short: "Remove unused data",
+ Long: pruneSystemDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ pruneSystemCommand.InputArgs = args
+ pruneSystemCommand.GlobalFlags = MainGlobalOpts
+ return pruneSystemCmd(&pruneSystemCommand)
},
- cli.BoolFlag{
- Name: "force, f",
- Usage: "Do not prompt for confirmation",
- },
- cli.BoolFlag{
- Name: "volumes",
- Usage: "Prune volumes",
- },
- }
- pruneSystemCommand = cli.Command{
- Name: "prune",
- Usage: "Remove unused data",
- Description: pruneSystemDescription,
- Action: pruneSystemCmd,
- OnUsageError: usageErrorHandler,
- Flags: pruneSystemFlags,
}
)
-func pruneSystemCmd(c *cli.Context) error {
+func init() {
+ pruneSystemCommand.Command = _pruneSystemCommand
+ pruneSystemCommand.SetUsageTemplate(UsageTemplate())
+ flags := pruneSystemCommand.Flags()
+ flags.BoolVarP(&pruneSystemCommand.All, "all", "a", false, "Remove all unused data")
+ flags.BoolVarP(&pruneSystemCommand.Force, "force", "f", false, "Do not prompt for confirmation")
+ flags.BoolVar(&pruneSystemCommand.Volume, "volumes", false, "Prune volumes")
+
+}
+
+func pruneSystemCmd(c *cliconfig.SystemPruneValues) error {
// Prompt for confirmation if --force is not set
- if !c.Bool("force") {
+ if !c.Force {
reader := bufio.NewReader(os.Stdin)
volumeString := ""
- if c.Bool("volumes") {
+ if c.Volume {
volumeString = `
- all volumes not used by at least one container`
}
@@ -68,7 +68,7 @@ Are you sure you want to continue? [y/N] `, volumeString)
}
}
- runtime, err := adapter.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
@@ -76,7 +76,7 @@ Are you sure you want to continue? [y/N] `, volumeString)
ctx := getContext()
fmt.Println("Deleted Containers")
- lasterr := pruneContainers(runtime, ctx, shared.Parallelize("rm"), false)
+ lasterr := pruneContainers(runtime, ctx, shared.Parallelize("rm"), false, false)
if c.Bool("volumes") {
fmt.Println("Deleted Volumes")
err := volumePrune(runtime, getContext())
@@ -90,7 +90,7 @@ Are you sure you want to continue? [y/N] `, volumeString)
// Call prune; if any cids are returned, print them and then
// return err in case an error also came up
- pruneCids, err := runtime.PruneImages(c.Bool("all"))
+ pruneCids, err := runtime.PruneImages(c.All)
if len(pruneCids) > 0 {
fmt.Println("Deleted Images")
for _, cid := range pruneCids {
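pruneSystemCmd only proceeds when --force is set or the user answers the stdin prompt. A standalone sketch of that kind of y/N confirmation in plain Go (generic wording, not podman's exact prompt):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// confirm prints a prompt and returns true only when the user answers "y" or "yes".
func confirm(prompt string) (bool, error) {
	fmt.Printf("%s [y/N] ", prompt)
	reader := bufio.NewReader(os.Stdin)
	answer, err := reader.ReadString('\n')
	if err != nil {
		return false, err
	}
	answer = strings.ToLower(strings.TrimSpace(answer))
	return answer == "y" || answer == "yes", nil
}

func main() {
	ok, err := confirm("WARNING! This will remove all unused data. Are you sure you want to continue?")
	if err != nil || !ok {
		fmt.Println("aborted")
		return
	}
	fmt.Println("pruning...")
}
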
diff --git a/cmd/podman/system_renumber.go b/cmd/podman/system_renumber.go
new file mode 100644
index 000000000..c8ce628b1
--- /dev/null
+++ b/cmd/podman/system_renumber.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+)
+
+var (
+ renumberCommand cliconfig.SystemRenumberValues
+ renumberDescription = `
+ podman system renumber
+
+ Migrate lock numbers to handle a change in maximum number of locks.
+ Mandatory after the number of locks in libpod.conf is changed.
+`
+
+ _renumberCommand = &cobra.Command{
+ Use: "renumber",
+ Short: "Migrate lock numbers",
+ Long: renumberDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ renumberCommand.InputArgs = args
+ renumberCommand.GlobalFlags = MainGlobalOpts
+ return renumberCmd(&renumberCommand)
+ },
+ }
+)
+
+func init() {
+ renumberCommand.Command = _renumberCommand
+ renumberCommand.SetUsageTemplate(UsageTemplate())
+}
+
+func renumberCmd(c *cliconfig.SystemRenumberValues) error {
+ // We need to pass one extra option to the runtime constructor to
+ // inform the libpod runtime that it should renumber locks on startup.
+ // GetRuntimeRenumber sets that option for us.
+ r, err := libpodruntime.GetRuntimeRenumber(&c.PodmanCommand)
+ if err != nil {
+ return errors.Wrapf(err, "error renumbering locks")
+ }
+ if err := r.Shutdown(false); err != nil {
+ return err
+ }
+
+ return nil
+}
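The comment in renumberCmd describes passing a single extra option into runtime construction so that lock renumbering happens as the runtime starts. A generic functional-options sketch of that idea (the type and option names here are hypothetical, not libpod's real constructor):

package main

import "fmt"

// runtime and its options are illustrative stand-ins, not libpod's types.
type runtime struct {
	renumberLocks bool
}

type runtimeOption func(*runtime)

// withRenumber is the kind of "one extra option" the renumber command passes
// so the runtime migrates lock numbers while it starts up.
func withRenumber() runtimeOption {
	return func(r *runtime) { r.renumberLocks = true }
}

func newRuntime(opts ...runtimeOption) *runtime {
	r := &runtime{}
	for _, opt := range opts {
		opt(r)
	}
	if r.renumberLocks {
		fmt.Println("renumbering locks during startup")
	}
	return r
}

func main() {
	newRuntime(withRenumber())
}
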
diff --git a/cmd/podman/tag.go b/cmd/podman/tag.go
index d19cf69a2..2b9d67066 100644
--- a/cmd/podman/tag.go
+++ b/cmd/podman/tag.go
@@ -1,29 +1,42 @@
package main
import (
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ tagCommand cliconfig.TagValues
+
tagDescription = "Adds one or more additional names to locally-stored image"
- tagCommand = cli.Command{
- Name: "tag",
- Usage: "Add an additional name to a local image",
- Description: tagDescription,
- Action: tagCmd,
- ArgsUsage: "IMAGE-NAME [IMAGE-NAME ...]",
- OnUsageError: usageErrorHandler,
+ _tagCommand = &cobra.Command{
+ Use: "tag",
+ Short: "Add an additional name to a local image",
+ Long: tagDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ tagCommand.InputArgs = args
+ tagCommand.GlobalFlags = MainGlobalOpts
+ return tagCmd(&tagCommand)
+ },
+ Example: `podman tag 0e3bbc2 fedora:latest
+ podman tag imageID:latest myNewImage:newTag
+ podman tag httpd myregistryhost:5000/fedora/httpd:v2`,
}
)
-func tagCmd(c *cli.Context) error {
- args := c.Args()
+func init() {
+ tagCommand.Command = _tagCommand
+ tagCommand.SetUsageTemplate(UsageTemplate())
+}
+
+func tagCmd(c *cliconfig.TagValues) error {
+ args := c.InputArgs
if len(args) < 2 {
return errors.Errorf("image name and at least one new name must be specified")
}
- runtime, err := adapter.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not create runtime")
}
diff --git a/cmd/podman/top.go b/cmd/podman/top.go
index 3012265ea..36d6bb6b4 100644
--- a/cmd/podman/top.go
+++ b/cmd/podman/top.go
@@ -6,11 +6,12 @@ import (
"strings"
"text/tabwriter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
func getDescriptorString() string {
@@ -24,13 +25,7 @@ Format Descriptors:
}
var (
- topFlags = []cli.Flag{
- LatestFlag,
- cli.BoolFlag{
- Name: "list-descriptors",
- Hidden: true,
- },
- }
+ topCommand cliconfig.TopValues
topDescription = fmt.Sprintf(`Display the running processes of the container. Specify format descriptors
to alter the output. You may run "podman top -l pid pcpu seccomp" to print
the process ID, the CPU percentage and the seccomp mode of each process of
@@ -38,24 +33,37 @@ the latest container.
%s
`, getDescriptorString())
- topCommand = cli.Command{
- Name: "top",
- Usage: "Display the running processes of a container",
- Description: topDescription,
- Flags: sortFlags(topFlags),
- Action: topCmd,
- ArgsUsage: "CONTAINER-NAME [format descriptors]",
- SkipArgReorder: true,
- OnUsageError: usageErrorHandler,
+ _topCommand = &cobra.Command{
+ Use: "top",
+ Short: "Display the running processes of a container",
+ Long: topDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ topCommand.InputArgs = args
+ topCommand.GlobalFlags = MainGlobalOpts
+ return topCmd(&topCommand)
+ },
+ Example: `podman top ctrID
+ podman top --latest
+ podman top ctrID pid seccomp args %C`,
}
)
-func topCmd(c *cli.Context) error {
+func init() {
+ topCommand.Command = _topCommand
+ topCommand.SetUsageTemplate(UsageTemplate())
+ flags := topCommand.Flags()
+ flags.BoolVar(&topCommand.ListDescriptors, "list-descriptors", false, "")
+ flags.MarkHidden("list-descriptors")
+ flags.BoolVarP(&topCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func topCmd(c *cliconfig.TopValues) error {
var container *libpod.Container
var err error
- args := c.Args()
+ args := c.InputArgs
- if c.Bool("list-descriptors") {
+ if c.ListDescriptors {
descriptors, err := libpod.GetContainerPidInformationDescriptors()
if err != nil {
return err
@@ -64,22 +72,19 @@ func topCmd(c *cli.Context) error {
return nil
}
- if len(args) < 1 && !c.Bool("latest") {
+ if len(args) < 1 && !c.Latest {
return errors.Errorf("you must provide the name or id of a running container")
}
- if err := validateFlags(c, topFlags); err != nil {
- return err
- }
rootless.SetSkipStorageSetup(true)
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
var descriptors []string
- if c.Bool("latest") {
+ if c.Latest {
descriptors = args
container, err = runtime.GetLatestContainer()
} else {
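top prints its descriptor output through text/tabwriter (imported above), which keeps the per-process columns aligned. A minimal sketch of column-aligned output with tabwriter, using made-up sample rows:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Columns separated by tabs are aligned when the writer is flushed,
	// which is how top-style output stays readable.
	w := tabwriter.NewWriter(os.Stdout, 5, 1, 3, ' ', 0)
	fmt.Fprintln(w, "PID\tPCPU\tSECCOMP")
	fmt.Fprintln(w, "1\t0.0\tfilter")
	fmt.Fprintln(w, "23\t1.2\tdisabled")
	w.Flush()
}
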
diff --git a/cmd/podman/trust.go b/cmd/podman/trust.go
index a99be6ba2..8b02dcdc6 100644
--- a/cmd/podman/trust.go
+++ b/cmd/podman/trust.go
@@ -1,369 +1,22 @@
package main
import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "os"
- "sort"
- "strings"
-
- "github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/formats"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod/image"
- "github.com/containers/libpod/pkg/trust"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
)
var (
- setTrustFlags = []cli.Flag{
- cli.StringFlag{
- Name: "type, t",
- Usage: "Trust type, accept values: signedBy(default), accept, reject.",
- Value: "signedBy",
- },
- cli.StringSliceFlag{
- Name: "pubkeysfile, f",
- Usage: `Path of installed public key(s) to trust for TARGET.
- Absolute path to keys is added to policy.json. May
- used multiple times to define multiple public keys.
- File(s) must exist before using this command.`,
- },
- cli.StringFlag{
- Name: "policypath",
- Hidden: true,
- },
- }
- showTrustFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "raw",
- Usage: "Output raw policy file",
+ trustCommand = cliconfig.PodmanCommand{
+ Command: &cobra.Command{
+ Use: "trust",
+ Short: "Manage container image trust policy",
+ Long: "podman image trust command",
},
- cli.BoolFlag{
- Name: "json, j",
- Usage: "Output as json",
- },
- cli.StringFlag{
- Name: "policypath",
- Hidden: true,
- },
- cli.StringFlag{
- Name: "registrypath",
- Hidden: true,
- },
- }
-
- setTrustDescription = "Set default trust policy or add a new trust policy for a registry"
- setTrustCommand = cli.Command{
- Name: "set",
- Usage: "Set default trust policy or a new trust policy for a registry",
- Description: setTrustDescription,
- Flags: sortFlags(setTrustFlags),
- ArgsUsage: "default | REGISTRY[/REPOSITORY]",
- Action: setTrustCmd,
- OnUsageError: usageErrorHandler,
- }
-
- showTrustDescription = "Display trust policy for the system"
- showTrustCommand = cli.Command{
- Name: "show",
- Usage: "Display trust policy for the system",
- Description: showTrustDescription,
- Flags: sortFlags(showTrustFlags),
- Action: showTrustCmd,
- ArgsUsage: "",
- UseShortOptionHandling: true,
- OnUsageError: usageErrorHandler,
- }
-
- trustSubCommands = []cli.Command{
- setTrustCommand,
- showTrustCommand,
- }
-
- trustDescription = fmt.Sprintf(`Manages the trust policy of the host system. (%s)
- Trust policy describes a registry scope that must be signed by public keys.`, getDefaultPolicyPath())
- trustCommand = cli.Command{
- Name: "trust",
- Usage: "Manage container image trust policy",
- Description: trustDescription,
- ArgsUsage: "{set,show} ...",
- Subcommands: trustSubCommands,
- OnUsageError: usageErrorHandler,
}
)
-func showTrustCmd(c *cli.Context) error {
- runtime, err := libpodruntime.GetRuntime(c)
- if err != nil {
- return errors.Wrapf(err, "could not create runtime")
- }
-
- var (
- policyPath string
- systemRegistriesDirPath string
- outjson interface{}
- )
- if c.IsSet("policypath") {
- policyPath = c.String("policypath")
- } else {
- policyPath = trust.DefaultPolicyPath(runtime.SystemContext())
- }
- policyContent, err := ioutil.ReadFile(policyPath)
- if err != nil {
- return errors.Wrapf(err, "unable to read %s", policyPath)
- }
- if c.IsSet("registrypath") {
- systemRegistriesDirPath = c.String("registrypath")
- } else {
- systemRegistriesDirPath = trust.RegistriesDirPath(runtime.SystemContext())
- }
-
- if c.Bool("raw") {
- _, err := os.Stdout.Write(policyContent)
- if err != nil {
- return errors.Wrap(err, "could not read trust policies")
- }
- return nil
- }
-
- policyContentStruct, err := trust.GetPolicy(policyPath)
- if err != nil {
- return errors.Wrapf(err, "could not read trust policies")
- }
-
- if c.Bool("json") {
- policyJSON, err := getPolicyJSON(policyContentStruct, systemRegistriesDirPath)
- if err != nil {
- return errors.Wrapf(err, "could not show trust policies in JSON format")
- }
- outjson = policyJSON
- out := formats.JSONStruct{Output: outjson}
- return formats.Writer(out).Out()
- }
-
- showOutputMap, err := getPolicyShowOutput(policyContentStruct, systemRegistriesDirPath)
- if err != nil {
- return errors.Wrapf(err, "could not show trust policies")
- }
- out := formats.StdoutTemplateArray{Output: showOutputMap, Template: "{{.Repo}}\t{{.Trusttype}}\t{{.GPGid}}\t{{.Sigstore}}"}
- return formats.Writer(out).Out()
-}
-
-func setTrustCmd(c *cli.Context) error {
- runtime, err := libpodruntime.GetRuntime(c)
- if err != nil {
- return errors.Wrapf(err, "could not create runtime")
- }
- var (
- policyPath string
- policyContentStruct trust.PolicyContent
- newReposContent []trust.RepoContent
- )
- args := c.Args()
- if len(args) != 1 {
- return errors.Errorf("default or a registry name must be specified")
- }
- valid, err := image.IsValidImageURI(args[0])
- if err != nil || !valid {
- return errors.Wrapf(err, "invalid image uri %s", args[0])
- }
-
- trusttype := c.String("type")
- if !isValidTrustType(trusttype) {
- return errors.Errorf("invalid choice: %s (choose from 'accept', 'reject', 'signedBy')", trusttype)
- }
- if trusttype == "accept" {
- trusttype = "insecureAcceptAnything"
- }
-
- pubkeysfile := c.StringSlice("pubkeysfile")
- if len(pubkeysfile) == 0 && trusttype == "signedBy" {
- return errors.Errorf("At least one public key must be defined for type 'signedBy'")
- }
-
- if c.IsSet("policypath") {
- policyPath = c.String("policypath")
- } else {
- policyPath = trust.DefaultPolicyPath(runtime.SystemContext())
- }
- _, err = os.Stat(policyPath)
- if !os.IsNotExist(err) {
- policyContent, err := ioutil.ReadFile(policyPath)
- if err != nil {
- return errors.Wrapf(err, "unable to read %s", policyPath)
- }
- if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil {
- return errors.Errorf("could not read trust policies")
- }
- }
- if len(pubkeysfile) != 0 {
- for _, filepath := range pubkeysfile {
- newReposContent = append(newReposContent, trust.RepoContent{Type: trusttype, KeyType: "GPGKeys", KeyPath: filepath})
- }
- } else {
- newReposContent = append(newReposContent, trust.RepoContent{Type: trusttype})
- }
- if args[0] == "default" {
- policyContentStruct.Default = newReposContent
- } else {
- if len(policyContentStruct.Default) == 0 {
- return errors.Errorf("Default trust policy must be set.")
- }
- registryExists := false
- for transport, transportval := range policyContentStruct.Transports {
- _, registryExists = transportval[args[0]]
- if registryExists {
- policyContentStruct.Transports[transport][args[0]] = newReposContent
- break
- }
- }
- if !registryExists {
- if policyContentStruct.Transports == nil {
- policyContentStruct.Transports = make(map[string]trust.RepoMap)
- }
- if policyContentStruct.Transports["docker"] == nil {
- policyContentStruct.Transports["docker"] = make(map[string][]trust.RepoContent)
- }
- policyContentStruct.Transports["docker"][args[0]] = append(policyContentStruct.Transports["docker"][args[0]], newReposContent...)
- }
- }
-
- data, err := json.MarshalIndent(policyContentStruct, "", " ")
- if err != nil {
- return errors.Wrapf(err, "error setting trust policy")
- }
- err = ioutil.WriteFile(policyPath, data, 0644)
- if err != nil {
- return errors.Wrapf(err, "error setting trust policy")
- }
- return nil
-}
-
-func sortShowOutputMapKey(m map[string]trust.ShowOutput) []string {
- keys := make([]string, len(m))
- i := 0
- for k := range m {
- keys[i] = k
- i++
- }
- sort.Strings(keys)
- return keys
-}
-
-func isValidTrustType(t string) bool {
- if t == "accept" || t == "insecureAcceptAnything" || t == "reject" || t == "signedBy" {
- return true
- }
- return false
-}
-
-func getDefaultPolicyPath() string {
- return trust.DefaultPolicyPath(&types.SystemContext{})
-}
-
-func getPolicyJSON(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) (map[string]map[string]interface{}, error) {
- registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
- if err != nil {
- return nil, err
- }
-
- policyJSON := make(map[string]map[string]interface{})
- if len(policyContentStruct.Default) > 0 {
- policyJSON["* (default)"] = make(map[string]interface{})
- policyJSON["* (default)"]["type"] = policyContentStruct.Default[0].Type
- }
- for transname, transval := range policyContentStruct.Transports {
- for repo, repoval := range transval {
- policyJSON[repo] = make(map[string]interface{})
- policyJSON[repo]["type"] = repoval[0].Type
- policyJSON[repo]["transport"] = transname
- keyarr := []string{}
- uids := []string{}
- for _, repoele := range repoval {
- if len(repoele.KeyPath) > 0 {
- keyarr = append(keyarr, repoele.KeyPath)
- uids = append(uids, trust.GetGPGIdFromKeyPath(repoele.KeyPath)...)
- }
- if len(repoele.KeyData) > 0 {
- keyarr = append(keyarr, string(repoele.KeyData))
- uids = append(uids, trust.GetGPGIdFromKeyData(string(repoele.KeyData))...)
- }
- }
- policyJSON[repo]["keys"] = keyarr
- policyJSON[repo]["sigstore"] = ""
- registryNamespace := trust.HaveMatchRegistry(repo, registryConfigs)
- if registryNamespace != nil {
- policyJSON[repo]["sigstore"] = registryNamespace.SigStore
- }
- }
- }
- return policyJSON, nil
-}
-
-var typeDescription = map[string]string{"insecureAcceptAnything": "accept", "signedBy": "signed", "reject": "reject"}
-
-func trustTypeDescription(trustType string) string {
- trustDescription, exist := typeDescription[trustType]
- if !exist {
- logrus.Warnf("invalid trust type %s", trustType)
- }
- return trustDescription
-}
-
-func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) ([]interface{}, error) {
- var output []interface{}
-
- registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
- if err != nil {
- return nil, err
- }
-
- trustShowOutputMap := make(map[string]trust.ShowOutput)
- if len(policyContentStruct.Default) > 0 {
- defaultPolicyStruct := trust.ShowOutput{
- Repo: "default",
- Trusttype: trustTypeDescription(policyContentStruct.Default[0].Type),
- }
- trustShowOutputMap["* (default)"] = defaultPolicyStruct
- }
- for _, transval := range policyContentStruct.Transports {
- for repo, repoval := range transval {
- tempTrustShowOutput := trust.ShowOutput{
- Repo: repo,
- Trusttype: repoval[0].Type,
- }
- keyarr := []string{}
- uids := []string{}
- for _, repoele := range repoval {
- if len(repoele.KeyPath) > 0 {
- keyarr = append(keyarr, repoele.KeyPath)
- uids = append(uids, trust.GetGPGIdFromKeyPath(repoele.KeyPath)...)
- }
- if len(repoele.KeyData) > 0 {
- keyarr = append(keyarr, string(repoele.KeyData))
- uids = append(uids, trust.GetGPGIdFromKeyData(string(repoele.KeyData))...)
- }
- }
- tempTrustShowOutput.GPGid = strings.Join(uids, ", ")
-
- registryNamespace := trust.HaveMatchRegistry(repo, registryConfigs)
- if registryNamespace != nil {
- tempTrustShowOutput.Sigstore = registryNamespace.SigStore
- }
- trustShowOutputMap[repo] = tempTrustShowOutput
- }
- }
-
- sortedRepos := sortShowOutputMapKey(trustShowOutputMap)
- for _, reponame := range sortedRepos {
- showOutput, exists := trustShowOutputMap[reponame]
- if exists {
- output = append(output, interface{}(showOutput))
- }
- }
- return output, nil
+func init() {
+ trustCommand.SetUsageTemplate(UsageTemplate())
+ trustCommand.AddCommand(getTrustSubCommands()...)
+ imageCommand.AddCommand(trustCommand.Command)
}
diff --git a/cmd/podman/trust_set_show.go b/cmd/podman/trust_set_show.go
new file mode 100644
index 000000000..0a4783d0a
--- /dev/null
+++ b/cmd/podman/trust_set_show.go
@@ -0,0 +1,344 @@
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/formats"
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/trust"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+var (
+ setTrustCommand cliconfig.SetTrustValues
+ showTrustCommand cliconfig.ShowTrustValues
+ setTrustDescription = "Set default trust policy or add a new trust policy for a registry"
+ _setTrustCommand = &cobra.Command{
+ Use: "set",
+ Short: "Set default trust policy or a new trust policy for a registry",
+ Long: setTrustDescription,
+ Example: "",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ setTrustCommand.InputArgs = args
+ setTrustCommand.GlobalFlags = MainGlobalOpts
+ return setTrustCmd(&setTrustCommand)
+ },
+ }
+
+ showTrustDescription = "Display trust policy for the system"
+ _showTrustCommand = &cobra.Command{
+ Use: "show",
+ Short: "Display trust policy for the system",
+ Long: showTrustDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ showTrustCommand.InputArgs = args
+ showTrustCommand.GlobalFlags = MainGlobalOpts
+ return showTrustCmd(&showTrustCommand)
+ },
+ Example: "",
+ }
+)
+
+func init() {
+ setTrustCommand.Command = _setTrustCommand
+ setTrustCommand.SetUsageTemplate(UsageTemplate())
+ showTrustCommand.Command = _showTrustCommand
+ showTrustCommand.SetUsageTemplate(UsageTemplate())
+ setFlags := setTrustCommand.Flags()
+ setFlags.StringVar(&setTrustCommand.PolicyPath, "policypath", "", "")
+ setFlags.MarkHidden("policypath")
+ setFlags.StringSliceVarP(&setTrustCommand.PubKeysFile, "pubkeysfile", "f", []string{}, `Path of installed public key(s) to trust for TARGET.
+Absolute path to keys is added to policy.json. May
+be used multiple times to define multiple public keys.
+File(s) must exist before using this command`)
+ setFlags.StringVarP(&setTrustCommand.TrustType, "type", "t", "signedBy", "Trust type, accept values: signedBy(default), accept, reject")
+
+ showFlags := showTrustCommand.Flags()
+ showFlags.BoolVarP(&showTrustCommand.Json, "json", "j", false, "Output as json")
+ showFlags.StringVar(&showTrustCommand.PolicyPath, "policypath", "", "")
+ showFlags.BoolVar(&showTrustCommand.Raw, "raw", false, "Output raw policy file")
+ showFlags.MarkHidden("policypath")
+ showFlags.StringVar(&showTrustCommand.RegistryPath, "registrypath", "", "")
+ showFlags.MarkHidden("registrypath")
+}
+
+func showTrustCmd(c *cliconfig.ShowTrustValues) error {
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+ if err != nil {
+ return errors.Wrapf(err, "could not create runtime")
+ }
+
+ var (
+ policyPath string
+ systemRegistriesDirPath string
+ outjson interface{}
+ )
+ if c.Flag("policypath").Changed {
+ policyPath = c.PolicyPath
+ } else {
+ policyPath = trust.DefaultPolicyPath(runtime.SystemContext())
+ }
+ policyContent, err := ioutil.ReadFile(policyPath)
+ if err != nil {
+ return errors.Wrapf(err, "unable to read %s", policyPath)
+ }
+ if c.Flag("registrypath").Changed {
+ systemRegistriesDirPath = c.RegistryPath
+ } else {
+ systemRegistriesDirPath = trust.RegistriesDirPath(runtime.SystemContext())
+ }
+
+ if c.Raw {
+ _, err := os.Stdout.Write(policyContent)
+ if err != nil {
+ return errors.Wrap(err, "could not read raw trust policies")
+ }
+ return nil
+ }
+
+ policyContentStruct, err := trust.GetPolicy(policyPath)
+ if err != nil {
+ return errors.Wrapf(err, "could not read trust policies")
+ }
+
+ if c.Json {
+ policyJSON, err := getPolicyJSON(policyContentStruct, systemRegistriesDirPath)
+ if err != nil {
+ return errors.Wrapf(err, "could not show trust policies in JSON format")
+ }
+ outjson = policyJSON
+ out := formats.JSONStruct{Output: outjson}
+ return formats.Writer(out).Out()
+ }
+
+ showOutputMap, err := getPolicyShowOutput(policyContentStruct, systemRegistriesDirPath)
+ if err != nil {
+ return errors.Wrapf(err, "could not show trust policies")
+ }
+ out := formats.StdoutTemplateArray{Output: showOutputMap, Template: "{{.Repo}}\t{{.Trusttype}}\t{{.GPGid}}\t{{.Sigstore}}"}
+ return formats.Writer(out).Out()
+}
+
+func setTrustCmd(c *cliconfig.SetTrustValues) error {
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+ if err != nil {
+ return errors.Wrapf(err, "could not create runtime")
+ }
+ var (
+ policyPath string
+ policyContentStruct trust.PolicyContent
+ newReposContent []trust.RepoContent
+ )
+ args := c.InputArgs
+ if len(args) != 1 {
+ return errors.Errorf("default or a registry name must be specified")
+ }
+ valid, err := image.IsValidImageURI(args[0])
+ if err != nil || !valid {
+ return errors.Wrapf(err, "invalid image uri %s", args[0])
+ }
+
+ trusttype := c.TrustType
+ if !isValidTrustType(trusttype) {
+ return errors.Errorf("invalid choice: %s (choose from 'accept', 'reject', 'signedBy')", trusttype)
+ }
+ if trusttype == "accept" {
+ trusttype = "insecureAcceptAnything"
+ }
+
+ pubkeysfile := c.PubKeysFile
+ if len(pubkeysfile) == 0 && trusttype == "signedBy" {
+ return errors.Errorf("At least one public key must be defined for type 'signedBy'")
+ }
+
+ if c.Flag("policypath").Changed {
+ policyPath = c.PolicyPath
+ } else {
+ policyPath = trust.DefaultPolicyPath(runtime.SystemContext())
+ }
+ _, err = os.Stat(policyPath)
+ if !os.IsNotExist(err) {
+ policyContent, err := ioutil.ReadFile(policyPath)
+ if err != nil {
+ return errors.Wrapf(err, "unable to read %s", policyPath)
+ }
+ if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil {
+ return errors.Errorf("could not read trust policies")
+ }
+ }
+ if len(pubkeysfile) != 0 {
+ for _, filepath := range pubkeysfile {
+ newReposContent = append(newReposContent, trust.RepoContent{Type: trusttype, KeyType: "GPGKeys", KeyPath: filepath})
+ }
+ } else {
+ newReposContent = append(newReposContent, trust.RepoContent{Type: trusttype})
+ }
+ if args[0] == "default" {
+ policyContentStruct.Default = newReposContent
+ } else {
+ if len(policyContentStruct.Default) == 0 {
+ return errors.Errorf("Default trust policy must be set.")
+ }
+ registryExists := false
+ for transport, transportval := range policyContentStruct.Transports {
+ _, registryExists = transportval[args[0]]
+ if registryExists {
+ policyContentStruct.Transports[transport][args[0]] = newReposContent
+ break
+ }
+ }
+ if !registryExists {
+ if policyContentStruct.Transports == nil {
+ policyContentStruct.Transports = make(map[string]trust.RepoMap)
+ }
+ if policyContentStruct.Transports["docker"] == nil {
+ policyContentStruct.Transports["docker"] = make(map[string][]trust.RepoContent)
+ }
+ policyContentStruct.Transports["docker"][args[0]] = append(policyContentStruct.Transports["docker"][args[0]], newReposContent...)
+ }
+ }
+
+ data, err := json.MarshalIndent(policyContentStruct, "", " ")
+ if err != nil {
+ return errors.Wrapf(err, "error setting trust policy")
+ }
+ err = ioutil.WriteFile(policyPath, data, 0644)
+ if err != nil {
+ return errors.Wrapf(err, "error setting trust policy")
+ }
+ return nil
+}
+
+func sortShowOutputMapKey(m map[string]trust.ShowOutput) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func isValidTrustType(t string) bool {
+ if t == "accept" || t == "insecureAcceptAnything" || t == "reject" || t == "signedBy" {
+ return true
+ }
+ return false
+}
+
+func getDefaultPolicyPath() string {
+ return trust.DefaultPolicyPath(&types.SystemContext{})
+}
+
+func getPolicyJSON(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) (map[string]map[string]interface{}, error) {
+ registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ policyJSON := make(map[string]map[string]interface{})
+ if len(policyContentStruct.Default) > 0 {
+ policyJSON["* (default)"] = make(map[string]interface{})
+ policyJSON["* (default)"]["type"] = policyContentStruct.Default[0].Type
+ }
+ for transname, transval := range policyContentStruct.Transports {
+ for repo, repoval := range transval {
+ policyJSON[repo] = make(map[string]interface{})
+ policyJSON[repo]["type"] = repoval[0].Type
+ policyJSON[repo]["transport"] = transname
+ keyarr := []string{}
+ uids := []string{}
+ for _, repoele := range repoval {
+ if len(repoele.KeyPath) > 0 {
+ keyarr = append(keyarr, repoele.KeyPath)
+ uids = append(uids, trust.GetGPGIdFromKeyPath(repoele.KeyPath)...)
+ }
+ if len(repoele.KeyData) > 0 {
+ keyarr = append(keyarr, string(repoele.KeyData))
+ uids = append(uids, trust.GetGPGIdFromKeyData(string(repoele.KeyData))...)
+ }
+ }
+ policyJSON[repo]["keys"] = keyarr
+ policyJSON[repo]["sigstore"] = ""
+ registryNamespace := trust.HaveMatchRegistry(repo, registryConfigs)
+ if registryNamespace != nil {
+ policyJSON[repo]["sigstore"] = registryNamespace.SigStore
+ }
+ }
+ }
+ return policyJSON, nil
+}
+
+var typeDescription = map[string]string{"insecureAcceptAnything": "accept", "signedBy": "signed", "reject": "reject"}
+
+func trustTypeDescription(trustType string) string {
+ trustDescription, exist := typeDescription[trustType]
+ if !exist {
+ logrus.Warnf("invalid trust type %s", trustType)
+ }
+ return trustDescription
+}
+
+func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) ([]interface{}, error) {
+ var output []interface{}
+
+ registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ trustShowOutputMap := make(map[string]trust.ShowOutput)
+ if len(policyContentStruct.Default) > 0 {
+ defaultPolicyStruct := trust.ShowOutput{
+ Repo: "default",
+ Trusttype: trustTypeDescription(policyContentStruct.Default[0].Type),
+ }
+ trustShowOutputMap["* (default)"] = defaultPolicyStruct
+ }
+ for _, transval := range policyContentStruct.Transports {
+ for repo, repoval := range transval {
+ tempTrustShowOutput := trust.ShowOutput{
+ Repo: repo,
+ Trusttype: repoval[0].Type,
+ }
+ keyarr := []string{}
+ uids := []string{}
+ for _, repoele := range repoval {
+ if len(repoele.KeyPath) > 0 {
+ keyarr = append(keyarr, repoele.KeyPath)
+ uids = append(uids, trust.GetGPGIdFromKeyPath(repoele.KeyPath)...)
+ }
+ if len(repoele.KeyData) > 0 {
+ keyarr = append(keyarr, string(repoele.KeyData))
+ uids = append(uids, trust.GetGPGIdFromKeyData(string(repoele.KeyData))...)
+ }
+ }
+ tempTrustShowOutput.GPGid = strings.Join(uids, ", ")
+
+ registryNamespace := trust.HaveMatchRegistry(repo, registryConfigs)
+ if registryNamespace != nil {
+ tempTrustShowOutput.Sigstore = registryNamespace.SigStore
+ }
+ trustShowOutputMap[repo] = tempTrustShowOutput
+ }
+ }
+
+ sortedRepos := sortShowOutputMapKey(trustShowOutputMap)
+ for _, reponame := range sortedRepos {
+ showOutput, exists := trustShowOutputMap[reponame]
+ if exists {
+ output = append(output, interface{}(showOutput))
+ }
+ }
+ return output, nil
+}
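The core of setTrustCmd is a read-modify-write of policy.json: unmarshal the existing file if it exists, replace the default entries or the per-registry entries under the docker transport, then write the result back with MarshalIndent. A reduced sketch of that flow with simplified stand-in types (not the trust package's real structs):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
)

// repoContent and policy are simplified stand-ins for the trust package types.
type repoContent struct {
	Type    string `json:"type"`
	KeyType string `json:"keyType,omitempty"`
	KeyPath string `json:"keyPath,omitempty"`
}

type policy struct {
	Default    []repoContent                       `json:"default"`
	Transports map[string]map[string][]repoContent `json:"transports,omitempty"`
}

// setPolicy reads policy.json if it exists, replaces the entries for the given
// scope ("default" or a registry name), and writes the file back out.
func setPolicy(path, scope string, entries []repoContent) error {
	var p policy
	if data, err := ioutil.ReadFile(path); err == nil {
		if err := json.Unmarshal(data, &p); err != nil {
			return fmt.Errorf("could not read trust policies: %v", err)
		}
	} else if !os.IsNotExist(err) {
		return err
	}
	if scope == "default" {
		p.Default = entries
	} else {
		if p.Transports == nil {
			p.Transports = map[string]map[string][]repoContent{}
		}
		if p.Transports["docker"] == nil {
			p.Transports["docker"] = map[string][]repoContent{}
		}
		p.Transports["docker"][scope] = entries
	}
	data, err := json.MarshalIndent(p, "", "    ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, data, 0644)
}

func main() {
	err := setPolicy("/tmp/policy.json", "default",
		[]repoContent{{Type: "insecureAcceptAnything"}})
	fmt.Println(err)
}
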
diff --git a/cmd/podman/umount.go b/cmd/podman/umount.go
index ab6925e65..6d9009388 100644
--- a/cmd/podman/umount.go
+++ b/cmd/podman/umount.go
@@ -3,60 +3,64 @@ package main
import (
"fmt"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- umountFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Umount all of the currently mounted containers",
- },
- cli.BoolFlag{
- Name: "force, f",
- Usage: "Force the complete umount all of the currently mounted containers",
- },
- LatestFlag,
- }
-
- description = `
+ umountCommand cliconfig.UmountValues
+ description = `
Container storage increments a mount counter each time a container is mounted.
When a container is unmounted, the mount counter is decremented and the
container's root filesystem is physically unmounted only when the mount
counter reaches zero indicating no other processes are using the mount.
An unmount can be forced with the --force flag.
`
- umountCommand = cli.Command{
- Name: "umount",
- Aliases: []string{"unmount"},
- Usage: "Unmount working container's root filesystem",
- Description: description,
- Flags: sortFlags(umountFlags),
- Action: umountCmd,
- ArgsUsage: "CONTAINER-NAME-OR-ID",
- OnUsageError: usageErrorHandler,
+ _umountCommand = &cobra.Command{
+ Use: "umount",
+ Aliases: []string{"unmount"},
+ Short: "Unmounts working container's root filesystem",
+ Long: description,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ umountCommand.InputArgs = args
+ umountCommand.GlobalFlags = MainGlobalOpts
+ return umountCmd(&umountCommand)
+ },
+ Args: func(cmd *cobra.Command, args []string) error {
+ return checkAllAndLatest(cmd, args, true)
+ },
+ Example: `podman umount ctrID
+ podman umount ctrID1 ctrID2 ctrID3
+ podman umount --all`,
}
)
-func umountCmd(c *cli.Context) error {
- runtime, err := libpodruntime.GetRuntime(c)
+func init() {
+ umountCommand.Command = _umountCommand
+ umountCommand.SetUsageTemplate(UsageTemplate())
+ flags := umountCommand.Flags()
+ flags.BoolVarP(&umountCommand.All, "all", "a", false, "Umount all of the currently mounted containers")
+ flags.BoolVarP(&umountCommand.Force, "force", "f", false, "Force the complete umount of all of the currently mounted containers")
+ flags.BoolVarP(&umountCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func umountCmd(c *cliconfig.UmountValues) error {
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- force := c.Bool("force")
- umountAll := c.Bool("all")
- if err := checkAllAndLatest(c); err != nil {
- return err
- }
+ force := c.Force
+ umountAll := c.All
- containers, err := getAllOrLatestContainers(c, runtime, -1, "all")
+ containers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
if err != nil {
if len(containers) == 0 {
return err
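The umount description above is essentially a reference count: every mount increments the counter, every unmount decrements it, and the root filesystem is only physically unmounted when the counter reaches zero (or when --force drops all outstanding references at once). A toy sketch of that counting behavior:

package main

import "fmt"

// mountCounter models containers/storage's per-container mount count.
type mountCounter struct {
	count int
}

func (m *mountCounter) mount() {
	m.count++
}

// unmount returns true when the root filesystem would really be unmounted.
// force drops every outstanding reference at once, as --force does.
func (m *mountCounter) unmount(force bool) bool {
	if force {
		m.count = 0
		return true
	}
	if m.count > 0 {
		m.count--
	}
	return m.count == 0
}

func main() {
	var c mountCounter
	c.mount()
	c.mount()
	fmt.Println(c.unmount(false)) // false: one user still holds the mount
	fmt.Println(c.unmount(false)) // true: counter reached zero
}
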
diff --git a/cmd/podman/unpause.go b/cmd/podman/unpause.go
index 91b5fda33..efd9a20a3 100644
--- a/cmd/podman/unpause.go
+++ b/cmd/podman/unpause.go
@@ -3,38 +3,45 @@ package main
import (
"os"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
- unpauseFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Unpause all paused containers",
- },
- }
+ unpauseCommand cliconfig.UnpauseValues
+
unpauseDescription = `
podman unpause
Unpauses one or more running containers. The container name or ID can be used.
`
- unpauseCommand = cli.Command{
- Name: "unpause",
- Usage: "Unpause the processes in one or more containers",
- Description: unpauseDescription,
- Flags: unpauseFlags,
- Action: unpauseCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
- OnUsageError: usageErrorHandler,
+ _unpauseCommand = &cobra.Command{
+ Use: "unpause",
+ Short: "Unpause the processes in one or more containers",
+ Long: unpauseDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ unpauseCommand.InputArgs = args
+ unpauseCommand.GlobalFlags = MainGlobalOpts
+ return unpauseCmd(&unpauseCommand)
+ },
+ Example: `podman unpause ctrID
+ podman unpause --all`,
}
)
-func unpauseCmd(c *cli.Context) error {
+func init() {
+ unpauseCommand.Command = _unpauseCommand
+ unpauseCommand.SetUsageTemplate(UsageTemplate())
+ flags := unpauseCommand.Flags()
+ flags.BoolVarP(&unpauseCommand.All, "all", "a", false, "Unpause all paused containers")
+}
+
+func unpauseCmd(c *cliconfig.UnpauseValues) error {
var (
unpauseContainers []*libpod.Container
unpauseFuncs []shared.ParallelWorkerInput
@@ -43,18 +50,18 @@ func unpauseCmd(c *cli.Context) error {
return errors.New("unpause is not supported for rootless containers")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- args := c.Args()
- if len(args) < 1 && !c.Bool("all") {
+ args := c.InputArgs
+ if len(args) < 1 && !c.All {
return errors.Errorf("you must provide at least one container name or id")
}
- if c.Bool("all") {
- cs, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStatePaused, "paused")
+ if c.All {
+ cs, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStatePaused, "paused")
if err != nil {
return err
}
@@ -84,7 +91,7 @@ func unpauseCmd(c *cli.Context) error {
maxWorkers := shared.Parallelize("unpause")
if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
+ maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
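Like stop and pause, unpauseCmd fans the per-container work out to a bounded number of workers, capped by the global --max-workers value. A generic worker-pool sketch of that idea (this is not the actual shared.Parallelize helper, just the underlying pattern):

package main

import (
	"fmt"
	"sync"
)

// runParallel runs one job per name with at most maxWorkers running at once
// and returns the per-name errors, similar in spirit to the parallel helpers
// the podman commands use.
func runParallel(names []string, maxWorkers int, job func(string) error) map[string]error {
	sem := make(chan struct{}, maxWorkers)
	var (
		mu      sync.Mutex
		wg      sync.WaitGroup
		results = make(map[string]error, len(names))
	)
	for _, name := range names {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			sem <- struct{}{} // acquire a worker slot
			defer func() { <-sem }()
			err := job(name)
			mu.Lock()
			results[name] = err
			mu.Unlock()
		}(name)
	}
	wg.Wait()
	return results
}

func main() {
	out := runParallel([]string{"ctr1", "ctr2", "ctr3"}, 2, func(name string) error {
		fmt.Println("unpausing", name)
		return nil
	})
	fmt.Println(len(out), "containers processed")
}
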
diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go
index a59535b43..4ec0f8a13 100644
--- a/cmd/podman/utils.go
+++ b/cmd/podman/utils.go
@@ -3,15 +3,16 @@ package main
import (
"context"
"fmt"
+ "github.com/spf13/pflag"
"os"
gosignal "os/signal"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/term"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
"golang.org/x/crypto/ssh/terminal"
"k8s.io/client-go/tools/remotecommand"
)
@@ -20,7 +21,7 @@ type RawTtyFormatter struct {
}
// Start (if required) and attach to a container
-func startAttachCtr(ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool) error {
+func startAttachCtr(ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error {
ctx := context.Background()
resize := make(chan remotecommand.TerminalSize)
@@ -76,7 +77,7 @@ func startAttachCtr(ctr *libpod.Container, stdout, stderr, stdin *os.File, detac
return ctr.Attach(streams, detachKeys, resize)
}
- attachChan, err := ctr.StartAndAttach(getContext(), streams, detachKeys, resize)
+ attachChan, err := ctr.StartAndAttach(getContext(), streams, detachKeys, resize, recursive)
if err != nil {
return err
}
@@ -158,30 +159,20 @@ func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
return bytes, err
}
-func checkMutuallyExclusiveFlags(c *cli.Context) error {
- if err := checkAllAndLatest(c); err != nil {
- return err
- }
- if err := validateFlags(c, startFlags); err != nil {
- return err
- }
- return nil
-}
-
// For pod commands that have a latest and all flag, getPodsFromContext gets
// pods the user specifies. If there's an error before getting pods, the pods slice
// will be empty and error will be not nil. If an error occured after, the pod slice
// will hold all of the successful pods, and error will hold the last error.
// The remaining errors will be logged. On success, pods will hold all pods and
// error will be nil.
-func getPodsFromContext(c *cli.Context, r *libpod.Runtime) ([]*libpod.Pod, error) {
- args := c.Args()
+func getPodsFromContext(c *cliconfig.PodmanCommand, r *libpod.Runtime) ([]*libpod.Pod, error) {
+ args := c.InputArgs
var pods []*libpod.Pod
var lastError error
var err error
if c.Bool("all") {
- pods, err = r.Pods()
+ pods, err = r.GetAllPods()
if err != nil {
return nil, errors.Wrapf(err, "unable to get running pods")
}
@@ -209,8 +200,8 @@ func getPodsFromContext(c *cli.Context, r *libpod.Runtime) ([]*libpod.Pod, error
return pods, lastError
}
-func getVolumesFromContext(c *cli.Context, r *libpod.Runtime) ([]*libpod.Volume, error) {
- args := c.Args()
+func getVolumesFromContext(c *cliconfig.PodmanCommand, r *libpod.Runtime) ([]*libpod.Volume, error) {
+ args := c.InputArgs
var (
vols []*libpod.Volume
lastError error
@@ -254,3 +245,11 @@ func printParallelOutput(m map[string]error, errCount int) error {
}
return lastError
}
+
+// markFlagHiddenForRemoteClient makes the flag not appear as part of the CLI
+// on the remote-client
+func markFlagHiddenForRemoteClient(flagName string, flags *pflag.FlagSet) {
+ if remoteclient {
+ flags.MarkHidden(flagName)
+ }
+}
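markFlagHiddenForRemoteClient is then called from each command's init(), so options such as --latest simply disappear from the remote client's help output while still parsing if passed. A small self-contained sketch of the same conditional hiding with pflag (the remoteClient variable here is just an illustration of the build-time switch):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// remoteClient would be decided at build time in podman; here it is a plain
// variable so the sketch stays runnable.
var remoteClient = true

func main() {
	flags := pflag.NewFlagSet("stop", pflag.ContinueOnError)
	latest := flags.BoolP("latest", "l", false, "Act on the latest container")

	// Hide the flag from help output when running as the remote client.
	if remoteClient {
		flags.MarkHidden("latest")
	}

	flags.Parse([]string{"--latest"})
	fmt.Println("latest:", *latest)
}
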
diff --git a/cmd/podman/varlink.go b/cmd/podman/varlink.go
index 38ce77415..d9c6cdb47 100644
--- a/cmd/podman/varlink.go
+++ b/cmd/podman/varlink.go
@@ -1,59 +1,64 @@
-// +build varlink
+// +build varlink,!remoteclient
package main
import (
"time"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/pkg/varlinkapi"
"github.com/containers/libpod/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
"github.com/varlink/go/varlink"
)
var (
+ varlinkCommand cliconfig.VarlinkValues
varlinkDescription = `
podman varlink
run varlink interface
`
- varlinkFlags = []cli.Flag{
- cli.IntFlag{
- Name: "timeout, t",
- Usage: "Time until the varlink session expires in milliseconds. Use 0 to disable the timeout.",
- Value: 1000,
+ _varlinkCommand = &cobra.Command{
+ Use: "varlink",
+ Short: "Run varlink interface",
+ Long: varlinkDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ varlinkCommand.InputArgs = args
+ varlinkCommand.GlobalFlags = MainGlobalOpts
+ return varlinkCmd(&varlinkCommand)
},
- }
- varlinkCommand = &cli.Command{
- Name: "varlink",
- Usage: "Run varlink interface",
- Description: varlinkDescription,
- Flags: sortFlags(varlinkFlags),
- Action: varlinkCmd,
- ArgsUsage: "VARLINK_URI",
- OnUsageError: usageErrorHandler,
+ Example: `podman varlink unix:/run/podman/io.podman
+ podman varlink --timeout 5000 unix:/run/podman/io.podman`,
}
)
-func varlinkCmd(c *cli.Context) error {
- args := c.Args()
+func init() {
+ varlinkCommand.Command = _varlinkCommand
+ varlinkCommand.SetUsageTemplate(UsageTemplate())
+ flags := varlinkCommand.Flags()
+ flags.Int64VarP(&varlinkCommand.Timeout, "timeout", "t", 1000, "Time until the varlink session expires in milliseconds. Use 0 to disable the timeout")
+}
+
+func varlinkCmd(c *cliconfig.VarlinkValues) error {
+ args := c.InputArgs
if len(args) < 1 {
return errors.Errorf("you must provide a varlink URI")
}
- timeout := time.Duration(c.Int64("timeout")) * time.Millisecond
+ timeout := time.Duration(c.Timeout) * time.Millisecond
// Create a single runtime for varlink
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- var varlinkInterfaces = []*iopodman.VarlinkInterface{varlinkapi.New(c, runtime)}
+ var varlinkInterfaces = []*iopodman.VarlinkInterface{varlinkapi.New(&c.PodmanCommand, runtime)}
// Register varlink service. The metadata can be retrieved with:
// $ varlink info [varlink address URI]
service, err := varlink.NewService(
@@ -79,7 +84,7 @@ func varlinkCmd(c *cli.Context) error {
logrus.Infof("varlink service expired (use --timeout to increase session time beyond %d ms, 0 means never timeout)", c.Int64("timeout"))
return nil
default:
- return errors.Errorf("unable to start varlink service")
+ return errors.Wrapf(err, "unable to start varlink service")
}
}
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index 101232b0c..618af3481 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -2,15 +2,13 @@
# in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in the upstream libpod repository.
interface io.podman
-
-# Version is the structure returned by GetVersion
-type Version (
- version: string,
- go_version: string,
- git_commit: string,
- built: int,
- os_arch: string,
- remote_api_version: int
+type Volume (
+ name: string,
+ labels: [string]string,
+ mountPoint: string,
+ driver: string,
+ options: [string]string,
+ scope: string
)
type NotImplemented (
@@ -20,6 +18,7 @@ type NotImplemented (
type StringResponse (
message: string
)
+
# ContainerChanges describes the return struct for ListContainerChanges
type ContainerChanges (
changed: []string,
@@ -27,14 +26,35 @@ type ContainerChanges (
deleted: []string
)
-# ImageInList describes the structure that is returned in
-# ListImages.
-type ImageInList (
+type ImageSaveOptions (
+ name: string,
+ format: string,
+ output: string,
+ outputType: string,
+ moreTags: []string,
+ quiet: bool,
+ compress: bool
+)
+
+type VolumeCreateOpts (
+ volumeName: string,
+ driver: string,
+ labels: [string]string,
+ options: [string]string
+)
+
+type VolumeRemoveOpts (
+ volumes: []string,
+ all: bool,
+ force: bool
+)
+
+type Image (
id: string,
parentId: string,
repoTags: []string,
repoDigests: []string,
- created: string,
+ created: string, # as RFC3339
size: int,
virtualSize: int,
containers: int,
@@ -45,30 +65,35 @@ type ImageInList (
# ImageHistory describes the returned structure from ImageHistory.
type ImageHistory (
id: string,
- created: string,
+ created: string, # as RFC3339
createdBy: string,
tags: []string,
size: int,
comment: string
)
-# ImageSearch is the returned structure for SearchImage. It is returned
-# in array form.
-type ImageSearch (
+# Represents a single search result from SearchImages
+type ImageSearchResult (
description: string,
is_official: bool,
is_automated: bool,
+ registry: string,
name: string,
star_count: int
)
-# ListContainerData is the returned struct for an individual container
-type ListContainerData (
+type ImageSearchFilter (
+ is_official: ?bool,
+ is_automated: ?bool,
+ star_count: int
+)
+
+type Container (
id: string,
image: string,
imageid: string,
command: []string,
- createdat: string,
+ createdat: string, # as RFC3339
runningfor: string,
status: string,
ports: []ContainerPortMappings,
@@ -303,37 +328,54 @@ type IDMap (
size: int
)
+# BuildOptions are used to describe the physical attributes of the build
+type BuildOptions (
+ addHosts: []string,
+ cgroupParent: string,
+ cpuPeriod: int,
+ cpuQuota: int,
+ cpuShares: int,
+ cpusetCpus: string,
+ cpusetMems: string,
+ memory: int,
+ memorySwap: int,
+ shmSize: string,
+ ulimit: []string,
+ volume: []string
+)
+
# BuildInfo is used to describe user input for building images
type BuildInfo (
- # paths to one or more dockerfiles
- dockerfile: []string,
- tags: []string,
- add_hosts: []string,
- cgroup_parent: string,
- cpu_period: int,
- cpu_quota: int,
- cpu_shares: int,
- cpuset_cpus: string,
- cpuset_mems: string,
- memory: string,
- memory_swap: string,
- security_opts: []string,
- shm_size: string,
- ulimit: []string,
- volume: []string,
- squash: bool,
- pull: bool,
- pull_always: bool,
- force_rm: bool,
- rm: bool,
- label: []string,
+ additionalTags: []string,
annotations: []string,
- build_args: [string]string,
- image_format: string
+ buildArgs: [string]string,
+ buildOptions: BuildOptions,
+ cniConfigDir: string,
+ cniPluginDir: string,
+ compression: string,
+ contextDir: string,
+ defaultsMountFilePath: string,
+ dockerfiles: []string,
+ err: string,
+ forceRmIntermediateCtrs: bool,
+ iidfile: string,
+ label: []string,
+ layers: bool,
+ nocache: bool,
+ out: string,
+ output: string,
+ outputFormat: string,
+ pullPolicy: string,
+ quiet: bool,
+ remoteIntermediateCtrs: bool,
+ reportWriter: string,
+ runtimeArgs: []string,
+ signaturePolicyPath: string,
+ squash: bool
)
-# BuildResponse is used to describe the responses for building images
-type BuildResponse (
+# MoreResponse is a struct for responses from varlink that require longer output
+type MoreResponse (
logs: []string,
id: string
)
@@ -387,40 +429,35 @@ type Runlabel(
name: string,
pull: bool,
signaturePolicyPath: string,
- tlsVerify: bool,
+ tlsVerify: ?bool,
label: string,
extraArgs: []string,
opts: [string]string
)
-# Ping provides a response for developers to ensure their varlink setup is working.
-# #### Example
-# ~~~
-# $ varlink call -m unix:/run/podman/io.podman/io.podman.Ping
-# {
-# "ping": {
-# "message": "OK"
-# }
-# }
-# ~~~
-method Ping() -> (ping: StringResponse)
-
-# GetVersion returns a Version structure describing the libpod setup on their
-# system.
-method GetVersion() -> (version: Version)
+# GetVersion returns version and build information of the podman service
+method GetVersion() -> (
+ version: string,
+ go_version: string,
+ git_commit: string,
+ built: string, # as RFC3339
+ os_arch: string,
+ remote_api_version: int
+)
# GetInfo returns a [PodmanInfo](#PodmanInfo) struct that describes podman and its host such as storage stats,
# build information of Podman, and system-wide registries.
method GetInfo() -> (info: PodmanInfo)
-# ListContainers returns a list of containers in no particular order. There are
-# returned as an array of ListContainerData structs. See also [GetContainer](#GetContainer).
-method ListContainers() -> (containers: []ListContainerData)
+# ListContainers returns information about all containers.
+# See also [GetContainer](#GetContainer).
+method ListContainers() -> (containers: []Container)
-# GetContainer takes a name or ID of a container and returns single ListContainerData
-# structure. A [ContainerNotFound](#ContainerNotFound) error will be returned if the container cannot be found.
-# See also [ListContainers](ListContainers) and [InspectContainer](#InspectContainer).
-method GetContainer(name: string) -> (container: ListContainerData)
+# GetContainer returns information about a single container. If a container
+# with the given id doesn't exist, a [ContainerNotFound](#ContainerNotFound)
+# error will be returned. See also [ListContainers](ListContainers) and
+# [InspectContainer](#InspectContainer).
+method GetContainer(id: string) -> (container: Container)
# CreateContainer creates a new container from an image. It uses a [Create](#Create) type for input. The minimum
# input required for CreateContainer is an image name. If the image name is not found, an [ImageNotFound](#ImageNotFound)
@@ -508,7 +545,7 @@ method ExportContainer(name: string, path: string) -> (tarfile: string)
method GetContainerStats(name: string) -> (container: ContainerStats)
# This method has not be implemented yet.
-method ResizeContainerTty() -> (notimplemented: NotImplemented)
+# method ResizeContainerTty() -> (notimplemented: NotImplemented)
# StartContainer starts a created or stopped container. It takes the name or ID of container. It returns
# the container ID once started. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)
@@ -540,10 +577,10 @@ method RestartContainer(name: string, timeout: int) -> (container: string)
method KillContainer(name: string, signal: int) -> (container: string)
# This method has not be implemented yet.
-method UpdateContainer() -> (notimplemented: NotImplemented)
+# method UpdateContainer() -> (notimplemented: NotImplemented)
# This method has not be implemented yet.
-method RenameContainer() -> (notimplemented: NotImplemented)
+# method RenameContainer() -> (notimplemented: NotImplemented)
# PauseContainer takes the name or ID of container and pauses it. If the container cannot be found,
# a [ContainerNotFound](#ContainerNotFound) error will be returned; otherwise the ID of the container is returned.
@@ -556,7 +593,7 @@ method PauseContainer(name: string) -> (container: string)
method UnpauseContainer(name: string) -> (container: string)
# This method has not be implemented yet.
-method AttachToContainer() -> (notimplemented: NotImplemented)
+# method AttachToContainer() -> (notimplemented: NotImplemented)
# GetAttachSockets takes the name or ID of an existing container. It returns file paths for two sockets needed
# to properly communicate with a container. The first is the actual I/O socket that the container uses. The
@@ -580,8 +617,9 @@ method GetAttachSockets(name: string) -> (sockets: Sockets)
# a [ContainerNotFound](#ContainerNotFound) error is returned.
method WaitContainer(name: string) -> (exitcode: int)
-# RemoveContainer takes requires the name or ID of container as well a boolean representing whether a running
-# container can be stopped and removed. Upon successful removal of the container, its ID is returned. If the
+# RemoveContainer requires the name or ID of a container, a boolean representing whether a running
+# container can be stopped and removed, and a boolean indicating whether to remove builtin volumes.
+# Upon successful removal of the container, its ID is returned. If the
# container cannot be found by name or ID, a [ContainerNotFound](#ContainerNotFound) error will be returned.
# #### Example
# ~~~
@@ -590,7 +628,7 @@ method WaitContainer(name: string) -> (exitcode: int)
# "container": "62f4fd98cb57f529831e8f90610e54bba74bd6f02920ffb485e15376ed365c20"
# }
# ~~~
-method RemoveContainer(name: string, force: bool) -> (container: string)
+method RemoveContainer(name: string, force: bool, removeVolumes: bool) -> (container: string)
# DeleteStoppedContainers will delete all containers that are not running. It will return a list the deleted
# container IDs. See also [RemoveContainer](RemoveContainer).
@@ -608,21 +646,21 @@ method RemoveContainer(name: string, force: bool) -> (container: string)
# ~~~
method DeleteStoppedContainers() -> (containers: []string)
-# ListImages returns an array of ImageInList structures which provide basic information about
-# an image currently in storage. See also [InspectImage](InspectImage).
-method ListImages() -> (images: []ImageInList)
+# ListImages returns information about the images that are currently in storage.
+# See also [InspectImage](InspectImage).
+method ListImages() -> (images: []Image)
-# GetImage returns a single image in an [ImageInList](#ImageInList) struct. You must supply an image name as a string.
-# If the image cannot be found, an [ImageNotFound](#ImageNotFound) error will be returned.
-method GetImage(name: string) -> (image: ImageInList)
+# GetImage returns information about a single image in storage.
+# If the image cannot be found, an [ImageNotFound](#ImageNotFound) error will be returned.
+method GetImage(id: string) -> (image: Image)
# BuildImage takes a [BuildInfo](#BuildInfo) structure and builds an image. At a minimum, you must provide the
-# 'dockerfile' and 'tags' options in the BuildInfo structure. It will return a [BuildResponse](#BuildResponse) structure
+# 'dockerfile' and 'tags' options in the BuildInfo structure. It will return a [MoreResponse](#MoreResponse) structure
# that contains the build logs and resulting image ID.
-method BuildImage(build: BuildInfo) -> (image: BuildResponse)
+method BuildImage(build: BuildInfo) -> (image: MoreResponse)
# This function is not implemented yet.
-method CreateImage() -> (notimplemented: NotImplemented)
+# method CreateImage() -> (notimplemented: NotImplemented)
# InspectImage takes the name or ID of an image and returns a string representation of data associated with the
# image. You must serialize the string into JSON to use it further. An [ImageNotFound](#ImageNotFound) error will
@@ -637,8 +675,8 @@ method HistoryImage(name: string) -> (history: []ImageHistory)
# PushImage takes three input arguments: the name or ID of an image, the fully-qualified destination name of the image,
# and a boolean as to whether tls-verify should be used (with false disabling TLS, not affecting the default behavior).
# It will return an [ImageNotFound](#ImageNotFound) error if
-# the image cannot be found in local storage; otherwise the ID of the image will be returned on success.
-method PushImage(name: string, tag: string, tlsverify: bool, signaturePolicy: string, creds: string, certDir: string, compress: bool, format: string, removeSignatures: bool, signBy: string) -> (image: string)
+# the image cannot be found in local storage; otherwise it will return a [MoreResponse](#MoreResponse)
+method PushImage(name: string, tag: string, tlsverify: ?bool, signaturePolicy: string, creds: string, certDir: string, compress: bool, format: string, removeSignatures: bool, signBy: string) -> (reply: MoreResponse)
# TagImage takes the name or ID of an image in local storage as well as the desired tag name. If the image cannot
# be found, an [ImageNotFound](#ImageNotFound) error will be returned; otherwise, the ID of the image is returned on success.
@@ -656,10 +694,10 @@ method TagImage(name: string, tagged: string) -> (image: string)
# ~~~
method RemoveImage(name: string, force: bool) -> (image: string)
-# SearchImage takes the string of an image name and a limit of searches from each registries to be returned. SearchImage
-# will then use a glob-like match to find the image you are searching for. The images are returned in an array of
-# ImageSearch structures which contain information about the image as well as its fully-qualified name.
-method SearchImage(name: string, limit: int) -> (images: []ImageSearch)
+# SearchImages searches available registries for images that contain the
+# contents of "query" in their name. If "limit" is given, limits the amount of
+# search results per registry.
+method SearchImages(query: string, limit: ?int, tlsVerify: ?bool, filter: ImageSearchFilter) -> (results: []ImageSearchResult)
# DeleteUnusedImages deletes any images not associated with a container. The IDs of the deleted images are returned
# in a string array.
@@ -697,16 +735,10 @@ method ImportImage(source: string, reference: string, message: string, changes:
# error will be returned. See also [ImportImage](ImportImage).
method ExportImage(name: string, destination: string, compress: bool, tags: []string) -> (image: string)
-# PullImage pulls an image from a repository to local storage. After the pull is successful, the ID of the image
-# is returned.
-# #### Example
-# ~~~
-# $ varlink call -m unix:/run/podman/io.podman/io.podman.PullImage '{"name": "registry.fedoraproject.org/fedora"}'
-# {
-# "id": "426866d6fa419873f97e5cbd320eeb22778244c1dfffa01c944db3114f55772e"
-# }
-# ~~~
-method PullImage(name: string, certDir: string, creds: string, signaturePolicy: string, tlsVerify: bool) -> (id: string)
+# PullImage pulls an image from a repository to local storage. After a successful pull, the image id and logs
+# are returned as a [MoreResponse](#MoreResponse). This connection will also handle a WantsMores request to send
+# status as it occurs.
+method PullImage(name: string, certDir: string, creds: string, signaturePolicy: string, tlsVerify: ?bool) -> (reply: MoreResponse)
# CreatePod creates a new empty pod. It uses a [PodCreate](#PodCreate) type for input.
# On success, the ID of the newly created pod will be returned.
@@ -913,10 +945,10 @@ method UnpausePod(name: string) -> (pod: string)
method RemovePod(name: string, force: bool) -> (pod: string)
# This method has not been implemented yet.
-method WaitPod() -> (notimplemented: NotImplemented)
+# method WaitPod() -> (notimplemented: NotImplemented)
# This method has not been implemented yet.
-method TopPod() -> (notimplemented: NotImplemented)
+# method TopPod() -> (notimplemented: NotImplemented)
# GetPodStats takes the name or ID of a pod and returns a pod name and slice of ContainerStats structure which
# contains attributes like memory and cpu usage. If the pod cannot be found, a [PodNotFound](#PodNotFound)
@@ -1020,19 +1052,19 @@ method UnmountContainer(name: string, force: bool) -> ()
method ImagesPrune(all: bool) -> (pruned: []string)
# This function is not implemented yet.
-method ListContainerPorts(name: string) -> (notimplemented: NotImplemented)
+# method ListContainerPorts(name: string) -> (notimplemented: NotImplemented)
# GenerateKube generates a Kubernetes v1 Pod description of a Podman container or pod
# and its containers. The description is in YAML. See also [ReplayKube](ReplayKube).
-method GenerateKube() -> (notimplemented: NotImplemented)
+# method GenerateKube() -> (notimplemented: NotImplemented)
# GenerateKubeService generates a Kubernetes v1 Service description of a Podman container or pod
# and its containers. The description is in YAML. See also [GenerateKube](GenerateKube).
-method GenerateKubeService() -> (notimplemented: NotImplemented)
+# method GenerateKubeService() -> (notimplemented: NotImplemented)
# ReplayKube recreates a pod and its containers based on a Kubernetes v1 Pod description (in YAML)
# like that created by GenerateKube. See also [GenerateKube](GenerateKube).
-method ReplayKube() -> (notimplemented: NotImplemented)
+# method ReplayKube() -> (notimplemented: NotImplemented)
# ContainerConfig returns a container's config in string form. This call is for
# development of Podman only and generally should not be used.
@@ -1050,20 +1082,53 @@ method ContainerInspectData(name: string) -> (config: string)
# development of Podman only and generally should not be used.
method ContainerStateData(name: string) -> (config: string)
+# PodStateData returns inspect-level information of a given pod in string form. This call is for
+# development of Podman only and generally should not be used.
+method PodStateData(name: string) -> (config: string)
+
+# SendFile allows a remote client to send a file to the host
method SendFile(type: string, length: int) -> (file_handle: string)
+
+# ReceiveFile allows the host to send a remote client a file
method ReceiveFile(path: string, delete: bool) -> (len: int)
+# VolumeCreate creates a volume on a remote host
+method VolumeCreate(options: VolumeCreateOpts) -> (volumeName: string)
+
+# VolumeRemove removes a volume on a remote host
+method VolumeRemove(options: VolumeRemoveOpts) -> (volumeNames: []string)
+
+# GetVolumes gets a slice of the volumes on a remote host
+method GetVolumes(args: []string, all: bool) -> (volumes: []Volume)
+
+# VolumesPrune removes unused volumes on the host
+method VolumesPrune() -> (prunedNames: []string, prunedErrors: []string)
+
+# ImageSave allows you to save an image from the local image storage to a tarball
+method ImageSave(options: ImageSaveOptions) -> (reply: MoreResponse)
+
+# GetPodsByContext allows you to get a list of pod ids depending on all, latest, or a list of
+# pod names. Here, the latest pod means the latest by creation date. In a multi-
+# user environment, results might differ from what you expect.
+method GetPodsByContext(all: bool, latest: bool, args: []string) -> (pods: []string)
+
+# LoadImage allows you to load an image into local storage from a tarball.
+method LoadImage(name: string, inputFile: string, quiet: bool, deleteFile: bool) -> (reply: MoreResponse)
+
# ImageNotFound means the image could not be found by the provided name or ID in local storage.
-error ImageNotFound (name: string)
+error ImageNotFound (id: string, reason: string)
# ContainerNotFound means the container could not be found by the provided name or ID in local storage.
-error ContainerNotFound (name: string)
+error ContainerNotFound (id: string, reason: string)
# NoContainerRunning means none of the containers requested are running in a command that requires a running container.
error NoContainerRunning ()
# PodNotFound means the pod could not be found by the provided name or ID in local storage.
-error PodNotFound (name: string)
+error PodNotFound (name: string, reason: string)
+
+# VolumeNotFound means the volume could not be found by the name or ID in local storage.
+error VolumeNotFound (id: string, reason: string)
# PodContainerError means a container associated with a pod failed to perform an operation. It contains
# a container ID of the container that failed.
diff --git a/cmd/podman/varlink_dummy.go b/cmd/podman/varlink_dummy.go
index ec4bbb208..430511d72 100644
--- a/cmd/podman/varlink_dummy.go
+++ b/cmd/podman/varlink_dummy.go
@@ -2,8 +2,10 @@
package main
-import (
- "github.com/urfave/cli"
-)
+import "github.com/spf13/cobra"
-var varlinkCommand *cli.Command
+var (
+ _varlinkCommand = &cobra.Command{
+ Use: "",
+ }
+)
diff --git a/cmd/podman/version.go b/cmd/podman/version.go
index ce773ee2e..c65ba94f9 100644
--- a/cmd/podman/version.go
+++ b/cmd/podman/version.go
@@ -6,20 +6,41 @@ import (
"text/tabwriter"
"time"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
"github.com/containers/libpod/libpod"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
+var (
+ versionCommand cliconfig.VersionValues
+ _versionCommand = &cobra.Command{
+ Use: "version",
+ Short: "Display the Podman Version Information",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ versionCommand.InputArgs = args
+ versionCommand.GlobalFlags = MainGlobalOpts
+ return versionCmd(&versionCommand)
+ },
+ }
+)
+
+func init() {
+ versionCommand.Command = _versionCommand
+ versionCommand.SetUsageTemplate(UsageTemplate())
+ flags := versionCommand.Flags()
+ flags.StringVarP(&versionCommand.Format, "format", "f", "", "Change the output format to JSON or a Go template")
+}
+
// versionCmd gets and prints version info for version command
-func versionCmd(c *cli.Context) error {
+func versionCmd(c *cliconfig.VersionValues) error {
output, err := libpod.GetVersion()
if err != nil {
errors.Wrapf(err, "unable to determine version")
}
- versionOutputFormat := c.String("format")
+ versionOutputFormat := c.Format
if versionOutputFormat != "" {
var out formats.Writer
switch versionOutputFormat {
@@ -46,19 +67,3 @@ func versionCmd(c *cli.Context) error {
fmt.Fprintf(w, "OS/Arch:\t%s\n", output.OsArch)
return nil
}
-
-// Cli command to print out the full version of podman
-var (
- versionCommand = cli.Command{
- Name: "version",
- Usage: "Display the Podman Version Information",
- Action: versionCmd,
- Flags: versionFlags,
- }
- versionFlags = []cli.Flag{
- cli.StringFlag{
- Name: "format, f",
- Usage: "Change the output format to JSON or a Go template",
- },
- }
-)
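The version.go hunk above is the simplest instance of the cli-to-cobra migration this patch applies throughout cmd/podman: flag values live in a plain struct, and the cobra `RunE` callback copies the parsed args into that struct before calling the handler. Below is a minimal, self-contained sketch of that wiring, assuming nothing beyond the spf13/cobra package; `exampleValues`, `exampleCmd`, and `runExample` are made-up names, not part of the patch.

```go
// Hedged sketch of the urfave/cli -> cobra pattern used in this patch:
// flag values live in a plain struct and RunE forwards args to a handler.
// All names here are illustrative only.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

type exampleValues struct {
	Format    string
	InputArgs []string
}

var (
	exampleVals exampleValues

	exampleCmd = &cobra.Command{
		Use:   "example",
		Short: "Show the command/values-struct wiring",
		RunE: func(cmd *cobra.Command, args []string) error {
			exampleVals.InputArgs = args
			return runExample(&exampleVals)
		},
	}
)

func init() {
	// Bind flags directly onto the values struct, as the patch does in init().
	exampleCmd.Flags().StringVarP(&exampleVals.Format, "format", "f", "", "Change the output format")
}

func runExample(v *exampleValues) error {
	fmt.Printf("format=%q args=%v\n", v.Format, v.InputArgs)
	return nil
}

func main() {
	if err := exampleCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```

Keeping the parsed flags in a struct lets the handler be exercised without constructing a CLI context, which is presumably part of the motivation for the cliconfig.*Values types introduced here.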
diff --git a/cmd/podman/volume.go b/cmd/podman/volume.go
index 913592e74..8a8664151 100644
--- a/cmd/podman/volume.go
+++ b/cmd/podman/volume.go
@@ -1,26 +1,31 @@
package main
import (
- "github.com/urfave/cli"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/spf13/cobra"
)
-var (
- volumeDescription = `Manage volumes.
+var volumeDescription = `Manage volumes.
Volumes are created in and can be shared between containers.`
- volumeSubCommands = []cli.Command{
- volumeCreateCommand,
- volumeLsCommand,
- volumeRmCommand,
- volumeInspectCommand,
- volumePruneCommand,
- }
- volumeCommand = cli.Command{
- Name: "volume",
- Usage: "Manage volumes",
- Description: volumeDescription,
- UseShortOptionHandling: true,
- Subcommands: volumeSubCommands,
- }
-)
+var volumeCommand = cliconfig.PodmanCommand{
+ Command: &cobra.Command{
+ Use: "volume",
+ Short: "Manage volumes",
+ Long: volumeDescription,
+ },
+}
+var volumeSubcommands = []*cobra.Command{
+ _volumeCreateCommand,
+ _volumeLsCommand,
+ _volumeRmCommand,
+ _volumeInspectCommand,
+ _volumePruneCommand,
+}
+
+func init() {
+ volumeCommand.SetUsageTemplate(UsageTemplate())
+ volumeCommand.AddCommand(volumeSubcommands...)
+ rootCmd.AddCommand(volumeCommand.Command)
+}
diff --git a/cmd/podman/volume_create.go b/cmd/podman/volume_create.go
index 0b5f8d1e3..833191082 100644
--- a/cmd/podman/volume_create.go
+++ b/cmd/podman/volume_create.go
@@ -3,95 +3,69 @@ package main
import (
"fmt"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
-var volumeCreateDescription = `
+var (
+ volumeCreateCommand cliconfig.VolumeCreateValues
+ volumeCreateDescription = `
podman volume create
Creates a new volume. If using the default driver, "local", the volume will
be created at.`
-var volumeCreateFlags = []cli.Flag{
- cli.StringFlag{
- Name: "driver",
- Usage: "Specify volume driver name (default local)",
- },
- cli.StringSliceFlag{
- Name: "label, l",
- Usage: "Set metadata for a volume (default [])",
- },
- cli.StringSliceFlag{
- Name: "opt, o",
- Usage: "Set driver specific options (default [])",
- },
-}
-
-var volumeCreateCommand = cli.Command{
- Name: "create",
- Usage: "Create a new volume",
- Description: volumeCreateDescription,
- Flags: volumeCreateFlags,
- Action: volumeCreateCmd,
- SkipArgReorder: true,
- ArgsUsage: "[VOLUME-NAME]",
- UseShortOptionHandling: true,
-}
+ _volumeCreateCommand = &cobra.Command{
+ Use: "create",
+ Short: "Create a new volume",
+ Long: volumeCreateDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ volumeCreateCommand.InputArgs = args
+ volumeCreateCommand.GlobalFlags = MainGlobalOpts
+ return volumeCreateCmd(&volumeCreateCommand)
+ },
+ Example: `podman volume create myvol
+ podman volume create
+ podman volume create --label foo=bar myvol`,
+ }
+)
-func volumeCreateCmd(c *cli.Context) error {
- var (
- options []libpod.VolumeCreateOption
- err error
- volName string
- )
+func init() {
+ volumeCreateCommand.Command = _volumeCreateCommand
+ volumeCreateCommand.SetUsageTemplate(UsageTemplate())
+ flags := volumeCreateCommand.Flags()
+ flags.StringVar(&volumeCreateCommand.Driver, "driver", "", "Specify volume driver name (default local)")
+ flags.StringSliceVarP(&volumeCreateCommand.Label, "label", "l", []string{}, "Set metadata for a volume (default [])")
+ flags.StringSliceVarP(&volumeCreateCommand.Opt, "opt", "o", []string{}, "Set driver specific options (default [])")
- if err = validateFlags(c, volumeCreateFlags); err != nil {
- return err
- }
+}
- runtime, err := libpodruntime.GetRuntime(c)
+func volumeCreateCmd(c *cliconfig.VolumeCreateValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- if len(c.Args()) > 1 {
+ if len(c.InputArgs) > 1 {
return errors.Errorf("too many arguments, create takes at most 1 argument")
}
- if len(c.Args()) > 0 {
- volName = c.Args()[0]
- options = append(options, libpod.WithVolumeName(volName))
- }
-
- if c.IsSet("driver") {
- options = append(options, libpod.WithVolumeDriver(c.String("driver")))
- }
-
- labels, err := getAllLabels([]string{}, c.StringSlice("label"))
+ labels, err := getAllLabels([]string{}, c.Label)
if err != nil {
return errors.Wrapf(err, "unable to process labels")
}
- if len(labels) != 0 {
- options = append(options, libpod.WithVolumeLabels(labels))
- }
- opts, err := getAllLabels([]string{}, c.StringSlice("opt"))
+ opts, err := getAllLabels([]string{}, c.Opt)
if err != nil {
return errors.Wrapf(err, "unable to process options")
}
- if len(options) != 0 {
- options = append(options, libpod.WithVolumeOptions(opts))
- }
- vol, err := runtime.NewVolume(getContext(), options...)
- if err != nil {
- return err
+ volumeName, err := runtime.CreateVolume(getContext(), c, labels, opts)
+ if err == nil {
+ fmt.Println(volumeName)
}
- fmt.Printf("%s\n", vol.Name())
-
- return nil
+ return err
}
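volumeCreateCmd now hands the repeated --label and --opt values straight to getAllLabels. As a rough illustration only (this is not the real getAllLabels implementation), a parser for such "key=value" slices could look like the sketch below; parseKeyValues is a hypothetical helper.

```go
// Hedged sketch: turn repeated --label/--opt style "key=value" strings into a
// map, tolerating bare keys. Not the actual getAllLabels used by podman.
package main

import (
	"fmt"
	"strings"
)

func parseKeyValues(pairs []string) (map[string]string, error) {
	out := make(map[string]string, len(pairs))
	for _, p := range pairs {
		kv := strings.SplitN(p, "=", 2)
		if kv[0] == "" {
			return nil, fmt.Errorf("invalid entry %q: empty key", p)
		}
		if len(kv) == 1 {
			out[kv[0]] = "" // bare key, no value
			continue
		}
		out[kv[0]] = kv[1]
	}
	return out, nil
}

func main() {
	labels, err := parseKeyValues([]string{"foo=bar", "tier=web", "debug"})
	if err != nil {
		panic(err)
	}
	fmt.Println(labels) // map[debug: foo:bar tier:web]
}
```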
diff --git a/cmd/podman/volume_inspect.go b/cmd/podman/volume_inspect.go
index 152f1d098..dc6afbc36 100644
--- a/cmd/podman/volume_inspect.go
+++ b/cmd/podman/volume_inspect.go
@@ -1,63 +1,58 @@
package main
import (
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
-var volumeInspectDescription = `
+var (
+ volumeInspectCommand cliconfig.VolumeInspectValues
+ volumeInspectDescription = `
podman volume inspect
Display detailed information on one or more volumes. Can change the format
from JSON to a Go template.
`
+ _volumeInspectCommand = &cobra.Command{
+ Use: "inspect",
+ Short: "Display detailed information on one or more volumes",
+ Long: volumeInspectDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ volumeInspectCommand.InputArgs = args
+ volumeInspectCommand.GlobalFlags = MainGlobalOpts
+ return volumeInspectCmd(&volumeInspectCommand)
+ },
+ Example: `podman volume inspect myvol
+ podman volume inspect --all
+ podman volume inspect --format "{{.Driver}} {{.Scope}}" myvol`,
+ }
+)
-var volumeInspectFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Inspect all volumes",
- },
- cli.StringFlag{
- Name: "format, f",
- Usage: "Format volume output using Go template",
- Value: "json",
- },
-}
+func init() {
+ volumeInspectCommand.Command = _volumeInspectCommand
+ volumeInspectCommand.SetUsageTemplate(UsageTemplate())
+ flags := volumeInspectCommand.Flags()
+ flags.BoolVarP(&volumeInspectCommand.All, "all", "a", false, "Inspect all volumes")
+ flags.StringVarP(&volumeInspectCommand.Format, "format", "f", "json", "Format volume output using Go template")
-var volumeInspectCommand = cli.Command{
- Name: "inspect",
- Usage: "Display detailed information on one or more volumes",
- Description: volumeInspectDescription,
- Flags: volumeInspectFlags,
- Action: volumeInspectCmd,
- SkipArgReorder: true,
- ArgsUsage: "[VOLUME-NAME ...]",
- UseShortOptionHandling: true,
}
-func volumeInspectCmd(c *cli.Context) error {
- var err error
-
- if err = validateFlags(c, volumeInspectFlags); err != nil {
- return err
+func volumeInspectCmd(c *cliconfig.VolumeInspectValues) error {
+ if (c.All && len(c.InputArgs) > 0) || (!c.All && len(c.InputArgs) < 1) {
+ return errors.New("provide one or more volume names or use --all")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- opts := volumeLsOptions{
- Format: c.String("format"),
- }
-
- vols, lastError := getVolumesFromContext(c, runtime)
- if lastError != nil {
- logrus.Errorf("%q", lastError)
+ vols, err := runtime.InspectVolumes(getContext(), c)
+ if err != nil {
+ return err
}
-
- return generateVolLsOutput(vols, opts, runtime)
+ return generateVolLsOutput(vols, volumeLsOptions{Format: c.Format})
}
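The rewritten volumeInspectCmd enforces that callers pass either --all or at least one volume name, but not both. A hedged, stand-alone sketch of that argument check follows; validateAllOrNames is an invented name and the error strings are illustrative.

```go
// Hedged sketch of the --all vs. explicit-names check used by the new
// volume inspect/rm commands. validateAllOrNames is a hypothetical helper.
package main

import (
	"errors"
	"fmt"
)

func validateAllOrNames(all bool, names []string) error {
	if all && len(names) > 0 {
		return errors.New("cannot combine --all with explicit volume names")
	}
	if !all && len(names) == 0 {
		return errors.New("provide one or more volume names or use --all")
	}
	return nil
}

func main() {
	fmt.Println(validateAllOrNames(false, nil))             // error: need names or --all
	fmt.Println(validateAllOrNames(true, []string{"vol"}))  // error: both given
	fmt.Println(validateAllOrNames(false, []string{"vol"})) // <nil>
}
```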
diff --git a/cmd/podman/volume_ls.go b/cmd/podman/volume_ls.go
index 0f94549ee..5adfc1e91 100644
--- a/cmd/podman/volume_ls.go
+++ b/cmd/podman/volume_ls.go
@@ -4,11 +4,11 @@ import (
"reflect"
"strings"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/formats"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
// volumeOptions is the "ls" command options
@@ -37,70 +37,64 @@ type volumeLsJSONParams struct {
Scope string `json:"scope"`
}
-var volumeLsDescription = `
+var (
+ volumeLsCommand cliconfig.VolumeLsValues
+
+ volumeLsDescription = `
podman volume ls
List all available volumes. The output of the volumes can be filtered
and the output format can be changed to JSON or a user specified Go template.
`
+ _volumeLsCommand = &cobra.Command{
+ Use: "ls",
+ Aliases: []string{"list"},
+ Short: "List volumes",
+ Long: volumeLsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ volumeLsCommand.InputArgs = args
+ volumeLsCommand.GlobalFlags = MainGlobalOpts
+ return volumeLsCmd(&volumeLsCommand)
+ },
+ }
+)
-var volumeLsFlags = []cli.Flag{
- cli.StringFlag{
- Name: "filter, f",
- Usage: "Filter volume output",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "Format volume output using Go template",
- Value: "table {{.Driver}}\t{{.Name}}",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "Print volume output in quiet mode",
- },
-}
+func init() {
+ volumeLsCommand.Command = _volumeLsCommand
+ volumeLsCommand.SetUsageTemplate(UsageTemplate())
+ flags := volumeLsCommand.Flags()
-var volumeLsCommand = cli.Command{
- Name: "ls",
- Aliases: []string{"list"},
- Usage: "List volumes",
- Description: volumeLsDescription,
- Flags: volumeLsFlags,
- Action: volumeLsCmd,
- SkipArgReorder: true,
- UseShortOptionHandling: true,
+ flags.StringVarP(&volumeLsCommand.Filter, "filter", "f", "", "Filter volume output")
+ flags.StringVar(&volumeLsCommand.Format, "format", "table {{.Driver}}\t{{.Name}}", "Format volume output using Go template")
+ flags.BoolVarP(&volumeLsCommand.Quiet, "quiet", "q", false, "Print volume output in quiet mode")
}
-func volumeLsCmd(c *cli.Context) error {
- if err := validateFlags(c, volumeLsFlags); err != nil {
- return err
- }
-
- runtime, err := libpodruntime.GetRuntime(c)
+func volumeLsCmd(c *cliconfig.VolumeLsValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- if len(c.Args()) > 0 {
+ if len(c.InputArgs) > 0 {
return errors.Errorf("too many arguments, ls takes no arguments")
}
opts := volumeLsOptions{
- Quiet: c.Bool("quiet"),
+ Quiet: c.Quiet,
}
opts.Format = genVolLsFormat(c)
// Get the filter functions based on any filters set
- var filterFuncs []libpod.VolumeFilter
- if c.String("filter") != "" {
- filters := strings.Split(c.String("filter"), ",")
+ var filterFuncs []adapter.VolumeFilter
+ if c.Filter != "" {
+ filters := strings.Split(c.Filter, ",")
for _, f := range filters {
filterSplit := strings.Split(f, "=")
if len(filterSplit) < 2 {
return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
}
- generatedFunc, err := generateVolumeFilterFuncs(filterSplit[0], filterSplit[1], runtime)
+ generatedFunc, err := generateVolumeFilterFuncs(filterSplit[0], filterSplit[1])
if err != nil {
return errors.Wrapf(err, "invalid filter")
}
@@ -108,13 +102,12 @@ func volumeLsCmd(c *cli.Context) error {
}
}
- volumes, err := runtime.GetAllVolumes()
+ volumes, err := runtime.Volumes(getContext())
if err != nil {
return err
}
-
// Get the volumes that match the filter
- volsFiltered := make([]*libpod.Volume, 0, len(volumes))
+ volsFiltered := make([]*adapter.Volume, 0, len(volumes))
for _, vol := range volumes {
include := true
for _, filter := range filterFuncs {
@@ -125,18 +118,18 @@ func volumeLsCmd(c *cli.Context) error {
volsFiltered = append(volsFiltered, vol)
}
}
- return generateVolLsOutput(volsFiltered, opts, runtime)
+ return generateVolLsOutput(volsFiltered, opts)
}
// generate the template based on conditions given
-func genVolLsFormat(c *cli.Context) string {
+func genVolLsFormat(c *cliconfig.VolumeLsValues) string {
var format string
- if c.String("format") != "" {
+ if c.Format != "" {
// "\t" from the command line is not being recognized as a tab
// replacing the string "\t" to a tab character if the user passes in "\t"
- format = strings.Replace(c.String("format"), `\t`, "\t", -1)
+ format = strings.Replace(c.Format, `\t`, "\t", -1)
}
- if c.Bool("quiet") {
+ if c.Quiet {
format = "{{.Name}}"
}
return format
@@ -211,7 +204,7 @@ func getVolTemplateOutput(lsParams []volumeLsJSONParams, opts volumeLsOptions) (
}
// getVolJSONParams returns the volumes in JSON format
-func getVolJSONParams(volumes []*libpod.Volume, opts volumeLsOptions, runtime *libpod.Runtime) ([]volumeLsJSONParams, error) {
+func getVolJSONParams(volumes []*adapter.Volume) []volumeLsJSONParams {
var lsOutput []volumeLsJSONParams
for _, volume := range volumes {
@@ -226,25 +219,19 @@ func getVolJSONParams(volumes []*libpod.Volume, opts volumeLsOptions, runtime *l
lsOutput = append(lsOutput, params)
}
- return lsOutput, nil
+ return lsOutput
}
// generateVolLsOutput generates the output based on the format, JSON or Go Template, and prints it out
-func generateVolLsOutput(volumes []*libpod.Volume, opts volumeLsOptions, runtime *libpod.Runtime) error {
+func generateVolLsOutput(volumes []*adapter.Volume, opts volumeLsOptions) error {
if len(volumes) == 0 && opts.Format != formats.JSONString {
return nil
}
- lsOutput, err := getVolJSONParams(volumes, opts, runtime)
- if err != nil {
- return err
- }
+ lsOutput := getVolJSONParams(volumes)
var out formats.Writer
switch opts.Format {
case formats.JSONString:
- if err != nil {
- return errors.Wrapf(err, "unable to create JSON for volume output")
- }
out = formats.JSONStructArray{Output: volLsToGeneric([]volumeLsTemplateParams{}, lsOutput)}
default:
lsOutput, err := getVolTemplateOutput(lsOutput, opts)
@@ -257,18 +244,18 @@ func generateVolLsOutput(volumes []*libpod.Volume, opts volumeLsOptions, runtime
}
// generateVolumeFilterFuncs returns the true if the volume matches the filter set, otherwise it returns false.
-func generateVolumeFilterFuncs(filter, filterValue string, runtime *libpod.Runtime) (func(volume *libpod.Volume) bool, error) {
+func generateVolumeFilterFuncs(filter, filterValue string) (func(volume *adapter.Volume) bool, error) {
switch filter {
case "name":
- return func(v *libpod.Volume) bool {
+ return func(v *adapter.Volume) bool {
return strings.Contains(v.Name(), filterValue)
}, nil
case "driver":
- return func(v *libpod.Volume) bool {
+ return func(v *adapter.Volume) bool {
return v.Driver() == filterValue
}, nil
case "scope":
- return func(v *libpod.Volume) bool {
+ return func(v *adapter.Volume) bool {
return v.Scope() == filterValue
}, nil
case "label":
@@ -279,7 +266,7 @@ func generateVolumeFilterFuncs(filter, filterValue string, runtime *libpod.Runti
} else {
filterValue = ""
}
- return func(v *libpod.Volume) bool {
+ return func(v *adapter.Volume) bool {
for labelKey, labelValue := range v.Labels() {
if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
return true
@@ -295,7 +282,7 @@ func generateVolumeFilterFuncs(filter, filterValue string, runtime *libpod.Runti
} else {
filterValue = ""
}
- return func(v *libpod.Volume) bool {
+ return func(v *adapter.Volume) bool {
for labelKey, labelValue := range v.Options() {
if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
return true
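The filter handling above keeps its shape across the libpod-to-adapter move: each "key=value" filter becomes a predicate closure, and a volume is listed only if every predicate matches. The sketch below reproduces that pattern in isolation, assuming a toy Volume struct rather than the adapter.Volume type.

```go
// Hedged, self-contained sketch of the filter-closure pattern: each
// "name=value" filter becomes a predicate, and a volume is kept only when
// every predicate matches. Volume here is a stand-in, not libpod's type.
package main

import (
	"fmt"
	"strings"
)

type Volume struct {
	Name   string
	Driver string
}

func makeFilter(key, value string) (func(Volume) bool, error) {
	switch key {
	case "name":
		return func(v Volume) bool { return strings.Contains(v.Name, value) }, nil
	case "driver":
		return func(v Volume) bool { return v.Driver == value }, nil
	default:
		return nil, fmt.Errorf("unsupported filter %q", key)
	}
}

func main() {
	vols := []Volume{{"data", "local"}, {"cache", "local"}, {"db-data", "nfs"}}

	var preds []func(Volume) bool
	for _, f := range []string{"name=data", "driver=local"} {
		kv := strings.SplitN(f, "=", 2)
		p, err := makeFilter(kv[0], kv[1])
		if err != nil {
			panic(err)
		}
		preds = append(preds, p)
	}

	// Keep a volume only if all predicates match.
	for _, v := range vols {
		keep := true
		for _, p := range preds {
			if !p(v) {
				keep = false
				break
			}
		}
		if keep {
			fmt.Println(v.Name)
		}
	}
}
```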
diff --git a/cmd/podman/volume_prune.go b/cmd/podman/volume_prune.go
index 41d95f9c7..1f7931aa4 100644
--- a/cmd/podman/volume_prune.go
+++ b/cmd/podman/volume_prune.go
@@ -7,73 +7,69 @@ import (
"os"
"strings"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/adapter"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
-var volumePruneDescription = `
+var (
+ volumePruneCommand cliconfig.VolumePruneValues
+ volumePruneDescription = `
podman volume prune
Remove all unused volumes. Will prompt for confirmation if not
using force.
`
+ _volumePruneCommand = &cobra.Command{
+ Use: "prune",
+ Short: "Remove all unused volumes",
+ Long: volumePruneDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ volumePruneCommand.InputArgs = args
+ volumePruneCommand.GlobalFlags = MainGlobalOpts
+ return volumePruneCmd(&volumePruneCommand)
+ },
+ }
+)
-var volumePruneFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "force, f",
- Usage: "Do not prompt for confirmation",
- },
-}
+func init() {
+ volumePruneCommand.Command = _volumePruneCommand
+ volumePruneCommand.SetUsageTemplate(UsageTemplate())
+ flags := volumePruneCommand.Flags()
-var volumePruneCommand = cli.Command{
- Name: "prune",
- Usage: "Remove all unused volumes",
- Description: volumePruneDescription,
- Flags: volumePruneFlags,
- Action: volumePruneCmd,
- SkipArgReorder: true,
- UseShortOptionHandling: true,
+ flags.BoolVarP(&volumePruneCommand.Force, "force", "f", false, "Do not prompt for confirmation")
}
func volumePrune(runtime *adapter.LocalRuntime, ctx context.Context) error {
- var lastError error
-
- volumes, err := runtime.GetAllVolumes()
- if err != nil {
- return err
+ prunedNames, prunedErrors := runtime.PruneVolumes(ctx)
+ for _, name := range prunedNames {
+ fmt.Println(name)
+ }
+ if len(prunedErrors) == 0 {
+ return nil
}
+ // Grab the last error
+ lastError := prunedErrors[len(prunedErrors)-1]
+ // Remove the last error from the error slice
+ prunedErrors = prunedErrors[:len(prunedErrors)-1]
- for _, vol := range volumes {
- err = runtime.RemoveVolume(ctx, vol, false, true)
- if err == nil {
- fmt.Println(vol.Name())
- } else if err != libpod.ErrVolumeBeingUsed {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "failed to remove volume %q", vol.Name())
- }
+ for _, err := range prunedErrors {
+ logrus.Errorf("%q", err)
}
return lastError
}
-func volumePruneCmd(c *cli.Context) error {
-
- if err := validateFlags(c, volumePruneFlags); err != nil {
- return err
- }
-
- runtime, err := adapter.GetRuntime(c)
+func volumePruneCmd(c *cliconfig.VolumePruneValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
// Prompt for confirmation if --force is not set
- if !c.Bool("force") {
+ if !c.Force {
reader := bufio.NewReader(os.Stdin)
fmt.Println("WARNING! This will remove all volumes not used by at least one container.")
fmt.Print("Are you sure you want to continue? [y/N] ")
@@ -85,6 +81,5 @@ func volumePruneCmd(c *cli.Context) error {
return nil
}
}
-
return volumePrune(runtime, getContext())
}
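volumePrune now follows a collect-then-report convention: print every pruned name, log all but the last error, and return the last error so the command still exits non-zero. A small stand-alone sketch of that convention, using the standard log package in place of logrus and an invented report helper:

```go
// Hedged sketch of the error-aggregation convention in volumePrune above.
package main

import (
	"errors"
	"fmt"
	"log"
)

func report(prunedNames []string, prunedErrors []error) error {
	// Print every successfully pruned volume name.
	for _, name := range prunedNames {
		fmt.Println(name)
	}
	if len(prunedErrors) == 0 {
		return nil
	}
	// Log all but the final error, then return the final one to the caller.
	last := prunedErrors[len(prunedErrors)-1]
	for _, err := range prunedErrors[:len(prunedErrors)-1] {
		log.Printf("%q", err)
	}
	return last
}

func main() {
	err := report(
		[]string{"cache", "scratch"},
		[]error{errors.New(`volume "db-data" is being used`)},
	)
	if err != nil {
		log.Println("exit with:", err)
	}
}
```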
diff --git a/cmd/podman/volume_rm.go b/cmd/podman/volume_rm.go
index 3fb623624..03b6ccae1 100644
--- a/cmd/podman/volume_rm.go
+++ b/cmd/podman/volume_rm.go
@@ -3,69 +3,70 @@ package main
import (
"fmt"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
-var volumeRmDescription = `
+var (
+ volumeRmCommand cliconfig.VolumeRmValues
+ volumeRmDescription = `
podman volume rm
Remove one or more existing volumes. Will only remove volumes that are
not being used by any containers. To remove the volumes anyway, use the
--force flag.
`
+ _volumeRmCommand = &cobra.Command{
+ Use: "rm",
+ Aliases: []string{"remove"},
+ Short: "Remove one or more volumes",
+ Long: volumeRmDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ volumeRmCommand.InputArgs = args
+ volumeRmCommand.GlobalFlags = MainGlobalOpts
+ return volumeRmCmd(&volumeRmCommand)
+ },
+ Example: `podman volume rm myvol1 myvol2
+ podman volume rm --all
+ podman volume rm --force myvol`,
+ }
+)
-var volumeRmFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "all, a",
- Usage: "Remove all volumes",
- },
- cli.BoolFlag{
- Name: "force, f",
- Usage: "Remove a volume by force, even if it is being used by a container",
- },
-}
-
-var volumeRmCommand = cli.Command{
- Name: "rm",
- Aliases: []string{"remove"},
- Usage: "Remove one or more volumes",
- Description: volumeRmDescription,
- Flags: volumeRmFlags,
- Action: volumeRmCmd,
- ArgsUsage: "[VOLUME-NAME ...]",
- SkipArgReorder: true,
- UseShortOptionHandling: true,
+func init() {
+ volumeRmCommand.Command = _volumeRmCommand
+ volumeRmCommand.SetUsageTemplate(UsageTemplate())
+ flags := volumeRmCommand.Flags()
+ flags.BoolVarP(&volumeRmCommand.All, "all", "a", false, "Remove all volumes")
+ flags.BoolVarP(&volumeRmCommand.Force, "force", "f", false, "Remove a volume by force, even if it is being used by a container")
}
-func volumeRmCmd(c *cli.Context) error {
+func volumeRmCmd(c *cliconfig.VolumeRmValues) error {
var err error
- if err = validateFlags(c, volumeRmFlags); err != nil {
- return err
+ if (len(c.InputArgs) > 0 && c.All) || (len(c.InputArgs) < 1 && !c.All) {
+ return errors.New("choose either one or more volumes or all")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
-
- ctx := getContext()
-
- vols, lastError := getVolumesFromContext(c, runtime)
- for _, vol := range vols {
- err = runtime.RemoveVolume(ctx, vol, c.Bool("force"), false)
- if err != nil {
- if lastError != nil {
- logrus.Errorf("%q", lastError)
- }
- lastError = errors.Wrapf(err, "failed to remove volume %q", vol.Name())
- } else {
- fmt.Println(vol.Name())
+ deletedVolumeNames, err := runtime.RemoveVolumes(getContext(), c)
+ if err != nil {
+ if len(deletedVolumeNames) > 0 {
+ printDeleteVolumes(deletedVolumeNames)
+ return err
}
}
- return lastError
+ printDeleteVolumes(deletedVolumeNames)
+ return err
+}
+
+func printDeleteVolumes(volumes []string) {
+ for _, v := range volumes {
+ fmt.Println(v)
+ }
}
diff --git a/cmd/podman/wait.go b/cmd/podman/wait.go
index 35ad7a662..9df7cdbae 100644
--- a/cmd/podman/wait.go
+++ b/cmd/podman/wait.go
@@ -5,43 +5,51 @@ import (
"os"
"time"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
var (
+ waitCommand cliconfig.WaitValues
+
waitDescription = `
podman wait
Block until one or more containers stop and then print their exit codes
`
- waitFlags = []cli.Flag{
- cli.UintFlag{
- Name: "interval, i",
- Usage: "Milliseconds to wait before polling for completion",
- Value: 250,
+ _waitCommand = &cobra.Command{
+ Use: "wait",
+ Short: "Block on one or more containers",
+ Long: waitDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ waitCommand.InputArgs = args
+ waitCommand.GlobalFlags = MainGlobalOpts
+ return waitCmd(&waitCommand)
},
- LatestFlag,
- }
- waitCommand = cli.Command{
- Name: "wait",
- Usage: "Block on one or more containers",
- Description: waitDescription,
- Flags: sortFlags(waitFlags),
- Action: waitCmd,
- ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]",
- OnUsageError: usageErrorHandler,
+ Example: `podman wait --latest
+ podman wait --interval 5000 ctrID
+ podman wait ctrID1 ctrID2`,
}
)
-func waitCmd(c *cli.Context) error {
- args := c.Args()
- if len(args) < 1 && !c.Bool("latest") {
+func init() {
+ waitCommand.Command = _waitCommand
+ waitCommand.SetUsageTemplate(UsageTemplate())
+ flags := waitCommand.Flags()
+ flags.UintVarP(&waitCommand.Interval, "interval", "i", 250, "Milliseconds to wait before polling for completion")
+ flags.BoolVarP(&waitCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
+ markFlagHiddenForRemoteClient("latest", flags)
+}
+
+func waitCmd(c *cliconfig.WaitValues) error {
+ args := c.InputArgs
+ if len(args) < 1 && !c.Latest {
return errors.Errorf("you must provide at least one container name or id")
}
- runtime, err := libpodruntime.GetRuntime(c)
+ runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
@@ -52,7 +60,7 @@ func waitCmd(c *cli.Context) error {
}
var lastError error
- if c.Bool("latest") {
+ if c.Latest {
latestCtr, err := runtime.GetLatestContainer()
if err != nil {
return errors.Wrapf(err, "unable to wait on latest container")
@@ -65,10 +73,10 @@ func waitCmd(c *cli.Context) error {
if err != nil {
return errors.Wrapf(err, "unable to find container %s", container)
}
- if c.Uint("interval") == 0 {
+ if c.Interval == 0 {
return errors.Errorf("interval must be greater then 0")
}
- returnCode, err := ctr.WaitWithInterval(time.Duration(c.Uint("interval")) * time.Millisecond)
+ returnCode, err := ctr.WaitWithInterval(time.Duration(c.Interval) * time.Millisecond)
if err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
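waitCmd converts the uint --interval flag from milliseconds into a time.Duration before polling, and rejects an interval of zero. The sketch below shows that conversion and loop in isolation; waitWithInterval and isStopped are stand-ins, not libpod APIs.

```go
// Hedged sketch of the wait loop above: convert a millisecond flag value to a
// time.Duration, reject zero, and poll until the checked condition is done.
package main

import (
	"errors"
	"fmt"
	"time"
)

func waitWithInterval(intervalMS uint, isStopped func() (int, bool)) (int, error) {
	if intervalMS == 0 {
		return -1, errors.New("interval must be greater than 0")
	}
	tick := time.Duration(intervalMS) * time.Millisecond
	for {
		if code, done := isStopped(); done {
			return code, nil
		}
		time.Sleep(tick)
	}
}

func main() {
	checks := 0
	code, err := waitWithInterval(250, func() (int, bool) {
		checks++
		return 0, checks >= 3 // pretend the container exits on the third poll
	})
	fmt.Println(code, err)
}
```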
diff --git a/commands.md b/commands.md
index c7d03d5ad..3fd27ad5d 100644
--- a/commands.md
+++ b/commands.md
@@ -16,7 +16,7 @@
| [podman-container-refresh(1)](/docs/podman-container-refresh.1.md) | Refresh all containers state in database ||
| [podman-container-restore(1)](/docs/podman-container-restore.1.md) | Restores one or more running containers ||
| [podman-container-runlabel(1)](/docs/podman-container-runlabel.1.md) | Execute Image Label Method ||
-| [podman-cp(1)](/docs/podman-cp.1.md) | Instead of providing a `podman cp` command, the man page `podman-cp` describes how to use the `podman mount` command to have even more flexibility and functionality||
+| [podman-cp(1)](/docs/podman-cp.1.md) | Copy files/folders between a container and the local filesystem ||
| [podman-create(1)](/docs/podman-create.1.md) | Create a new container ||
| [podman-diff(1)](/docs/podman-diff.1.md) | Inspect changes on a container or image's filesystem |[![...](/docs/play.png)](https://asciinema.org/a/FXfWB9CKYFwYM4EfqW3NSZy1G)|
| [podman-exec(1)](/docs/podman-exec.1.md) | Execute a command in a running container
@@ -73,6 +73,7 @@
| [podman-unpause(1)](/docs/podman-unpause.1.md) | Unpause one or more running containers |[![...](/docs/play.png)](https://asciinema.org/a/141292)|
| [podman-varlink(1)](/docs/podman-varlink.1.md) | Run the varlink backend ||
| [podman-version(1)](/docs/podman-version.1.md) | Display the version information |[![...](/docs/play.png)](https://asciinema.org/a/mfrn61pjZT9Fc8L4NbfdSqfgu)|
+| [podman-volume(1)](/docs/podman-volume.1.md) | Manage Volumes ||
| [podman-volume-create(1)](/docs/podman-volume-create.1.md) | Create a volume ||
| [podman-volume-inspect(1)](/docs/podman-volume-inspect.1.md) | Get detailed information on one or more volumes ||
| [podman-volume-ls(1)](/docs/podman-volume-ls.1.md) | List all the available volumes ||
diff --git a/completions/bash/podman b/completions/bash/podman
index 9df87aef4..36ac27d52 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -1691,6 +1691,7 @@ _podman_container_run() {
--oom-score-adj
--pid
--pids-limit
+ --pod
--publish -p
--runtime
--rootfs
@@ -2319,6 +2320,7 @@ _podman_login() {
local boolean_options="
--help
-h
+ --password-stdin
"
_complete_ "$options_with_args" "$boolean_options"
}
diff --git a/contrib/cirrus/integration_test.sh b/contrib/cirrus/integration_test.sh
index 58c8af289..0fd86dfdc 100755
--- a/contrib/cirrus/integration_test.sh
+++ b/contrib/cirrus/integration_test.sh
@@ -19,7 +19,7 @@ case "${OS_RELEASE_ID}-${OS_RELEASE_VER}" in
ubuntu-18)
make install PREFIX=/usr ETCDIR=/etc
make test-binaries
- SKIP_USERNS=1 make localintegration
+ SKIP_USERNS=1 make localintegration GINKGOTIMEOUT=90m
;;
fedora-29) ;& # Continue to the next item
fedora-28) ;&
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 39e6c7699..8be696933 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -325,7 +325,7 @@ install_packer_copied_files(){
/etc/containers/registries.d/registry.access.redhat.com.yaml
}
-install_varlink(){
+install_varlink() {
echo "Installing varlink from the cheese-factory"
ooe.sh sudo -H pip3 install varlink
}
diff --git a/contrib/cirrus/packer/Makefile b/contrib/cirrus/packer/Makefile
index 0a783e979..91a1dfeef 100644
--- a/contrib/cirrus/packer/Makefile
+++ b/contrib/cirrus/packer/Makefile
@@ -4,7 +4,9 @@
# e.g for names see libpod_images.yml
PACKER_VER ?= 1.3.2
-PACKER_DIST_FILENAME := packer_${PACKER_VER}_linux_amd64.zip
+GOARCH=$(shell go env GOARCH)
+ARCH=$(uname -m)
+PACKER_DIST_FILENAME := packer_${PACKER_VER}_linux_${GOARCH}.zip
# Only needed for libpod_base_images target
TIMESTAMP := $(shell date +%s)
@@ -30,7 +32,7 @@ ${PACKER_DIST_FILENAME}:
packer: ${PACKER_DIST_FILENAME}
@curl -L --silent --show-error \
https://releases.hashicorp.com/packer/${PACKER_VER}/packer_${PACKER_VER}_SHA256SUMS \
- | grep 'linux_amd64' > /tmp/packer_sha256sums
+ | grep linux_${GOARCH} > /tmp/packer_sha256sums
@sha256sum --check /tmp/packer_sha256sums
@unzip -o ${PACKER_DIST_FILENAME}
@touch --reference=Makefile ${PACKER_DIST_FILENAME}
@@ -93,7 +95,7 @@ endif
-var GOSRC=$(GOSRC) \
-var PACKER_BASE=$(PACKER_BASE) \
-var SCRIPT_BASE=$(SCRIPT_BASE) \
- -var RHEL_BASE_IMAGE_NAME=$(shell basename $(RHEL_IMAGE_FILE) | tr -d '[[:space:]]' | sed -r -e 's/\.x86_64\.raw\.xz//' | tr '[[:upper:]]' '[[:lower:]]' | tr '[[:punct:]]' '-') \
+ -var RHEL_BASE_IMAGE_NAME=$(shell basename $(RHEL_IMAGE_FILE) | tr -d '[[:space:]]' | sed -r -e 's/\.${ARCH}\.raw\.xz//' | tr '[[:upper:]]' '[[:lower:]]' | tr '[[:punct:]]' '-') \
-var RHEL_IMAGE_FILE=$(RHEL_IMAGE_FILE) \
-var RHEL_CSUM_FILE=$(RHEL_CSUM_FILE) \
-var 'RHSM_COMMAND=$(RHSM_COMMAND)' \
diff --git a/contrib/cirrus/packer/fedora_setup.sh b/contrib/cirrus/packer/fedora_setup.sh
index 01c468901..de7ad4506 100644
--- a/contrib/cirrus/packer/fedora_setup.sh
+++ b/contrib/cirrus/packer/fedora_setup.sh
@@ -40,6 +40,7 @@ ooe.sh sudo dnf install -y \
golang-github-cpuguy83-go-md2man \
gpgme-devel \
iptables \
+ iproute \
libassuan-devel \
libcap-devel \
libnet \
diff --git a/contrib/cirrus/packer/ubuntu_setup.sh b/contrib/cirrus/packer/ubuntu_setup.sh
index af5671c90..5b7e1d714 100644
--- a/contrib/cirrus/packer/ubuntu_setup.sh
+++ b/contrib/cirrus/packer/ubuntu_setup.sh
@@ -28,7 +28,13 @@ export DEBIAN_FRONTEND=noninteractive
echo "Updating system and installing package dependencies"
ooe.sh sudo -E apt-get -qq update || sudo -E apt-get -qq update
ooe.sh sudo -E apt-get -qq upgrade || sudo -E apt-get -qq upgrade
-ooe.sh sudo -E apt-get -qq install --no-install-recommends \
+ooe.sh sudo -E apt-get -qq install software-properties-common
+
+# Required to have Go 1.11 on Ubuntu 18.04
+ooe.sh sudo -E add-apt-repository --yes ppa:longsleep/golang-backports
+ooe.sh sudo -E apt-get -qq update || sudo -E apt-get -qq update
+
+ooe.sh sudo -E apt-get -qq install \
apparmor \
autoconf \
automake \
@@ -42,6 +48,7 @@ ooe.sh sudo -E apt-get -qq install --no-install-recommends \
gettext \
go-md2man \
golang \
+ iproute \
iptables \
libaio-dev \
libapparmor-dev \
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 838f3c3f3..77c20d9bd 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -79,8 +79,9 @@ then
cd "${GOSRC}/"
source "$SCRIPT_BASE/lib.sh"
- # Only testing-VMs need deps installed
- [[ -n "$PACKER_BUILDS" ]] || install_testing_dependencies # must exist in $GOPATH
+ # Only testing-VMs need deps installed, not image-builder VM
+ echo "$CIRRUS_TASK_NAME" | grep -q 'image' || \
+ install_testing_dependencies # must exist in $GOPATH
fi
record_timestamp "env. setup end"
diff --git a/contrib/perftest/main.go b/contrib/perftest/main.go
index 6a6725ab9..237f4f6e6 100644
--- a/contrib/perftest/main.go
+++ b/contrib/perftest/main.go
@@ -201,7 +201,7 @@ func runSingleThreadedStressTest(ctx context.Context, client *libpod.Runtime, im
// Start container
startStartTime := time.Now()
- err = ctr.Start(ctx)
+ err = ctr.Start(ctx, false)
if err != nil {
return nil, err
}
@@ -218,7 +218,7 @@ func runSingleThreadedStressTest(ctx context.Context, client *libpod.Runtime, im
//Delete Container
deleteStartTime := time.Now()
- err = client.RemoveContainer(ctx, ctr, true)
+ err = client.RemoveContainer(ctx, ctr, true, false)
if err != nil {
return nil, err
}
diff --git a/docs/libpod.conf.5.md b/docs/libpod.conf.5.md
index 98eb5bece..0836c45fa 100644
--- a/docs/libpod.conf.5.md
+++ b/docs/libpod.conf.5.md
@@ -12,8 +12,11 @@ libpod to manage containers.
**image_default_transport**=""
Default transport method for pulling and pushing images
-**runtime_path**=""
- Paths to search for a valid OCI runtime binary
+**runtime**=""
+ Default OCI runtime to use if nothing is specified
+
+**runtimes**
+  For each OCI runtime, specify a list of paths to search for its binary. The first one found is used.
**conmon_path**=""
Paths to search for the Conmon container manager binary
@@ -79,6 +82,11 @@ libpod to manage containers.
**label**="true|false"
Indicates whether the containers should use label separation.
+**num_locks**=""
+ Number of locks available for containers and pods. Each created container or pod consumes one lock.
+ The default number available is 2048.
+ If this is changed, a lock renumbering must be performed, using the `podman system renumber` command.
+
## FILES
`/usr/share/containers/libpod.conf`, default libpod configuration path
diff --git a/docs/podman-attach.1.md b/docs/podman-attach.1.md
index 4322ca2be..ceb945a0e 100644
--- a/docs/podman-attach.1.md
+++ b/docs/podman-attach.1.md
@@ -24,6 +24,8 @@ ctrl-[value] where [value] is one of: a-z, @, ^, [, , or _.
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--no-stdin**
Do not attach STDIN. The default is false.
diff --git a/docs/podman-build.1.md b/docs/podman-build.1.md
index 76e16b42c..fdae48b93 100644
--- a/docs/podman-build.1.md
+++ b/docs/podman-build.1.md
@@ -362,6 +362,12 @@ Specifies the name which will be assigned to the resulting image if the build
process completes successfully.
If _imageName_ does not include a registry name, the registry name *localhost* will be prepended to the image name.
+**--target** *stageName*
+
+Set the target build stage to build. When building a Dockerfile with multiple build stages, --target
+can be used to specify an intermediate build stage by name as the final stage for the resulting image.
+Commands after the target stage will be skipped.
+
**--tls-verify** *bool-value*
Require HTTPS and verify certificates when talking to container registries (defaults to true).
diff --git a/docs/podman-commit.1.md b/docs/podman-commit.1.md
index 79e14aba6..acde51859 100644
--- a/docs/podman-commit.1.md
+++ b/docs/podman-commit.1.md
@@ -76,7 +76,7 @@ e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8
```
```
-$ podman commit -q --pause=false reverent_golick image-commited
+$ podman commit -q --pause=false containerID image-commited
e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8
```
diff --git a/docs/podman-container-checkpoint.1.md b/docs/podman-container-checkpoint.1.md
index 94e52dc78..666dbbb80 100644
--- a/docs/podman-container-checkpoint.1.md
+++ b/docs/podman-container-checkpoint.1.md
@@ -25,6 +25,8 @@ Checkpoint all running containers.
Instead of providing the container name or ID, checkpoint the last created container.
+The latest option is not supported on the remote client.
+
**--leave-running, -R**
Leave the container running after checkpointing instead of stopping it.
diff --git a/docs/podman-container-cleanup.1.md b/docs/podman-container-cleanup.1.md
index 2819d1138..2ad39d214 100644
--- a/docs/podman-container-cleanup.1.md
+++ b/docs/podman-container-cleanup.1.md
@@ -19,6 +19,9 @@ Cleanup all containers.
**--latest, -l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+
+The latest option is not supported on the remote client.
+
## EXAMPLE
`podman container cleanup mywebserver`
@@ -27,7 +30,7 @@ to run containers such as CRI-O, the last started container could be from either
`podman container cleanup 860a4b23`
-`podman container-cleanup -a`
+`podman container cleanup -a`
`podman container cleanup --latest`
diff --git a/docs/podman-container-restore.1.md b/docs/podman-container-restore.1.md
index 44219f3ef..e47d585cc 100644
--- a/docs/podman-container-restore.1.md
+++ b/docs/podman-container-restore.1.md
@@ -32,6 +32,8 @@ Restore all checkpointed containers.
Instead of providing the container name or ID, restore the last created container.
+The latest option is not supported on the remote client.
+
**--tcp-established**
Restore a container with established TCP connections. If the checkpoint image
diff --git a/docs/podman-cp.1.md b/docs/podman-cp.1.md
index 88e50e86b..37426b236 100644
--- a/docs/podman-cp.1.md
+++ b/docs/podman-cp.1.md
@@ -3,20 +3,70 @@
## NAME
podman\-cp - Copy files/folders between a container and the local filesystem
-## Description
-We chose not to implement the `cp` feature in `podman` even though the upstream Docker
-project has it. We have a much stronger capability. Using standard podman-mount
-and podman-umount, we can take advantage of the entire linux tool chain, rather
+## SYNOPSIS
+**podman cp [CONTAINER:]SRC_PATH [CONTAINER:]DEST_PATH**
+
+## DESCRIPTION
+Copies the contents of **SRC_PATH** to the **DEST_PATH**. You can copy from the container's filesystem to the local machine or the reverse, from the local filesystem to the container.
+
+The CONTAINER can be a running or stopped container. The **SRC_PATH** or **DEST_PATH** can be a file or directory.
+
+The **podman cp** command assumes container paths are relative to the container's / (root) directory.
+
+This means supplying the initial forward slash is optional:
+
+The command sees **compassionate_darwin:/tmp/foo/myfile.txt** and **compassionate_darwin:tmp/foo/myfile.txt** as identical.
+
+Local machine paths can be an absolute or relative value.
+The command interprets a local machine's relative paths as relative to the current working directory where **podman cp** is run.
+
+Assuming a path separator of /, a first argument of **SRC_PATH** and second argument of **DEST_PATH**, the behavior is as follows:
+
+**SRC_PATH** specifies a file
+ - **DEST_PATH** does not exist
+ - the file is saved to a file created at **DEST_PATH**
+ - **DEST_PATH** does not exist and ends with /
+ - **DEST_PATH** is created as a directory and the file is copied into this directory using the basename from **SRC_PATH**
+ - **DEST_PATH** exists and is a file
+ - the destination is overwritten with the source file's contents
+ - **DEST_PATH** exists and is a directory
+ - the file is copied into this directory using the basename from **SRC_PATH**
+
+**SRC_PATH** specifies a directory
+ - **DEST_PATH** does not exist
+ - **DEST_PATH** is created as a directory and the contents of the source directory are copied into this directory
+ - **DEST_PATH** exists and is a file
+ - Error condition: cannot copy a directory to a file
+ - **DEST_PATH** exists and is a directory
+ - **SRC_PATH** ends with /
+ - the source directory is copied into this directory
+ - **SRC_PATH** ends with /. (that is: slash followed by dot)
+ - the content of the source directory is copied into this directory
+
+The command requires **SRC_PATH** and **DEST_PATH** to exist according to the above rules.
+
+If **SRC_PATH** is local and is a symbolic link, the symbolic link target is copied by default.
+
+A colon (:) is used as a delimiter between CONTAINER and its path.
+
+You can also use : when specifying paths to a **SRC_PATH** or **DEST_PATH** on a local machine, for example, `file:name.txt`.
+
+If you use a : in a local machine path, you must be explicit with a relative or absolute path, for example:
+ `/path/to/file:name.txt` or `./file:name.txt`
+
+
+## ALTERNATIVES
+
+Podman has much stronger capabilities than just `podman cp` for copying files between host and container.
+
+Using standard podman-mount and podman-umount takes advantage of the entire linux tool chain, rather
than just cp.
-If a user wants to copy contents out of a container or into a container, they
-can execute a few simple commands.
+If a user wants to copy contents out of a container or into a container, they can execute a few simple commands.
-You can copy from the container's file system to the local machine or the
-reverse, from the local filesystem to the container.
+You can copy from the container's file system to the local machine or the reverse, from the local filesystem to the container.
-If you want to copy the /etc/foobar directory out of a container and onto /tmp
-on the host, you could execute the following commands:
+If you want to copy the /etc/foobar directory out of a container and onto /tmp on the host, you could execute the following commands:
mnt=$(podman mount CONTAINERID)
cp -R ${mnt}/etc/foobar /tmp
@@ -40,5 +90,15 @@ This shows that using `podman mount` and `podman umount` you can use all of the
standard linux tools for moving files into and out of containers, not just
the cp command.
+## EXAMPLE
+
+podman cp /myapp/app.conf containerID:/myapp/app.conf
+
+podman cp /home/myuser/myfiles.tar containerID:/tmp
+
+podman cp containerID:/myapp/ /myapp/
+
+podman cp containerID:/home/myuser/. /home/myuser/
+
## SEE ALSO
podman(1), podman-mount(1), podman-umount(1)
diff --git a/docs/podman-create.1.md b/docs/podman-create.1.md
index 98b1a2a17..342ef59c3 100644
--- a/docs/podman-create.1.md
+++ b/docs/podman-create.1.md
@@ -657,18 +657,21 @@ The followings examples are all valid:
Without this argument the command will be run as root in the container.
-**--userns**=""
+**--userns**=host
+**--userns**=ns:my_namespace
-Set the usernamespace mode for the container. The use of userns is disabled by default.
+Set the user namespace mode for the container. The use of userns is disabled by default.
- **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`).
- **ns**: specify the usernamespace to use.
+- `host`: run in the user namespace of the caller. This is the default if no user namespace options are set. The processes running in the container will have the same privileges on the host as any other process launched by the calling user.
+- `ns`: run the container in the given existing user namespace.
+
+This option is incompatible with --gidmap, --uidmap, --subuid and --subgid.
**--uts**=*host*
Set the UTS mode for the container
**host**: use the host's UTS namespace inside the container.
- **ns**: specify the usernamespace to use.
+ **ns**: specify the user namespace to use.
Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
**--volume**, **-v**[=*[HOST-DIR:CONTAINER-DIR[:OPTIONS]]*]
@@ -780,10 +783,28 @@ can override the working directory by using the **-w** option.
## EXAMPLES
+### Create a container using a local image
+
+```
+$ podman create alpine ls
+```
+
+### Create a container using a local image and annotate it
+
+```
+$ podman create --annotation HELLO=WORLD alpine ls
+```
+
+### Create a container using a local image, allocating a pseudo-TTY, keeping stdin open and name it myctr
+
+```
+ podman create -t -i --name myctr alpine ls
+```
+
### Set UID/GID mapping in a new user namespace
-If you want to run the container in a new user namespace and define the mapping of
-the uid and gid from the host.
+Running a container in a new user namespace requires a mapping of
+the uids and gids from the host.
```
$ podman create --uidmap 0:30000:7000 --gidmap 0:30000:7000 fedora echo hello
@@ -804,13 +825,27 @@ KillMode=process
WantedBy=multi-user.target
```
+### Rootless Containers
+
+Podman runs as a non-root user on most systems. This feature requires that a new enough version of shadow-utils
+be installed. The shadow-utils package must include the newuidmap and newgidmap executables.
+
+Note: RHEL7 and CentOS 7 will not have this feature until RHEL7.7 is released.
+
+In order for users to run rootless, there must be an entry for their username in /etc/subuid and /etc/subgid which lists the UIDs for their user namespace.
+
+Rootless podman works better if the fuse-overlayfs and slirp4netns packages are installed.
+The fuse-overlayfs package provides a userspace overlay storage driver; otherwise users need to use
+the vfs storage driver, which is expensive in disk space and does not perform well. slirp4netns is
+required for VPN; without it, containers need to be run with the --net=host flag.
+
## FILES
**/etc/subuid**
**/etc/subgid**
## SEE ALSO
-subgid(5), subuid(5), libpod.conf(5), systemd.unit(5), setsebool(8)
+subgid(5), subuid(5), libpod.conf(5), systemd.unit(5), setsebool(8), slirp4netns(1), fuse-overlayfs(1)
## HISTORY
October 2017, converted from Docker documentation to podman by Dan Walsh for podman <dwalsh@redhat.com>
diff --git a/docs/podman-diff.1.md b/docs/podman-diff.1.md
index dd5f0edcf..8837b744f 100644
--- a/docs/podman-diff.1.md
+++ b/docs/podman-diff.1.md
@@ -15,6 +15,12 @@ Displays changes on a container or image's filesystem. The container or image w
Alter the output into a different format. The only valid format for diff is `json`.
+**--latest, -l**
+
+Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
+to run containers such as CRI-O, the last started container could be from either of those methods.
+
+The latest option is not supported on the remote client.
## EXAMPLE
diff --git a/docs/podman-exec.1.md b/docs/podman-exec.1.md
index 77317b0ca..14088b468 100644
--- a/docs/podman-exec.1.md
+++ b/docs/podman-exec.1.md
@@ -4,7 +4,7 @@
podman\-exec - Execute a command in a running container
## SYNOPSIS
-**podman exec** *container* [*options*] [*command* [*arg* ...]]
+**podman exec** [*options*] *container* [*command* [*arg* ...]]
## DESCRIPTION
**podman exec** executes a command in a running container.
@@ -24,6 +24,8 @@ Not supported. All exec commands are interactive by default.
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--privileged**
Give the process extended Linux capabilities when running the command in container.
@@ -46,6 +48,12 @@ The default working directory for running binaries within a container is the roo
The image developer can set a different default with the WORKDIR instruction, which can be overridden
when creating the container.
+## EXAMPLES
+
+```
+$ podman exec -it ctrID ls
+$ podman exec -it -w /tmp myCtr pwd
+$ podman exec --user root ctrID ls
+```
+
## SEE ALSO
podman(1), podman-run(1)
diff --git a/docs/podman-generate-kube.1.md b/docs/podman-generate-kube.1.md
index 5236f23fe..d4bed8ab1 100644
--- a/docs/podman-generate-kube.1.md
+++ b/docs/podman-generate-kube.1.md
@@ -19,6 +19,8 @@ The **service** option can be used to generate a Service specification for the c
if the object has portmap bindings, the service specification will include a NodePort declaration to expose the service. A
random port is assigned by Podman in the specification.
+Note that the generated Kubernetes YAML file can be used to re-run the deployment via podman-play-kube(1).
+
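+A round trip might look like this (the container name `myctr` and the file name are illustrative):
+
+```
+$ podman generate kube myctr > myctr.yml
+$ podman play kube myctr.yml
+```
+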
# OPTIONS:
**s** **--service**
@@ -145,7 +147,7 @@ status:
```
## SEE ALSO
-podman(1), podman-container, podman-pod, podman-play
+podman(1), podman-container(1), podman-pod(1), podman-play-kube(1)
# HISTORY
Decemeber 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/podman-image-trust.1.md b/docs/podman-image-trust.1.md
index 668fee0f3..819035040 100644
--- a/docs/podman-image-trust.1.md
+++ b/docs/podman-image-trust.1.md
@@ -14,19 +14,29 @@ podman\-trust - Manage container registry image trust policy
REGISTRY[/REPOSITORY]
# DESCRIPTION
-Manages which registries you trust as a source of container images based on its location. The location is determined by the transport and the registry host of the image. Using this container image `docker://docker.io/library/busybox` as an example, `docker` is the transport and `docker.io` is the registry host.
+Manages which registries you trust as a source of container images based on their location. The location is determined
+by the transport and the registry host of the image. Using this container image `docker://docker.io/library/busybox`
+as an example, `docker` is the transport and `docker.io` is the registry host.
-The trust policy describes a registry scope (registry and/or repository). This trust can use public keys for signed images.
+Trust is defined in **/etc/containers/policy.json** and is enforced when a user attempts to pull
+a remote image from a registry. The trust policy in policy.json describes a registry scope (registry and/or repository) for the trust. This trust can use public keys for signed images.
-Trust is defined in **/etc/containers/policy.json** and is enforced when a user attempts to pull an image from a registry that is managed by policy.json.
+The scope of the trust is evaluated from most specific to the least specific. In other words, a policy may be defined for an entire registry. Or it could be defined for a particular repository in that registry. Or it could be defined down to a specific signed image inside of the registry.
-The scope of the trust is evaluated from most specific to the least specific. In other words, a policy may be defined for an entire registry. Or it could be defined for a particular repository in that registry. Or it could be defined down to a specific signed image inside of the registry. See below for examples.
+For example, the following list includes valid scope values that could be used in policy.json from most specific to the least specific:
+
+docker.io/library/busybox:notlatest
+docker.io/library/busybox
+docker.io/library
+docker.io
+
+If no configuration is found for any of these scopes, the default value (specified by using "default" instead of REGISTRY[/REPOSITORY]) is used.
Trust **type** provides a way to:
Whitelist ("accept") or
-Blacklist ("reject") registries.
-
+Blacklist ("reject") registries or
+Require signature ("signedBy").
Trust may be updated using the command **podman image trust set** for an existing trust scope.
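+
+For example (the scope and the key path are illustrative, not defaults), trust can be set for one of the scopes listed above using the accepted types:
+
+```
+$ podman image trust set -t reject default
+$ podman image trust set -f /etc/pki/containers/example.gpg -t signedBy docker.io/library/busybox
+```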
@@ -36,10 +46,8 @@ Trust may be updated using the command **podman image trust set** for an existin
**-f** **--pubkeysfile**
A path to an exported public key on the local system. Key paths
- will be referenced in policy.json. Any path may be used but the path
- **/etc/pki/containers** is recommended. Options may be used multiple times to
- require an image be signed by multiple keys. One of **--pubkeys** or
- **--pubkeysfile** is required for the **signedBy** type.
+ will be referenced in policy.json. Any path to a file may be used but locating the file in **/etc/pki/containers** is recommended. Options may be used multiple times to
+ require an image be signed by multiple keys. The **--pubkeysfile** option is required for the **signedBy** type.
**-t** **--type**
The trust type for this policy entry. Accepted values:
@@ -84,7 +92,5 @@ Display trust as JSON
policy-json(5)
# HISTORY
-
January 2019, updated by Tom Sweeney (tsweeney at redhat dot com)
-
December 2018, originally compiled by Qi Wang (qiwan at redhat dot com)
diff --git a/docs/podman-inspect.1.md b/docs/podman-inspect.1.md
index b01bc0f4e..5748f29f4 100644
--- a/docs/podman-inspect.1.md
+++ b/docs/podman-inspect.1.md
@@ -27,6 +27,8 @@ The keys of the returned JSON can be used as the values for the --format flag (s
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--size, -s**
Display the total file size if the type is a container
diff --git a/docs/podman-kill.1.md b/docs/podman-kill.1.md
index 85f68a73d..1c14b71d5 100644
--- a/docs/podman-kill.1.md
+++ b/docs/podman-kill.1.md
@@ -19,6 +19,8 @@ Signal all running containers. This does not include paused containers.
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--signal, s**
Signal to send to the container. For more information on Linux signals, refer to *man signal(7)*.
diff --git a/docs/podman-load.1.md b/docs/podman-load.1.md
index eca7ecb2e..8b6501a5c 100644
--- a/docs/podman-load.1.md
+++ b/docs/podman-load.1.md
@@ -22,7 +22,9 @@ Note: `:` is a restricted character and cannot be part of the file name.
**--input, -i**
-Read from archive file, default is STDIN
+Read from archive file, default is STDIN.
+
+The remote client requires the use of this option.
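+
+For example (the archive name is illustrative), a remote client might load a previously saved image with:
+
+```
+$ podman load -i alpine.tar
+```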
**--quiet, -q**
diff --git a/docs/podman-login.1.md b/docs/podman-login.1.md
index e72d1deca..3ac0e30ef 100644
--- a/docs/podman-login.1.md
+++ b/docs/podman-login.1.md
@@ -25,6 +25,10 @@ flag. The default path used is **${XDG\_RUNTIME_DIR}/containers/auth.json**.
Password for registry
+**--password-stdin**
+
+Take the password from stdin
+
**--username, -u**
Username for registry
@@ -86,6 +90,16 @@ $ podman login --cert-dir /etc/containers/certs.d/ -u foo -p bar localhost:5000
Login Succeeded!
```
+```
+$ podman login -u testuser --password-stdin < testpassword.txt docker.io
+Login Succeeded!
+```
+
+```
+$ echo $testpassword | podman login -u testuser --password-stdin docker.io
+Login Succeeded!
+```
+
## SEE ALSO
podman(1), podman-logout(1), crio(8)
diff --git a/docs/podman-logs.1.md b/docs/podman-logs.1.md
index 71a798bc0..bc02df954 100644
--- a/docs/podman-logs.1.md
+++ b/docs/podman-logs.1.md
@@ -17,6 +17,13 @@ any logs at the time you execute podman logs
Follow log output. Default is false
+**--latest, -l**
+
+Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
+to run containers such as CRI-O, the last started container could be from either of those methods.
+
+The latest option is not supported on the remote client.
+
**--since=TIMESTAMP**
Show logs since TIMESTAMP. The --since option can be Unix timestamps, date formatted timestamps, or Go duration
@@ -61,7 +68,7 @@ podman logs --tail 2 b3f2436bdb97
To view a containers logs since a certain time:
```
-podman logs 224c375f27cd --since 2017-08-07T10:10:09.055837383-04:00 myserver
+podman logs --since 2017-08-07T10:10:09.055837383-04:00 myserver
1:M 07 Aug 14:10:09.055 # Server can't set maximum open files to 10032 because of OS error: Operation not permitted.
1:M 07 Aug 14:10:09.055 # Current maximum open files is 4096. maxclients has been reduced to 4064 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'.
@@ -72,7 +79,7 @@ podman logs 224c375f27cd --since 2017-08-07T10:10:09.055837383-04:00 myserver
To view a container's logs generated in the last 10 minutes:
```
-podman logs 224c375f27cd --since 10m myserver
+podman logs --since 10m myserver
1:M 07 Aug 14:10:09.055 # Server can't set maximum open files to 10032 because of OS error: Operation not permitted.
1:M 07 Aug 14:10:09.055 # Current maximum open files is 4096. maxclients has been reduced to 4064 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'.
diff --git a/docs/podman-mount.1.md b/docs/podman-mount.1.md
index 2cccf5ee0..e244e5daf 100644
--- a/docs/podman-mount.1.md
+++ b/docs/podman-mount.1.md
@@ -33,6 +33,8 @@ Instead of providing the container name or ID, use the last created container.
If you use methods other than Podman to run containers such as CRI-O, the last
started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--notruncate**
Do not truncate IDs in output.
diff --git a/docs/podman-play-kube.1.md b/docs/podman-play-kube.1.md
index 3fd9746a5..2264f7a88 100644
--- a/docs/podman-play-kube.1.md
+++ b/docs/podman-play-kube.1.md
@@ -20,7 +20,7 @@ kubernetes_input.yml
the pod and containers described in the YAML. The containers within the pod are then started and
the ID of the new Pod is output.
-Ideally the input file would be one created by Podman. This would guarantee a smooth import and expected results.
+Ideally the input file would be one created by Podman (see podman-generate-kube(1)). This would guarantee a smooth import and expected results.
# OPTIONS:
@@ -72,7 +72,7 @@ $ podman play kube demo.yml
```
## SEE ALSO
-podman(1), podman-container(1), podman-pod(1), podman-generate(1), podman-play(1)
+podman(1), podman-container(1), podman-pod(1), podman-generate-kube(1), podman-play(1)
# HISTORY
Decemeber 2018, Originally compiled by Brent Baude (bbaude at redhat dot com)
diff --git a/docs/podman-pod-create.1.md b/docs/podman-pod-create.1.md
index a63b12d73..06f962849 100644
--- a/docs/podman-pod-create.1.md
+++ b/docs/podman-pod-create.1.md
@@ -10,8 +10,8 @@ podman\-pod\-create - Create a new pod
Creates an empty pod, or unit of multiple containers, and prepares it to have
containers added to it. The pod id is printed to STDOUT. You can then use
-**podman create --pod <pod_id|pod_name> ...** to add containers to the pod, and
-**podman pod start <pod_id|pod_name>** to start the pod.
+**podman create --pod \<pod_id|pod_name\> ...** to add containers to the pod, and
+**podman pod start \<pod_id|pod_name\>** to start the pod.
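+
+An illustrative sequence (the pod name `mypod` and the image are placeholders, not defaults):
+
+```
+$ podman pod create --name mypod
+$ podman create --pod mypod alpine top
+$ podman pod start mypod
+```
+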
## OPTIONS
diff --git a/docs/podman-pod-inspect.1.md b/docs/podman-pod-inspect.1.md
index b8fb143ba..a4e6a5559 100644
--- a/docs/podman-pod-inspect.1.md
+++ b/docs/podman-pod-inspect.1.md
@@ -16,6 +16,7 @@ that belong to the pod.
Instead of providing the pod name or ID, use the last created pod. If you use methods other than Podman
to run pods such as CRI-O, the last started pod could be from either of those methods.
+The latest option is not supported on the remote client.
## EXAMPLE
```
diff --git a/docs/podman-pod-kill.1.md b/docs/podman-pod-kill.1.md
index 4376c2d29..2a863d3d9 100644
--- a/docs/podman-pod-kill.1.md
+++ b/docs/podman-pod-kill.1.md
@@ -19,6 +19,8 @@ Sends signal to all containers associated with a pod.
Instead of providing the pod name or ID, use the last created pod. If you use methods other than Podman
to run pods such as CRI-O, the last started pod could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--signal, s**
Signal to send to the containers in the pod. For more information on Linux signals, refer to *man signal(7)*.
diff --git a/docs/podman-pod-pause.1.md b/docs/podman-pod-pause.1.md
index 32c5b927e..418a9ee2a 100644
--- a/docs/podman-pod-pause.1.md
+++ b/docs/podman-pod-pause.1.md
@@ -19,6 +19,8 @@ Pause all pods.
Instead of providing the pod name or ID, pause the last created pod.
+The latest option is not supported on the remote client.
+
## EXAMPLE
podman pod pause mywebserverpod
diff --git a/docs/podman-pod-ps.1.md b/docs/podman-pod-ps.1.md
index 24b343438..ee215154a 100644
--- a/docs/podman-pod-ps.1.md
+++ b/docs/podman-pod-ps.1.md
@@ -42,6 +42,8 @@ Includes the container statuses in the container info field
Show the latest pod created (all states)
+The latest option is not supported on the remote client.
+
**--no-trunc**
Display the extended information
diff --git a/docs/podman-pod-restart.1.md b/docs/podman-pod-restart.1.md
index 17fb4ed4c..cd6e5c8ce 100644
--- a/docs/podman-pod-restart.1.md
+++ b/docs/podman-pod-restart.1.md
@@ -22,8 +22,11 @@ Restarts all pods
Instead of providing the pod name or ID, restart the last created pod.
+The latest option is not supported on the remote client.
+
## EXAMPLE
+```
podman pod restart mywebserverpod
cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907
@@ -31,8 +34,6 @@ podman pod restart 490eb 3557fb
490eb241aaf704d4dd2629904410fe4aa31965d9310a735f8755267f4ded1de5
3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab
-3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab
-
podman pod restart --latest
3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab
@@ -42,6 +43,7 @@ podman pod restart --all
490eb241aaf704d4dd2629904410fe4aa31965d9310a735f8755267f4ded1de5
70c358daecf71ef9be8f62404f926080ca0133277ef7ce4f6aa2d5af6bb2d3e9
cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907
+```
## SEE ALSO
podman-pod(1), podman-pod-start(1), podman-restart(1)
diff --git a/docs/podman-pod-rm.1.md b/docs/podman-pod-rm.1.md
index 7a300c152..aa26a1bbb 100644
--- a/docs/podman-pod-rm.1.md
+++ b/docs/podman-pod-rm.1.md
@@ -19,6 +19,8 @@ Remove all pods. Can be used in conjunction with \-f as well.
Instead of providing the pod name or ID, remove the last created pod.
+The latest option is not supported on the remote client.
+
**--force, f**
Stop running containers and delete all stopped containers before removal of pod.
diff --git a/docs/podman-pod-start.1.md b/docs/podman-pod-start.1.md
index 253bb5f53..27e40740f 100644
--- a/docs/podman-pod-start.1.md
+++ b/docs/podman-pod-start.1.md
@@ -20,6 +20,8 @@ Starts all pods
Instead of providing the pod name or ID, start the last created pod.
+The latest option is not supported on the remote client.
+
## EXAMPLE
podman pod start mywebserverpod
diff --git a/docs/podman-pod-stop.1.md b/docs/podman-pod-stop.1.md
index 7544f8bf7..338e04d67 100644
--- a/docs/podman-pod-stop.1.md
+++ b/docs/podman-pod-stop.1.md
@@ -19,6 +19,8 @@ Stops all pods
Instead of providing the pod name or ID, stop the last created pod.
+The latest option is not supported on the remote client.
+
**--timeout, --time, t**
Timeout to wait before forcibly stopping the containers in the pod.
diff --git a/docs/podman-pod-top.1.md b/docs/podman-pod-top.1.md
index 0b330eb03..a77ca2b37 100644
--- a/docs/podman-pod-top.1.md
+++ b/docs/podman-pod-top.1.md
@@ -19,6 +19,8 @@ Display the running process of containers in a pod. The *format-descriptors* are
Instead of providing the pod name or ID, use the last created pod.
+The latest option is not supported on the remote client.
+
## FORMAT DESCRIPTORS
The following descriptors are supported in addition to the AIX format descriptors mentioned in ps (1):
diff --git a/docs/podman-pod-unpause.1.md b/docs/podman-pod-unpause.1.md
index 643544ef1..1004e09f9 100644
--- a/docs/podman-pod-unpause.1.md
+++ b/docs/podman-pod-unpause.1.md
@@ -19,6 +19,8 @@ Unpause all pods.
Instead of providing the pod name or ID, unpause the last created pod.
+The latest option is not supported on the remote client.
+
## EXAMPLE
podman pod unpause mywebserverpod
diff --git a/docs/podman-port.1.md b/docs/podman-port.1.md
index 874ea5036..020a25d32 100644
--- a/docs/podman-port.1.md
+++ b/docs/podman-port.1.md
@@ -21,11 +21,13 @@ or private ports/protocols as filters.
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
## EXAMPLE
List all port mappings
```
-#podman port -a
+# podman port -a
b4d2f05432e482e017b1a4b2eae15fa7b4f6fb7e9f65c1bde46294fdef285906
80/udp -> 0.0.0.0:44327
80/tcp -> 0.0.0.0:44327
@@ -34,21 +36,21 @@ b4d2f05432e482e017b1a4b2eae15fa7b4f6fb7e9f65c1bde46294fdef285906
List port mappings for a specific container
```
-#podman port b4d2f054
+# podman port b4d2f054
80/udp -> 0.0.0.0:44327
80/tcp -> 0.0.0.0:44327
#
```
List the port mappings for the latest container and port 80
```
-#podman port b4d2f054 80
+# podman port b4d2f054 80
0.0.0.0:44327
#
```
List the port mappings for a specific container for port 80 and the tcp protocol.
```
-#podman port b4d2f054 80/tcp
+# podman port b4d2f054 80/tcp
0.0.0.0:44327
#
```
diff --git a/docs/podman-ps.1.md b/docs/podman-ps.1.md
index 8b86703d8..b8b1c3d62 100644
--- a/docs/podman-ps.1.md
+++ b/docs/podman-ps.1.md
@@ -75,6 +75,8 @@ Print the n last created containers (all states)
Show the latest container created (all states)
+The latest option is not supported on the remote client.
+
**--namespace, --ns**
Display namespace information
diff --git a/docs/podman-pull.1.md b/docs/podman-pull.1.md
index 2196e251e..bce11b096 100644
--- a/docs/podman-pull.1.md
+++ b/docs/podman-pull.1.md
@@ -45,6 +45,10 @@ Image stored in local container/storage
## OPTIONS
+**--all-tags, a**
+
+All tagged images in the repository will be pulled.
+
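+For example (illustrative; this pulls every tag of the alpine repository rather than just `latest`):
+
+```
+$ podman pull --all-tags alpine
+```
+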
**--authfile**
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
diff --git a/docs/podman-restart.1.md b/docs/podman-restart.1.md
index 875afa385..dd28e34c7 100644
--- a/docs/podman-restart.1.md
+++ b/docs/podman-restart.1.md
@@ -19,6 +19,8 @@ Restart all containers regardless of their current state.
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--running**
Restart all containers that are already in the *running* state.
diff --git a/docs/podman-rm.1.md b/docs/podman-rm.1.md
index 4fcb0b6c5..dc1729188 100644
--- a/docs/podman-rm.1.md
+++ b/docs/podman-rm.1.md
@@ -1,9 +1,11 @@
-% podman-rm(1)
+% podman-container-rm(1)
## NAME
-podman\-rm - Remove one or more containers
+podman\-container\-rm (podman\-rm) - Remove one or more containers
## SYNOPSIS
+**podman container rm** [*options*] *container*
+
**podman rm** [*options*] *container*
## DESCRIPTION
@@ -17,13 +19,17 @@ Remove all containers. Can be used in conjunction with -f as well.
**--force, f**
-Force the removal of a running and paused containers
+Force the removal of running and paused containers. Forcing a container's removal also
+removes the container from container storage even if the container is not known to Podman.
+Such containers could have been created by a different container engine.
**--latest, -l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--volumes, -v**
Remove the volumes associated with the container. (Not yet implemented)
@@ -53,8 +59,13 @@ Forcibly remove the latest container created.
podman rm -f --latest
```
+## Exit Status
+**_0_** if all specified containers were removed
+**_1_** if one of the specified containers did not exist, and no other failures occurred
+**_125_** if the command fails for a reason other than a container not existing
+
## SEE ALSO
-podman(1), podman-rmi(1)
+podman(1), podman-image-rm(1)
## HISTORY
August 2017, Originally compiled by Ryan Cole <rycole@redhat.com>
diff --git a/docs/podman-rmi.1.md b/docs/podman-rmi.1.md
index 9c080c9f1..8c22bba2c 100644
--- a/docs/podman-rmi.1.md
+++ b/docs/podman-rmi.1.md
@@ -1,9 +1,11 @@
-% podman-rmi(1)
+% podman-image-rm(1)
## NAME
-podman\-rmi - Removes one or more images
+podman\-image\-rm (podman\-rmi) - Removes one or more images
## SYNOPSIS
+**podman image rm** *image* ...
+
**podman rmi** *image* ...
## DESCRIPTION
@@ -38,6 +40,10 @@ Remove all images and containers.
```
podman rmi -a -f
```
+## Exit Status
+**_0_** if all specified images were removed
+**_1_** if one of the specified images did not exist, and no other failures occurred
+**_125_** if the command fails for a reason other than an image not existing
## SEE ALSO
podman(1)
diff --git a/docs/podman-run.1.md b/docs/podman-run.1.md
index 828ae96a8..bbf10a2ce 100644
--- a/docs/podman-run.1.md
+++ b/docs/podman-run.1.md
@@ -28,6 +28,8 @@ servers in the created `resolv.conf`). Additionally, an empty file is created in
each container to indicate to programs they are running in a container. This file
is located at `/run/.containerenv`.
+When running from a user-defined network namespace, the /etc/netns/NSNAME/resolv.conf will be used if it exists; otherwise /etc/resolv.conf will be used.
+
## OPTIONS
**--add-host**=[]
@@ -449,6 +451,7 @@ Tune the container's pids limit. Set `-1` to have unlimited pids for the contain
Run container in an existing pod. If you want podman to make the pod for you, preference the pod name with `new:`.
To make a pod with more granular options, use the `podman pod create` command before creating a container.
+If a container is run within a pod, and the pod has an infra-container, the infra-container will be started before the container is.
**--privileged**=*true*|*false*
@@ -663,7 +666,7 @@ Without this argument the command will be run as root in the container.
**--userns**=host
**--userns**=ns:my_namespace
-Set the user namespace for the container.
+Set the user namespace mode for the container. The use of userns is disabled by default.
- `host`: run in the user namespace of the caller. This is the default if no user namespace options are set. The processes running in the container will have the same privileges on the host as any other process launched by the calling user.
- `ns`: run the container in the given existing user namespace.
@@ -675,7 +678,7 @@ This option is incompatible with --gidmap, --uidmap, --subuid and --subgid
Set the UTS mode for the container
`host`: use the host's UTS namespace inside the container.
-`ns`: specify the usernamespace to use.
+`ns`: specify the UTS namespace to use.
**NOTE**: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
@@ -693,21 +696,35 @@ Current supported mount TYPES are bind, and tmpfs.
Common Options:
- · src, source: mount source spec for bind and volume. Mandatory for bind.
+ · src, source: mount source spec for bind and volume. Mandatory for bind.
- · dst, destination, target: mount destination spec.
+ · dst, destination, target: mount destination spec.
- · ro, read-only: true or false (default).
+ · ro, read-only: true or false (default).
Options specific to bind:
- · bind-propagation: Z, z, shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2).
+ · bind-propagation: Z, z, shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2).
Options specific to tmpfs:
- · tmpfs-size: Size of the tmpfs mount in bytes. Unlimited by default in Linux.
+ · tmpfs-size: Size of the tmpfs mount in bytes. Unlimited by default in Linux.
+
+ · tmpfs-mode: File mode of the tmpfs in octal. (e.g. 700 or 0700.) Defaults to 1777 in Linux.
+
+**--userns**=""
+
+Set the user namespace mode for the container. The use of userns is disabled by default.
- · tmpfs-mode: File mode of the tmpfs in octal. (e.g. 700 or 0700.) Defaults to 1777 in Linux.
+ **host**: use the host user namespace and enable all privileged options (e.g., `pid=host` or `--privileged`).
+ **ns**: specify the user namespace to use.
+
+**--uts**=*host*
+
+Set the UTS mode for the container
+ **host**: use the host's UTS namespace inside the container.
+ **ns**: specify the user namespace to use.
+ Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
**--volume**, **-v**[=*[HOST-DIR:CONTAINER-DIR[:OPTIONS]]*]
@@ -1074,8 +1091,8 @@ supported sysctls.
### Set UID/GID mapping in a new user namespace
-If you want to run the container in a new user namespace and define the mapping of
-the uid and gid from the host.
+Running a container in a new user namespace requires a mapping of
+the uids and gids from the host.
```
$ podman run --uidmap 0:30000:7000 --gidmap 0:30000:7000 fedora echo hello
@@ -1096,13 +1113,27 @@ KillMode=process
WantedBy=multi-user.target
```
+### Rootless Containers
+
+Podman can be run as a non-root user on most systems. This feature requires that a new enough version of shadow-utils
+be installed. The shadow-utils package must include the newuidmap and newgidmap executables.
+
+Note: RHEL7 and CentOS 7 will not have this feature until RHEL7.7 is released.
+
+In order for users to run rootless, there must be an entry for their username in /etc/subuid and /etc/subgid, which list the UIDs and GIDs allotted to their user namespace.
+
+Rootless Podman works better if the fuse-overlayfs and slirp4netns packages are installed.
+The fuse-overlayfs package provides a userspace overlay storage driver; otherwise users must use
+the vfs storage driver, which is expensive in disk space and does not perform well. slirp4netns is
+required for VPN; without it, containers must be run with the --net=host flag.
+
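+A quick way to check for the entries (the username `johndoe` and the range are placeholders, not defaults) is:
+
+```
+# "johndoe" and the range 100000:65536 are examples only
+$ grep johndoe /etc/subuid /etc/subgid
+/etc/subuid:johndoe:100000:65536
+/etc/subgid:johndoe:100000:65536
+```
+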
## FILES
**/etc/subuid**
**/etc/subgid**
## SEE ALSO
-subgid(5), subuid(5), libpod.conf(5), systemd.unit(5), setsebool(8)
+subgid(5), subuid(5), libpod.conf(5), systemd.unit(5), setsebool(8), slirp4netns(1), fuse-overlayfs(1)
## HISTORY
September 2018, updated by Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
diff --git a/docs/podman-start.1.md b/docs/podman-start.1.md
index f16a20efa..b0167003e 100644
--- a/docs/podman-start.1.md
+++ b/docs/podman-start.1.md
@@ -8,7 +8,7 @@ podman\-start - Start one or more containers
## DESCRIPTION
Start one or more containers. You may use container IDs or names as input. The *attach* and *interactive*
-options cannot be used to override the *--tty** and *--interactive* options from when the container
+options cannot be used to override the *--tty* and *--interactive* options from when the container
was created. If you attempt to start a running container with the *--attach* option, podman will simply
attach to the container.
@@ -33,6 +33,8 @@ Attach container's STDIN. The default is false.
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--sig-proxy**=*true*|*false*
Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true* when attaching, *false* otherwise.
@@ -41,9 +43,9 @@ Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and
podman start mywebserver
-podman start 860a4b23 5421ab4
+podman start 860a4b231279 5421ab43b45
-podman start -i -a 860a4b23
+podman start --interactive --attach 860a4b231279
podman start -i -l
diff --git a/docs/podman-stats.1.md b/docs/podman-stats.1.md
index 8fc765326..97a9be961 100644
--- a/docs/podman-stats.1.md
+++ b/docs/podman-stats.1.md
@@ -20,6 +20,8 @@ Show all containers. Only running containers are shown by default
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--no-reset**
Do not clear the terminal/screen in between reporting intervals
@@ -36,16 +38,17 @@ Valid placeholders for the Go template are listed below:
| **Placeholder** | **Description** |
| --------------- | --------------- |
-| .ID | Container ID |
+| .Pod | Pod ID |
+| .CID | Container ID |
| .Name | Container Name |
-| .CPUPerc | CPU percentage |
+| .CPU | CPU percentage |
| .MemUsage | Memory usage |
-| .MemPerc | Memory percentage |
+| .Mem | Memory percentage |
| .NetIO | Network IO |
| .BlockIO | Block IO |
| .PIDS | Number of PIDs |
-
+
+When using a Go template, you may precede the format with `table` to print headers.
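+
+For instance, an illustrative format string built from the placeholders above:
+
+```
+$ podman stats --format "table {{.CID}} {{.Name}} {{.CPU}}"
+```
+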
## EXAMPLE
```
diff --git a/docs/podman-stop.1.md b/docs/podman-stop.1.md
index 813f0ef9e..2016a7301 100644
--- a/docs/podman-stop.1.md
+++ b/docs/podman-stop.1.md
@@ -24,6 +24,8 @@ Stop all running containers. This does not include paused containers.
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
**--timeout, --time, t**
Timeout to wait before forcibly stopping the container
@@ -32,11 +34,11 @@ Timeout to wait before forcibly stopping the container
podman stop mywebserver
-podman stop 860a4b23
+podman stop 860a4b235279
-podman stop mywebserver 860a4b23
+podman stop mywebserver 860a4b235279
-podman stop --timeout 2 860a4b23
+podman stop --timeout 2 860a4b235279
podman stop -a
diff --git a/docs/podman-system-renumber.1.md b/docs/podman-system-renumber.1.md
new file mode 100644
index 000000000..a88640d63
--- /dev/null
+++ b/docs/podman-system-renumber.1.md
@@ -0,0 +1,29 @@
+% podman-system-renumber(1) podman
+
+## NAME
+podman\-system\-renumber - Renumber container locks
+
+## SYNOPSIS
+**podman system renumber**
+
+## DESCRIPTION
+**podman system renumber** renumbers locks used by containers and pods.
+
+Each Podman container and pod is allocated a lock at creation time, up to a maximum number controlled by the **num_locks** parameter in **libpod.conf**.
+
+When all available locks are exhausted, no further containers or pods can be created until some existing containers or pods are removed. This can be avoided by increasing the number of locks available by modifying **libpod.conf** and subsequently running **podman system renumber** to prepare the new locks (and reallocate lock numbers to fit the new struct).
+
+**podman system renumber** must be called after any changes to **num_locks** - failure to do so will result in errors starting Podman as the number of locks available conflicts with the configured number of locks.
+
+**podman system renumber** can also be used to migrate 1.0 and earlier versions of Podman, which used a different locking scheme, to the new locking model. It is not strictly required to do this, but it is highly recommended to do so as deadlocks can occur otherwise.
+
+If possible, avoid calling **podman system renumber** while there are other Podman processes running.
+
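+A typical sequence (the lock count shown is only an example) is to raise **num_locks** in **libpod.conf** and then renumber:
+
+```
+# after raising num_locks in libpod.conf (for example to 4096):
+$ podman system renumber
+```
+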
+## SEE ALSO
+`podman(1)`, `libpod.conf(5)`
+
+# HISTORY
+February 2019, Originally compiled by Matt Heon (mheon at redhat dot com)
diff --git a/docs/podman-top.1.md b/docs/podman-top.1.md
index 7782a3f29..52d1238ef 100644
--- a/docs/podman-top.1.md
+++ b/docs/podman-top.1.md
@@ -20,6 +20,8 @@ Display the running process of the container. The *format-descriptors* are ps (1
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
## FORMAT DESCRIPTORS
The following descriptors are supported in addition to the AIX format descriptors mentioned in ps (1):
diff --git a/docs/podman-umount.1.md b/docs/podman-umount.1.md
index cceb63019..795f0402d 100644
--- a/docs/podman-umount.1.md
+++ b/docs/podman-umount.1.md
@@ -35,6 +35,8 @@ Instead of providing the container name or ID, use the last created container.
If you use methods other than Podman to run containers such as CRI-O, the last
started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
## EXAMPLE
podman umount containerID
diff --git a/docs/podman-wait.1.md b/docs/podman-wait.1.md
index dd5dc7907..672316eef 100644
--- a/docs/podman-wait.1.md
+++ b/docs/podman-wait.1.md
@@ -22,9 +22,11 @@ After the container stops, the container's return code is printed.
**--latest, -l**
- Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
+Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.
+The latest option is not supported on the remote client.
+
## EXAMPLES
```
diff --git a/docs/podman.1.md b/docs/podman.1.md
index 51ef00383..760f27310 100644
--- a/docs/podman.1.md
+++ b/docs/podman.1.md
@@ -168,6 +168,7 @@ the exit codes follow the `chroot` standard, see below:
| [podman-umount(1)](podman-umount.1.md) | Unmount a working container's root filesystem. |
| [podman-unpause(1)](podman-unpause.1.md) | Unpause one or more containers. |
| [podman-version(1)](podman-version.1.md) | Display the Podman version information. |
+| [podman-volume(1)](podman-volume.1.md) | Manage Volumes. |
| [podman-wait(1)](podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes. |
## FILES
diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh
index b058b4273..3c2d193af 100755
--- a/hack/get_ci_vm.sh
+++ b/hack/get_ci_vm.sh
@@ -17,8 +17,14 @@ MEMORY="4Gb"
DISK="200"
PROJECT="libpod-218412"
GOSRC="/var/tmp/go/src/github.com/containers/libpod"
+GCLOUD_IMAGE=${GCLOUD_IMAGE:-quay.io/cevich/gcloud_centos:latest}
+GCLOUD_SUDO=${GCLOUD_SUDO-sudo}
+
+# Shared tmp directory between container and us
+TMPDIR=$(mktemp -d --tmpdir $(basename $0)_tmpdir_XXXXXX)
+
# Command shortcuts save some typing
-PGCLOUD="sudo podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v /home/$USER:$HOME -v /tmp:/tmp:ro quay.io/cevich/gcloud_centos:latest --configuration=libpod --project=$PROJECT"
+PGCLOUD="$GCLOUD_SUDO podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v /home/$USER:$HOME -v $TMPDIR:/tmp $GCLOUD_IMAGE --configuration=libpod --project=$PROJECT"
SCP_CMD="$PGCLOUD compute scp"
LIBPODROOT=$(realpath "$(dirname $0)/../")
@@ -39,19 +45,20 @@ showrun() {
fi
}
-TEMPFILE=$(mktemp -p '' $(basename $0)_XXXXX.tar.bz2)
cleanup() {
set +e
wait
- rm -f "$TEMPFILE"
+
+ # set GCLOUD_DEBUG to leave tmpdir behind for postmortem
+ test -z "$GCLOUD_DEBUG" && rm -rf $TMPDIR
}
trap cleanup EXIT
delvm() {
- cleanup
echo -e "\n"
echo -e "\n${YEL}Offering to Delete $VMNAME ${RED}(Might take a minute or two)${NOR}"
showrun $CLEANUP_CMD # prompts for Yes/No
+ cleanup
}
image_hints() {
@@ -128,16 +135,16 @@ parse_args $@
cd $LIBPODROOT
# Attempt to determine if named 'libpod' gcloud configuration exists
-showrun $PGCLOUD info > $TEMPFILE
-if egrep -q "Account:.*None" "$TEMPFILE"
+showrun $PGCLOUD info > $TMPDIR/gcloud-info
+if egrep -q "Account:.*None" $TMPDIR/gcloud-info
then
echo -e "\n${YEL}WARNING: Can't find gcloud configuration for libpod, running init.${NOR}"
echo -e " ${RED}Please choose "#1: Re-initialize" and "login" if asked.${NOR}"
showrun $PGCLOUD init --project=$PROJECT --console-only --skip-diagnostics
# Verify it worked (account name == someone@example.com)
- $PGCLOUD info > $TEMPFILE
- if egrep -q "Account:.*None" "$TEMPFILE"
+ $PGCLOUD info > $TMPDIR/gcloud-info-after-init
+ if egrep -q "Account:.*None" $TMPDIR/gcloud-info-after-init
then
echo -e "${RED}ERROR: Could not initialize libpod configuration in gcloud.${NOR}"
exit 5
@@ -150,8 +157,10 @@ then
fi
# Couldn't make rsync work with gcloud's ssh wrapper :(
+TARBALL_BASENAME=$VMNAME.tar.bz2
+TARBALL=/tmp/$TARBALL_BASENAME
echo -e "\n${YEL}Packing up repository into a tarball $VMNAME.${NOR}"
-showrun --background tar cjf $TEMPFILE --warning=no-file-changed -C $LIBPODROOT .
+showrun --background tar cjf $TMPDIR/$TARBALL_BASENAME --warning=no-file-changed -C $LIBPODROOT .
trap delvm INT # Allow deleting VM if CTRL-C during create
# This fails if VM already exists: permit this usage to re-init
@@ -186,13 +195,13 @@ showrun $SSH_CMD --command "mkdir -p $GOSRC"
echo -e "\n${YEL}Transfering tarball to $VMNAME.${NOR}"
wait
-showrun $SCP_CMD $TEMPFILE root@$VMNAME:$TEMPFILE
+showrun $SCP_CMD $TARBALL root@$VMNAME:$TARBALL
echo -e "\n${YEL}Unpacking tarball into $GOSRC on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "tar xjf $TEMPFILE -C $GOSRC"
+showrun $SSH_CMD --command "tar xjf $TARBALL -C $GOSRC"
echo -e "\n${YEL}Removing tarball on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "rm -f $TEMPFILE"
+showrun $SSH_CMD --command "rm -f $TARBALL"
echo -e "\n${YEL}Executing environment setup${NOR}"
[[ "$1" == "-p" ]] && echo -e "${RED}Using package-based dependencies.${NOR}"
diff --git a/hack/tree_status.sh b/hack/tree_status.sh
index 12c1d1ee5..78fb4c6a3 100755
--- a/hack/tree_status.sh
+++ b/hack/tree_status.sh
@@ -8,6 +8,6 @@ then
else
echo "tree is dirty, please commit all changes and sync the vendor.conf"
echo ""
- echo $STATUS
+ echo "$STATUS"
exit 1
fi
diff --git a/libpod.conf b/libpod.conf
index acd6c8982..8d6158ed5 100644
--- a/libpod.conf
+++ b/libpod.conf
@@ -88,6 +88,14 @@ pause_command = "/pause"
# Default libpod support for container labeling
# label=true
+# Number of locks available for containers and pods.
+# If this is changed, a lock renumber must be performed (e.g. with the
+# 'podman system renumber' command).
+num_locks = 2048
+
+# Default OCI runtime
+runtime = "runc"
+
# Paths to look for a valid OCI runtime (runc, runv, etc)
[runtimes]
runc = [
diff --git a/libpod/adapter/runtime.go b/libpod/adapter/runtime.go
deleted file mode 100644
index 2c408dd2f..000000000
--- a/libpod/adapter/runtime.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// +build !remoteclient
-
-package adapter
-
-import (
- "context"
- "github.com/pkg/errors"
- "io"
- "io/ioutil"
- "os"
- "strconv"
-
- "github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/image"
- "github.com/containers/libpod/pkg/rootless"
- "github.com/urfave/cli"
-)
-
-// LocalRuntime describes a typical libpod runtime
-type LocalRuntime struct {
- *libpod.Runtime
- Remote bool
-}
-
-// ContainerImage ...
-type ContainerImage struct {
- *image.Image
-}
-
-// Container ...
-type Container struct {
- *libpod.Container
-}
-
-// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cli.Context) (*LocalRuntime, error) {
- runtime, err := libpodruntime.GetRuntime(c)
- if err != nil {
- return nil, err
- }
- return &LocalRuntime{
- Runtime: runtime,
- }, nil
-}
-
-// GetImages returns a slice of images in containerimages
-func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
- var containerImages []*ContainerImage
- images, err := r.Runtime.ImageRuntime().GetImages()
- if err != nil {
- return nil, err
- }
- for _, i := range images {
- containerImages = append(containerImages, &ContainerImage{i})
- }
- return containerImages, nil
-
-}
-
-// NewImageFromLocal returns a containerimage representation of a image from local storage
-func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
- img, err := r.Runtime.ImageRuntime().NewFromLocal(name)
- if err != nil {
- return nil, err
- }
- return &ContainerImage{img}, nil
-}
-
-// LoadFromArchiveReference calls into local storage to load an image from an archive
-func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
- var containerImages []*ContainerImage
- imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer)
- if err != nil {
- return nil, err
- }
- for _, i := range imgs {
- ci := ContainerImage{i}
- containerImages = append(containerImages, &ci)
- }
- return containerImages, nil
-}
-
-// New calls into local storage to look for an image in local storage or to pull it
-func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
- img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, forcePull, label)
- if err != nil {
- return nil, err
- }
- return &ContainerImage{img}, nil
-}
-
-// RemoveImage calls into local storage and removes an image
-func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
- return r.Runtime.RemoveImage(ctx, img.Image, force)
-}
-
-// LookupContainer ...
-func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
- ctr, err := r.Runtime.LookupContainer(idOrName)
- if err != nil {
- return nil, err
- }
- return &Container{ctr}, nil
-}
-
-// PruneImages is wrapper into PruneImages within the image pkg
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
- return r.ImageRuntime().PruneImages(all)
-}
-
-// Export is a wrapper to container export to a tarfile
-func (r *LocalRuntime) Export(name string, path string) error {
- ctr, err := r.Runtime.LookupContainer(name)
- if err != nil {
- return errors.Wrapf(err, "error looking up container %q", name)
- }
- if os.Geteuid() != 0 {
- state, err := ctr.State()
- if err != nil {
- return errors.Wrapf(err, "cannot read container state %q", ctr.ID())
- }
- if state == libpod.ContainerStateRunning || state == libpod.ContainerStatePaused {
- data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
- if err != nil {
- return errors.Wrapf(err, "cannot read conmon PID file %q", ctr.Config().ConmonPidFile)
- }
- conmonPid, err := strconv.Atoi(string(data))
- if err != nil {
- return errors.Wrapf(err, "cannot parse PID %q", data)
- }
- became, ret, err := rootless.JoinDirectUserAndMountNS(uint(conmonPid))
- if err != nil {
- return err
- }
- if became {
- os.Exit(ret)
- }
- } else {
- became, ret, err := rootless.BecomeRootInUserNS()
- if err != nil {
- return err
- }
- if became {
- os.Exit(ret)
- }
- }
- }
-
- return ctr.Export(path)
-}
-
-// Import is a wrapper to import a container image
-func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
- return r.Runtime.Import(ctx, source, reference, changes, history, quiet)
-}
diff --git a/libpod/adapter/runtime_remote.go b/libpod/adapter/runtime_remote.go
deleted file mode 100644
index c73e98c95..000000000
--- a/libpod/adapter/runtime_remote.go
+++ /dev/null
@@ -1,434 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "bufio"
- "context"
- "encoding/json"
- "fmt"
- "github.com/pkg/errors"
- "io"
- "os"
- "strings"
- "time"
-
- "github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/image"
- "github.com/opencontainers/go-digest"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
- "github.com/varlink/go/varlink"
-)
-
-// ImageRuntime is wrapper for image runtime
-type RemoteImageRuntime struct{}
-
-// RemoteRuntime describes a wrapper runtime struct
-type RemoteRuntime struct {
- Conn *varlink.Connection
- Remote bool
-}
-
-// LocalRuntime describes a typical libpod runtime
-type LocalRuntime struct {
- *RemoteRuntime
-}
-
-// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cli.Context) (*LocalRuntime, error) {
- runtime := RemoteRuntime{}
- conn, err := runtime.Connect()
- if err != nil {
- return nil, err
- }
- rr := RemoteRuntime{
- Conn: conn,
- Remote: true,
- }
- foo := LocalRuntime{
- &rr,
- }
- return &foo, nil
-}
-
-// Shutdown is a bogus wrapper for compat with the libpod runtime
-func (r RemoteRuntime) Shutdown(force bool) error {
- return nil
-}
-
-// ContainerImage
-type ContainerImage struct {
- remoteImage
-}
-
-type remoteImage struct {
- ID string
- Labels map[string]string
- RepoTags []string
- RepoDigests []string
- Parent string
- Size int64
- Created time.Time
- InputName string
- Names []string
- Digest digest.Digest
- isParent bool
- Runtime *LocalRuntime
-}
-
-// Container ...
-type Container struct {
- remoteContainer
-}
-
-// remoteContainer ....
-type remoteContainer struct {
- Runtime *LocalRuntime
- config *libpod.ContainerConfig
- state *libpod.ContainerState
-}
-
-// GetImages returns a slice of containerimages over a varlink connection
-func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
- var newImages []*ContainerImage
- images, err := iopodman.ListImages().Call(r.Conn)
- if err != nil {
- return nil, err
- }
- for _, i := range images {
- name := i.Id
- if len(i.RepoTags) > 1 {
- name = i.RepoTags[0]
- }
- newImage, err := imageInListToContainerImage(i, name, r)
- if err != nil {
- return nil, err
- }
- newImages = append(newImages, newImage)
- }
- return newImages, nil
-}
-
-func imageInListToContainerImage(i iopodman.ImageInList, name string, runtime *LocalRuntime) (*ContainerImage, error) {
- created, err := splitStringDate(i.Created)
- if err != nil {
- return nil, err
- }
- ri := remoteImage{
- InputName: name,
- ID: i.Id,
- Labels: i.Labels,
- RepoTags: i.RepoTags,
- RepoDigests: i.RepoTags,
- Parent: i.ParentId,
- Size: i.Size,
- Created: created,
- Names: i.RepoTags,
- isParent: i.IsParent,
- Runtime: runtime,
- }
- return &ContainerImage{ri}, nil
-}
-
-// NewImageFromLocal returns a container image representation of a image over varlink
-func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
- img, err := iopodman.GetImage().Call(r.Conn, name)
- if err != nil {
- return nil, err
- }
- return imageInListToContainerImage(img, name, r)
-
-}
-
-// LoadFromArchiveReference creates an image from a local archive
-func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
- // TODO We need to find a way to leak certDir, creds, and the tlsverify into this function, normally this would
- // come from cli options but we don't want want those in here either.
- imageID, err := iopodman.PullImage().Call(r.Conn, srcRef.DockerReference().String(), "", "", signaturePolicyPath, true)
- if err != nil {
- return nil, err
- }
- newImage, err := r.NewImageFromLocal(imageID)
- if err != nil {
- return nil, err
- }
- return []*ContainerImage{newImage}, nil
-}
-
-// New calls into local storage to look for an image in local storage or to pull it
-func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
- if label != nil {
- return nil, errors.New("the remote client function does not support checking a remote image for a label")
- }
- // TODO Creds needs to be figured out here too, like above
- tlsBool := dockeroptions.DockerInsecureSkipTLSVerify
- // Remember SkipTlsVerify is the opposite of tlsverify
- // If tlsBook is true or undefined, we do not skip
- SkipTlsVerify := false
- if tlsBool == types.OptionalBoolFalse {
- SkipTlsVerify = true
- }
- imageID, err := iopodman.PullImage().Call(r.Conn, name, dockeroptions.DockerCertPath, "", signaturePolicyPath, SkipTlsVerify)
- if err != nil {
- return nil, err
- }
- newImage, err := r.NewImageFromLocal(imageID)
- if err != nil {
- return nil, err
- }
- return newImage, nil
-}
-
-func splitStringDate(d string) (time.Time, error) {
- fields := strings.Fields(d)
- t := fmt.Sprintf("%sT%sZ", fields[0], fields[1])
- return time.ParseInLocation(time.RFC3339Nano, t, time.UTC)
-}
-
-// IsParent goes through the layers in the store and checks if i.TopLayer is
-// the parent of any other layer in store. Double check that image with that
-// layer exists as well.
-func (ci *ContainerImage) IsParent() (bool, error) {
- return ci.remoteImage.isParent, nil
-}
-
-// ID returns the image ID as a string
-func (ci *ContainerImage) ID() string {
- return ci.remoteImage.ID
-}
-
-// Names returns a string array of names associated with the image
-func (ci *ContainerImage) Names() []string {
- return ci.remoteImage.Names
-}
-
-// Created returns the time the image was created
-func (ci *ContainerImage) Created() time.Time {
- return ci.remoteImage.Created
-}
-
-// Size returns the size of the image
-func (ci *ContainerImage) Size(ctx context.Context) (*uint64, error) {
- usize := uint64(ci.remoteImage.Size)
- return &usize, nil
-}
-
-// Digest returns the image's digest
-func (ci *ContainerImage) Digest() digest.Digest {
- return ci.remoteImage.Digest
-}
-
-// Labels returns a map of the image's labels
-func (ci *ContainerImage) Labels(ctx context.Context) (map[string]string, error) {
- return ci.remoteImage.Labels, nil
-}
-
-// Dangling returns a bool if the image is "dangling"
-func (ci *ContainerImage) Dangling() bool {
- return len(ci.Names()) == 0
-}
-
-// TagImage ...
-func (ci *ContainerImage) TagImage(tag string) error {
- _, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag)
- return err
-}
-
-// RemoveImage calls varlink to remove an image
-func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
- return iopodman.RemoveImage().Call(r.Conn, img.InputName, force)
-}
-
-// History returns the history of an image and its layers
-func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error) {
- var imageHistories []*image.History
-
- reply, err := iopodman.HistoryImage().Call(ci.Runtime.Conn, ci.InputName)
- if err != nil {
- return nil, err
- }
- for _, h := range reply {
- created, err := splitStringDate(h.Created)
- if err != nil {
- return nil, err
- }
- ih := image.History{
- ID: h.Id,
- Created: &created,
- CreatedBy: h.CreatedBy,
- Size: h.Size,
- Comment: h.Comment,
- }
- imageHistories = append(imageHistories, &ih)
- }
- return imageHistories, nil
-}
-
-// LookupContainer gets basic information about container over a varlink
-// connection and then translates it to a *Container
-func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
- state, err := r.ContainerState(idOrName)
- if err != nil {
- return nil, err
- }
- config := r.Config(idOrName)
- if err != nil {
- return nil, err
- }
-
- rc := remoteContainer{
- r,
- config,
- state,
- }
-
- c := Container{
- rc,
- }
- return &c, nil
-}
-
-func (r *LocalRuntime) GetLatestContainer() (*Container, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// ContainerState returns the "state" of the container.
-func (r *LocalRuntime) ContainerState(name string) (*libpod.ContainerState, error) { //no-lint
- reply, err := iopodman.ContainerStateData().Call(r.Conn, name)
- if err != nil {
- return nil, err
- }
- data := libpod.ContainerState{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return &data, err
-
-}
-
-// Config returns a container config
-func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig {
- // TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer
- // further looking into it for after devconf.
- // The libpod function for this has no errors so we are kind of in a tough
- // spot here. Logging the errors for now.
- reply, err := iopodman.ContainerConfig().Call(r.Conn, name)
- if err != nil {
- logrus.Error("call to container.config failed")
- }
- data := libpod.ContainerConfig{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- logrus.Error("failed to unmarshal container inspect data")
- }
- return &data
-
-}
-
-// PruneImages is the wrapper call for a remote-client to prune images
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
- return iopodman.ImagesPrune().Call(r.Conn, all)
-}
-
-// Export is a wrapper to container export to a tarfile
-func (r *LocalRuntime) Export(name string, path string) error {
- tempPath, err := iopodman.ExportContainer().Call(r.Conn, name, "")
- if err != nil {
- return err
- }
-
- outputFile, err := os.Create(path)
- if err != nil {
- return err
- }
- defer outputFile.Close()
-
- writer := bufio.NewWriter(outputFile)
- defer writer.Flush()
-
- reply, err := iopodman.ReceiveFile().Send(r.Conn, varlink.Upgrade, tempPath, true)
- if err != nil {
- return err
- }
-
- length, _, err := reply()
- if err != nil {
- return errors.Wrap(err, "unable to get file length for transfer")
- }
-
- reader := r.Conn.Reader
- if _, err := io.CopyN(writer, reader, length); err != nil {
- return errors.Wrap(err, "file transer failed")
- }
-
- return nil
-}
-
-// Import implements the remote calls required to import a container image to the store
-func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
- // First we send the file to the host
- fs, err := os.Open(source)
- if err != nil {
- return "", err
- }
-
- fileInfo, err := fs.Stat()
- if err != nil {
- return "", err
- }
- reply, err := iopodman.SendFile().Send(r.Conn, varlink.Upgrade, "", int64(fileInfo.Size()))
- if err != nil {
- return "", err
- }
- _, _, err = reply()
- if err != nil {
- return "", err
- }
-
- reader := bufio.NewReader(fs)
- _, err = reader.WriteTo(r.Conn.Writer)
- if err != nil {
- return "", err
- }
- r.Conn.Writer.Flush()
-
- // All was sent, wait for the ACK from the server
- tempFile, err := r.Conn.Reader.ReadString(':')
- if err != nil {
- return "", err
- }
-
- // r.Conn is kaput at this point due to the upgrade
- if err := r.RemoteRuntime.RefreshConnection(); err != nil {
- return "", err
-
- }
- return iopodman.ImportImage().Call(r.Conn, strings.TrimRight(tempFile, ":"), reference, history, changes, true)
-}
-
-// GetAllVolumes retrieves all the volumes
-func (r *LocalRuntime) GetAllVolumes() ([]*libpod.Volume, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// RemoveVolume removes a volumes
-func (r *LocalRuntime) RemoveVolume(ctx context.Context, v *libpod.Volume, force, prune bool) error {
- return libpod.ErrNotImplemented
-}
-
-// GetContainers retrieves all containers from the state
-// Filters can be provided which will determine what containers are included in
-// the output. Multiple filters are handled by ANDing their output, so only
-// containers matching all filters are returned
-func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libpod.Container, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// RemoveContainer removes the given container
-// If force is specified, the container will be stopped first
-// Otherwise, RemoveContainer will return an error if the container is running
-func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force bool) error {
- return libpod.ErrNotImplemented
-}
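A note on the transfer mechanics removed above: Export asks the server to stage the tarball, then reads the advertised byte count and copies exactly that many bytes from the upgraded varlink connection. Below is a minimal, hedged sketch of the same length-prefixed copy using only the standard library; the reader endpoint and output path are placeholders, not podman's API.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
)

// receiveFile copies exactly length bytes from conn into a local file,
// mirroring the io.CopyN pattern used by Export above.
func receiveFile(conn io.Reader, path string, length int64) error {
	out, err := os.Create(path)
	if err != nil {
		return err
	}
	defer out.Close()

	w := bufio.NewWriter(out)
	defer w.Flush()

	if _, err := io.CopyN(w, conn, length); err != nil {
		return fmt.Errorf("file transfer failed: %v", err)
	}
	return nil
}

func main() {
	payload := []byte("example tarball contents")
	if err := receiveFile(bytes.NewReader(payload), "/tmp/export.tar", int64(len(payload))); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}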
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 5bc15dd7f..25ef5cd0e 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -783,6 +783,94 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
+// RewriteContainerConfig rewrites a container's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return ErrCtrRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ ctrBkt, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
+ if ctrDB == nil {
+ ctr.valid = false
+ return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ }
+
+ if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !pod.valid {
+ return ErrPodRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ podBkt, err := getPodBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ podDB := podBkt.Bucket([]byte(pod.ID()))
+ if podDB == nil {
+ pod.valid = false
+ return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ }
+
+ if err := podDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating pod %s config JSON", pod.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
@@ -1281,10 +1369,6 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return ErrDBClosed
}
- if !volume.valid {
- return ErrVolumeRemoved
- }
-
volName := []byte(volume.Name())
db, err := s.getDBCon()
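RewriteContainerConfig and RewritePodConfig above share one pattern: marshal the new config to JSON, open a read-write transaction, find the object's bucket, and overwrite the stored config blob. A self-contained sketch of that pattern against a throwaway bolt database follows; the bucket name, key, and config struct are illustrative only, not libpod's actual schema.

package main

import (
	"encoding/json"
	"log"

	"github.com/boltdb/bolt"
)

type config struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// rewriteConfig overwrites the JSON config stored for id, the same
// marshal-then-Put sequence used by RewriteContainerConfig above.
func rewriteConfig(db *bolt.DB, id string, newCfg *config) error {
	newJSON, err := json.Marshal(newCfg)
	if err != nil {
		return err
	}
	return db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("ctrs"))
		if err != nil {
			return err
		}
		return bkt.Put([]byte(id), newJSON)
	})
}

func main() {
	db, err := bolt.Open("/tmp/state.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := rewriteConfig(db, "ctr1", &config{ID: "ctr1", Name: "renumbered"}); err != nil {
		log.Fatal(err)
	}
}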
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index ea150cfac..3d749849d 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -348,13 +348,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
- // Get the lock
- lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
- if err != nil {
- return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
- }
- volume.lock = lock
-
volume.runtime = s.runtime
volume.valid = true
diff --git a/libpod/container.go b/libpod/container.go
index b0589be3b..75f4a4a4f 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -358,8 +358,7 @@ type ContainerConfig struct {
ExitCommand []string `json:"exitCommand,omitempty"`
// LocalVolumes are the built-in volumes we get from the --volumes-from flag
// It picks up the built-in volumes of the container used by --volumes-from
- LocalVolumes []string
-
+ LocalVolumes []spec.Mount
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
IsInfra bool `json:"pause"`
@@ -557,8 +556,16 @@ func (c *Container) NewNetNS() bool {
// PortMappings returns the ports that will be mapped into a container if
// a new network namespace is created
// If NewNetNS() is false, this value is unused
-func (c *Container) PortMappings() []ocicni.PortMapping {
- return c.config.PortMappings
+func (c *Container) PortMappings() ([]ocicni.PortMapping, error) {
+ // First check if the container belongs to a network namespace (like a pod)
+ if len(c.config.NetNsCtr) > 0 {
+ netNsCtr, err := c.runtime.LookupContainer(c.config.NetNsCtr)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to lookup network namespace for container %s", c.ID())
+ }
+ return netNsCtr.PortMappings()
+ }
+ return c.config.PortMappings, nil
}
// DNSServers returns DNS servers that will be used in the container's
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 149867759..09d7f220d 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -7,7 +7,6 @@ import (
"io/ioutil"
"os"
"strconv"
- "strings"
"time"
"github.com/containers/libpod/libpod/driver"
@@ -15,6 +14,7 @@ import (
"github.com/containers/libpod/pkg/lookup"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/daemon/caps"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
@@ -23,6 +23,10 @@ import (
// Init creates a container in the OCI runtime
func (c *Container) Init(ctx context.Context) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "containerInit")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -38,24 +42,15 @@ func (c *Container) Init(ctx context.Context) (err error) {
return errors.Wrapf(ErrCtrExists, "container %s has already been created in runtime", c.ID())
}
- notRunning, err := c.checkDependenciesRunning()
- if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
- }
- if len(notRunning) > 0 {
- depString := strings.Join(notRunning, ",")
- return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ // don't recursively start
+ if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ return err
}
- defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
- }
- }
- }()
-
if err := c.prepare(); err != nil {
+ if err2 := c.cleanup(ctx); err2 != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ }
return err
}
@@ -68,13 +63,18 @@ func (c *Container) Init(ctx context.Context) (err error) {
return c.init(ctx)
}
-// Start starts a container
-// Start can start configured, created or stopped containers
+// Start starts a container.
+// Start can start configured, created or stopped containers.
// For configured containers, the container will be initialized first, then
-// started
+// started.
// Stopped containers will be deleted and re-created in runc, undergoing a fresh
-// Init()
-func (c *Container) Start(ctx context.Context) (err error) {
+// Init().
+// If recursive is set, Start will also start all containers this container depends on.
+func (c *Container) Start(ctx context.Context, recursive bool) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "containerStart")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -83,64 +83,26 @@ func (c *Container) Start(ctx context.Context) (err error) {
return err
}
}
-
- // Container must be created or stopped to be started
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateCreated ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
- }
-
- notRunning, err := c.checkDependenciesRunning()
- if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
- }
- if len(notRunning) > 0 {
- depString := strings.Join(notRunning, ",")
- return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
- }
-
- defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
- }
- }
- }()
-
- if err := c.prepare(); err != nil {
+ if err := c.prepareToStart(ctx, recursive); err != nil {
return err
}
- if c.state.State == ContainerStateStopped {
- // Reinitialize the container if we need to
- if err := c.reinit(ctx); err != nil {
- return err
- }
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
- // Or initialize it if necessary
- if err := c.init(ctx); err != nil {
- return err
- }
- }
-
// Start the container
return c.start()
}
-// StartAndAttach starts a container and attaches to it
-// StartAndAttach can start configured, created or stopped containers
+// StartAndAttach starts a container and attaches to it.
+// StartAndAttach can start configured, created or stopped containers.
// For configured containers, the container will be initialized first, then
-// started
+// started.
// Stopped containers will be deleted and re-created in runc, undergoing a fresh
-// Init()
+// Init().
// If successful, an error channel will be returned containing the result of the
// attach call.
// The channel will be closed automatically after the result of attach has been
-// sent
-func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize) (attachResChan <-chan error, err error) {
+// sent.
+// If recursive is set, StartAndAttach will also start all containers this container depends on.
+func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, recursive bool) (attachResChan <-chan error, err error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -150,48 +112,10 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
}
}
- // Container must be created or stopped to be started
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateCreated ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
- return nil, errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
- }
-
- notRunning, err := c.checkDependenciesRunning()
- if err != nil {
- return nil, errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
- }
- if len(notRunning) > 0 {
- depString := strings.Join(notRunning, ",")
- return nil, errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
- }
-
- defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
- }
- }
- }()
-
- if err := c.prepare(); err != nil {
+ if err := c.prepareToStart(ctx, recursive); err != nil {
return nil, err
}
- if c.state.State == ContainerStateStopped {
- // Reinitialize the container if we need to
- if err := c.reinit(ctx); err != nil {
- return nil, err
- }
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
- // Or initialize it if necessary
- if err := c.init(ctx); err != nil {
- return nil, err
- }
- }
-
attachChan := make(chan error)
// Attach to the container before starting it
@@ -205,6 +129,24 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
return attachChan, nil
}
+// RestartWithTimeout restarts a running container with the given stop timeout in seconds
+func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+ }
+
+ if err = c.checkDependenciesAndHandleError(ctx); err != nil {
+ return err
+ }
+
+ return c.restartWithTimeout(ctx, timeout)
+}
+
// Stop uses the container's stop signal (or SIGTERM if no signal was specified)
// to stop the container, and if it has not stopped after container's stop
// timeout, SIGKILL is used to attempt to forcibly stop the container
@@ -730,28 +672,6 @@ func (c *Container) Sync() error {
return nil
}
-// RestartWithTimeout restarts a running container and takes a given timeout in uint
-func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err error) {
- if !c.batched {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- if err := c.syncContainer(); err != nil {
- return err
- }
- }
-
- notRunning, err := c.checkDependenciesRunning()
- if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
- }
- if len(notRunning) > 0 {
- depString := strings.Join(notRunning, ",")
- return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
- }
- return c.restartWithTimeout(ctx, timeout)
-}
-
// Refresh refreshes a container's state in the database, restarting the
// container if it is running
func (c *Container) Refresh(ctx context.Context) error {
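For callers, the visible change in this file is the new recursive flag on Start and StartAndAttach. A hedged usage sketch follows (obtaining the *libpod.Container from a runtime is outside the snippet; ctr is assumed to be valid).

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/libpod"
)

// startWithDeps starts ctr; passing recursive=true asks libpod to walk the
// dependency graph (for example a pod's infra container) and start it first,
// instead of failing when dependencies are not yet running.
func startWithDeps(ctx context.Context, ctr *libpod.Container) error {
	if err := ctr.Start(ctx, true); err != nil {
		return fmt.Errorf("starting container %s: %v", ctr.ID(), err)
	}
	return nil
}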
diff --git a/libpod/container_attach_linux.go b/libpod/container_attach_linux.go
index 1d6f0bd96..3ff6ddc76 100644
--- a/libpod/container_attach_linux.go
+++ b/libpod/container_attach_linux.go
@@ -109,8 +109,8 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
case err := <-receiveStdoutError:
return err
case err := <-stdinDone:
- if _, ok := err.(utils.DetachError); ok {
- return nil
+ if err == ErrDetach {
+ return err
}
if streams.AttachOutput || streams.AttachError {
return <-receiveStdoutError
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 026611e51..5c4fd1a31 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -162,7 +162,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
importBuilder.SetWorkDir(splitChange[1])
}
}
- candidates, _, err := util.ResolveName(destImage, "", sc, c.runtime.store)
+ candidates, _, _, err := util.ResolveName(destImage, "", sc, c.runtime.store)
if err != nil {
return nil, errors.Wrapf(err, "error resolving name %q", destImage)
}
diff --git a/libpod/container_graph.go b/libpod/container_graph.go
index 44a1f1736..da93be77d 100644
--- a/libpod/container_graph.go
+++ b/libpod/container_graph.go
@@ -1,6 +1,9 @@
package libpod
import (
+ "context"
+ "strings"
+
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -169,3 +172,97 @@ func detectCycles(graph *containerGraph) (bool, error) {
return false, nil
}
+
+// Visit a node on a container graph and start the container, or set an error if
+// a dependency failed to start. If restart is true, startNode will restart the node instead of starting it.
+func startNode(ctx context.Context, node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool, restart bool) {
+ // First, check if we have already visited the node
+ if ctrsVisited[node.id] {
+ return
+ }
+
+ // If setError is true, a dependency of us failed
+ // Mark us as failed and recurse
+ if setError {
+ // Mark us as visited, and set an error
+ ctrsVisited[node.id] = true
+ ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
+
+ // Hit anyone who depends on us, and set errors on them too
+ for _, successor := range node.dependedOn {
+ startNode(ctx, successor, true, ctrErrors, ctrsVisited, restart)
+ }
+
+ return
+ }
+
+ // Have all our dependencies started?
+ // If not, don't visit the node yet
+ depsVisited := true
+ for _, dep := range node.dependsOn {
+ depsVisited = depsVisited && ctrsVisited[dep.id]
+ }
+ if !depsVisited {
+ // Don't visit us yet; not all dependencies are up
+ // We'll hit the dependencies eventually, and when we do it will
+ // recurse here
+ return
+ }
+
+ // Going to try to start the container, mark us as visited
+ ctrsVisited[node.id] = true
+
+ ctrErrored := false
+
+ // Check if dependencies are running
+ // Graph traversal means we should have started them
+ // But they could have died before we got here
+ // Does not require that the container be locked, we only need to lock
+ // the dependencies
+ depsStopped, err := node.container.checkDependenciesRunning()
+ if err != nil {
+ ctrErrors[node.id] = err
+ ctrErrored = true
+ } else if len(depsStopped) > 0 {
+ // Our dependencies are not running
+ depsList := strings.Join(depsStopped, ",")
+ ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+ ctrErrored = true
+ }
+
+ // Lock before we start
+ node.container.lock.Lock()
+
+ // Sync the container to pick up current state
+ if !ctrErrored {
+ if err := node.container.syncContainer(); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+
+ // Start the container (only if it is not running)
+ if !ctrErrored {
+ if !restart && node.container.state.State != ContainerStateRunning {
+ if err := node.container.initAndStart(ctx); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+ if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
+ if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+ }
+
+ node.container.lock.Unlock()
+
+ // Recurse to anyone who depends on us and start them
+ for _, successor := range node.dependedOn {
+ startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
+ }
+
+ return
+}
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index b0dcc853e..e3753d825 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -10,22 +10,18 @@ import (
"path/filepath"
"strconv"
"strings"
- "syscall"
"time"
- "github.com/containers/buildah/imagebuildah"
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
"github.com/containers/libpod/pkg/hooks/exec"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/mount"
- "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/text/language"
@@ -252,6 +248,10 @@ func (c *Container) syncContainer() error {
// Create container root filesystem for use
func (c *Container) setupStorage(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "setupStorage")
+ span.SetTag("type", "container")
+ defer span.Finish()
+
if !c.valid {
return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
}
@@ -489,9 +489,20 @@ func (c *Container) removeConmonFiles() error {
return errors.Wrapf(err, "error removing container %s OOM file", c.ID())
}
+ // Instead of outright deleting the exit file, rename it (if it exists).
+ // We want to retain it so we can get the exit code of containers which
+ // are removed (at least until we have a workable events system)
exitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID())
- if err := os.Remove(exitFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s exit file", c.ID())
+ oldExitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID()))
+ if _, err := os.Stat(exitFile); err != nil {
+ if !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
+ }
+ } else if err == nil {
+ // Rename should replace the old exit file (if it exists)
+ if err := os.Rename(exitFile, oldExitFile); err != nil {
+ return errors.Wrapf(err, "error renaming container %s exit file", c.ID())
+ }
}
return nil
@@ -553,6 +564,160 @@ func (c *Container) save() error {
return nil
}
+// Checks that the container is in the right state, then initializes it in preparation for starting.
+// If recursive is true, each of the container's dependencies will be started.
+// Otherwise, this function will return an error if any dependencies of this container aren't running.
+func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
+ // Container must be created or stopped to be started
+ if !(c.state.State == ContainerStateConfigured ||
+ c.state.State == ContainerStateCreated ||
+ c.state.State == ContainerStateStopped ||
+ c.state.State == ContainerStateExited) {
+ return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
+ }
+
+ if !recursive {
+ if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ return err
+ }
+ } else {
+ if err := c.startDependencies(ctx); err != nil {
+ return err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ if err2 := c.cleanup(ctx); err2 != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ }
+ }
+ }()
+
+ if err := c.prepare(); err != nil {
+ return err
+ }
+
+ if c.state.State == ContainerStateStopped {
+ // Reinitialize the container if we need to
+ if err := c.reinit(ctx); err != nil {
+ return err
+ }
+ } else if c.state.State == ContainerStateConfigured ||
+ c.state.State == ContainerStateExited {
+ // Or initialize it if necessary
+ if err := c.init(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// checkDependenciesAndHandleError checks that all dependencies are running and returns a helpful error if any are not
+func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error {
+ notRunning, err := c.checkDependenciesRunning()
+ if err != nil {
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
+ }
+ if len(notRunning) > 0 {
+ depString := strings.Join(notRunning, ",")
+ return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ }
+
+ return nil
+}
+
+// Recursively start all dependencies of a container so the container can be started.
+func (c *Container) startDependencies(ctx context.Context) error {
+ depCtrIDs := c.Dependencies()
+ if len(depCtrIDs) == 0 {
+ return nil
+ }
+
+ depVisitedCtrs := make(map[string]*Container)
+ if err := c.getAllDependencies(depVisitedCtrs); err != nil {
+ return errors.Wrapf(err, "error starting dependency for container %s", c.ID())
+ }
+
+ // Because of how Go handles passing slices through functions, a slice cannot grow between function calls
+ // without clunky syntax. Work around this by translating the map into a slice for buildContainerGraph
+ depCtrs := make([]*Container, 0)
+ for _, ctr := range depVisitedCtrs {
+ depCtrs = append(depCtrs, ctr)
+ }
+
+ // Build a dependency graph of containers
+ graph, err := buildContainerGraph(depCtrs)
+ if err != nil {
+ return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID())
+ }
+
+ // If there are no containers without dependencies, we can't start
+ // Error out
+ if len(graph.noDepNodes) == 0 {
+ // we have no dependencies that need starting, go ahead and return
+ if len(graph.nodes) == 0 {
+ return nil
+ }
+ return errors.Wrapf(ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
+ }
+
+ ctrErrors := make(map[string]error)
+ ctrsVisited := make(map[string]bool)
+
+ // Traverse the graph beginning at nodes with no dependencies
+ for _, node := range graph.noDepNodes {
+ startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
+ }
+
+ if len(ctrErrors) > 0 {
+ logrus.Errorf("error starting some container dependencies")
+ for _, e := range ctrErrors {
+ logrus.Errorf("%q", e)
+ }
+ return errors.Wrapf(ErrInternal, "error starting some containers")
+ }
+ return nil
+}
+
+// getAllDependencies is a precursor to starting dependencies.
+// To start a container with all of its dependencies, we need to recursively find all dependencies
+// a container has, as well as each of those containers' dependencies, and so on.
+// To do so, keep track of containers already visited (so there aren't redundant state lookups),
+// and recursively search until we have reached the leaves of every dependency node.
+// Since we need to start all dependencies for our original container to successfully start, we propagate any errors
+// in looking up dependencies.
+// Note: this function is currently meant as a robust solution to a narrow problem: start an infra-container when
+// a container in the pod is run. It has not been tested for performance past one level, so expansion of recursive start
+// must be tested first.
+func (c *Container) getAllDependencies(visited map[string]*Container) error {
+ depIDs := c.Dependencies()
+ if len(depIDs) == 0 {
+ return nil
+ }
+ for _, depID := range depIDs {
+ if _, ok := visited[depID]; !ok {
+ dep, err := c.runtime.state.LookupContainer(depID)
+ if err != nil {
+ return err
+ }
+ status, err := dep.State()
+ if err != nil {
+ return err
+ }
+ // if the dependency is already running, we can assume its dependencies are also running
+ // so no need to add them to those we need to start
+ if status != ContainerStateRunning {
+ visited[depID] = dep
+ if err := dep.getAllDependencies(visited); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
// Check if a container's dependencies are running
// Returns a []string containing the IDs of dependencies that are not running
func (c *Container) checkDependenciesRunning() ([]string, error) {
@@ -630,6 +795,10 @@ func (c *Container) completeNetworkSetup() error {
// Initialize a container, creating it in the runtime
func (c *Container) init(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "init")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
// Generate the OCI spec
spec, err := c.generateSpec(ctx)
if err != nil {
@@ -663,6 +832,10 @@ func (c *Container) init(ctx context.Context) error {
// Deletes the container in the runtime, and resets its state to Exited.
// The container can be restarted cleanly after this.
func (c *Container) cleanupRuntime(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "cleanupRuntime")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
// If the container is not ContainerStateStopped, do nothing
if c.state.State != ContainerStateStopped {
return nil
@@ -698,6 +871,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// Not necessary for ContainerStateExited - the container has already been
// removed from the runtime, so init() can proceed freely.
func (c *Container) reinit(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "reinit")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
if err := c.cleanupRuntime(ctx); err != nil {
@@ -930,6 +1107,10 @@ func (c *Container) cleanupStorage() error {
func (c *Container) cleanup(ctx context.Context) error {
var lastError error
+ span, _ := opentracing.StartSpanFromContext(ctx, "cleanup")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
logrus.Debugf("Cleaning up container %s", c.ID())
// Clean up network namespace, if present
@@ -961,6 +1142,10 @@ func (c *Container) cleanup(ctx context.Context) error {
// delete deletes the container and runs any configured poststop
// hooks.
func (c *Container) delete(ctx context.Context) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "delete")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if err := c.runtime.ociRuntime.deleteContainer(c); err != nil {
return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
}
@@ -976,6 +1161,10 @@ func (c *Container) delete(ctx context.Context) (err error) {
// the OCI Runtime Specification (which requires them to run
// post-delete, despite the stage name).
func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "postDeleteHooks")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if c.state.ExtensionStageHooks != nil {
extensionHooks, ok := c.state.ExtensionStageHooks["poststop"]
if ok {
@@ -1042,113 +1231,6 @@ func (c *Container) writeStringToRundir(destFile, output string) (string, error)
return filepath.Join(c.state.DestinationRunDir, destFile), nil
}
-func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator, execUser *user.ExecUser) error {
- var uid, gid int
- mountPoint := c.state.Mountpoint
- if !c.state.Mounted {
- return errors.Wrapf(ErrInternal, "container is not mounted")
- }
- newImage, err := c.runtime.imageRuntime.NewFromLocal(c.config.RootfsImageID)
- if err != nil {
- return err
- }
- imageData, err := newImage.Inspect(ctx)
- if err != nil {
- return err
- }
- // Add the built-in volumes of the container passed in to --volumes-from
- for _, vol := range c.config.LocalVolumes {
- if imageData.Config.Volumes == nil {
- imageData.Config.Volumes = map[string]struct{}{
- vol: {},
- }
- } else {
- imageData.Config.Volumes[vol] = struct{}{}
- }
- }
-
- if c.config.User != "" {
- if execUser == nil {
- return errors.Wrapf(ErrInternal, "nil pointer passed to addLocalVolumes for execUser")
- }
- uid = execUser.Uid
- gid = execUser.Gid
- }
-
- for k := range imageData.Config.Volumes {
- mount := spec.Mount{
- Destination: k,
- Type: "bind",
- Options: []string{"private", "bind", "rw"},
- }
- if MountExists(g.Mounts(), k) {
- continue
- }
- volumePath := filepath.Join(c.config.StaticDir, "volumes", k)
-
- // Ensure the symlinks are resolved
- resolvedSymlink, err := imagebuildah.ResolveSymLink(mountPoint, k)
- if err != nil {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot resolve %s in %s for container %s", k, mountPoint, c.ID())
- }
- var srcPath string
- if resolvedSymlink != "" {
- srcPath = filepath.Join(mountPoint, resolvedSymlink)
- } else {
- srcPath = filepath.Join(mountPoint, k)
- }
-
- if _, err := os.Stat(srcPath); os.IsNotExist(err) {
- logrus.Infof("Volume image mount point %s does not exist in root FS, need to create it", k)
- if err = os.MkdirAll(srcPath, 0755); err != nil {
- return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = os.Chown(srcPath, uid, gid); err != nil {
- return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", srcPath, k, c.ID())
- }
- }
-
- if _, err := os.Stat(volumePath); os.IsNotExist(err) {
- if err = os.MkdirAll(volumePath, 0755); err != nil {
- return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = os.Chown(volumePath, uid, gid); err != nil {
- return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = label.Relabel(volumePath, c.config.MountLabel, false); err != nil {
- return errors.Wrapf(err, "error relabeling directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
- if err = chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, k, c.ID(), srcPath)
- }
-
- // Set the volume path with the same owner and permission of source path
- sstat, _ := os.Stat(srcPath)
- st, ok := sstat.Sys().(*syscall.Stat_t)
- if !ok {
- return fmt.Errorf("could not convert to syscall.Stat_t")
- }
- uid := int(st.Uid)
- gid := int(st.Gid)
-
- if err := os.Lchown(volumePath, uid, gid); err != nil {
- return err
- }
- if os.Chmod(volumePath, sstat.Mode()); err != nil {
- return err
- }
-
- }
-
- mount.Source = volumePath
- g.AddMount(mount)
- }
- return nil
-}
-
// Save OCI spec to disk, replacing any existing specs for the container
func (c *Container) saveSpec(spec *spec.Spec) error {
// If the OCI spec already exists, we need to replace it
@@ -1292,3 +1374,30 @@ func getExcludedCGroups() (excludes []string) {
excludes = []string{"rdma"}
return
}
+
+// namedVolumes returns named volumes for the container
+func (c *Container) namedVolumes() ([]string, error) {
+ var volumes []string
+ for _, vol := range c.config.Spec.Mounts {
+ if strings.HasPrefix(vol.Source, c.runtime.config.VolumePath) {
+ volume := strings.TrimPrefix(vol.Source, c.runtime.config.VolumePath+"/")
+ split := strings.Split(volume, "/")
+ volume = split[0]
+ if _, err := c.runtime.state.Volume(volume); err == nil {
+ volumes = append(volumes, volume)
+ }
+ }
+ }
+ return volumes, nil
+}
+
+// copyWithTarFromImage copies a path from the container's mounted image into dest; ideally this would use chrootarchive.
+func (c *Container) copyWithTarFromImage(src, dest string) error {
+ mountpoint, err := c.mount()
+ if err != nil {
+ return err
+ }
+ a := archive.NewDefaultArchiver()
+ source := filepath.Join(mountpoint, src)
+ return a.CopyWithTar(source, dest)
+}
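getAllDependencies above is essentially a depth-first walk with a visited map. The toy sketch below shows the same traversal over plain strings; the sample graph is made up and stands in for Container.Dependencies().

package main

import "fmt"

// deps maps each node to the nodes it depends on.
var deps = map[string][]string{
	"web":   {"db", "cache"},
	"db":    {"infra"},
	"cache": {"infra"},
	"infra": {},
}

// collect mirrors getAllDependencies: walk the dependency tree depth-first,
// recording each unvisited node so redundant lookups are avoided.
func collect(node string, visited map[string]bool) {
	for _, dep := range deps[node] {
		if !visited[dep] {
			visited[dep] = true
			collect(dep, visited)
		}
	}
}

func main() {
	visited := make(map[string]bool)
	collect("web", visited)
	fmt.Println(visited) // map[cache:true db:true infra:true]
}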
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index bcdfdaee3..b074efa3a 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -26,11 +26,11 @@ import (
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/secrets"
"github.com/containers/storage/pkg/idtools"
- "github.com/mrunalp/fileutils"
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -170,10 +170,15 @@ func (c *Container) cleanupNetwork() error {
// Generate spec for a container
// Accepts a map of the container's dependencies
func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "generateSpec")
+ span.SetTag("type", "container")
+ defer span.Finish()
+
execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, nil)
if err != nil {
return nil, err
}
+
g := generate.NewFromSpec(c.config.Spec)
// If network namespace was requested, add it now
@@ -235,13 +240,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
}
- // Bind builtin image volumes
- if c.config.Rootfs == "" && c.config.ImageVolumes {
- if err := c.addLocalVolumes(ctx, &g, execUser); err != nil {
- return nil, errors.Wrapf(err, "error mounting image volumes")
- }
- }
-
if c.config.User != "" {
// User and Group must go together
g.SetProcessUID(uint32(execUser.Uid))
@@ -483,6 +481,19 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
if c.state.State != ContainerStateRunning {
return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
+
+ // Create the CRIU log file and label it
+ dumpLog := filepath.Join(c.bundlePath(), "dump.log")
+
+ logFile, err := os.OpenFile(dumpLog, os.O_CREATE, 0600)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create CRIU log file %q", dumpLog)
+ }
+ logFile.Close()
+ if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
+ return errors.Wrapf(err, "failed to label CRIU log file %q", dumpLog)
+ }
+
if err := c.runtime.ociRuntime.checkpointContainer(c, options); err != nil {
return err
}
@@ -678,20 +689,12 @@ func (c *Container) makeBindMounts() error {
// If it doesn't, don't copy them
resolvPath, exists := bindMounts["/etc/resolv.conf"]
if exists {
- resolvDest := filepath.Join(c.state.RunDir, "resolv.conf")
- if err := fileutils.CopyFile(resolvPath, resolvDest); err != nil {
- return errors.Wrapf(err, "error copying resolv.conf from dependency container %s of container %s", depCtr.ID(), c.ID())
- }
- c.state.BindMounts["/etc/resolv.conf"] = resolvDest
- }
+ c.state.BindMounts["/etc/resolv.conf"] = resolvPath
+ }
hostsPath, exists := bindMounts["/etc/hosts"]
if exists {
- hostsDest := filepath.Join(c.state.RunDir, "hosts")
- if err := fileutils.CopyFile(hostsPath, hostsDest); err != nil {
- return errors.Wrapf(err, "error copying hosts file from dependency container %s of container %s", depCtr.ID(), c.ID())
- }
- c.state.BindMounts["/etc/hosts"] = hostsDest
+ c.state.BindMounts["/etc/hosts"] = hostsPath
}
} else {
newResolv, err := c.generateResolvConf()
@@ -706,6 +709,14 @@ func (c *Container) makeBindMounts() error {
}
c.state.BindMounts["/etc/hosts"] = newHosts
}
+
+ if err := label.Relabel(c.state.BindMounts["/etc/hosts"], c.config.MountLabel, true); err != nil {
+ return err
+ }
+
+ if err := label.Relabel(c.state.BindMounts["/etc/resolv.conf"], c.config.MountLabel, true); err != nil {
+ return err
+ }
}
// SHM is always added when we mount the container
@@ -759,8 +770,24 @@ func (c *Container) makeBindMounts() error {
// generateResolvConf generates a containers resolv.conf
func (c *Container) generateResolvConf() (string, error) {
+ resolvConf := "/etc/resolv.conf"
+ for _, ns := range c.config.Spec.Linux.Namespaces {
+ if ns.Type == spec.NetworkNamespace {
+ if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
+ definedPath := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
+ _, err := os.Stat(definedPath)
+ if err == nil {
+ resolvConf = definedPath
+ } else if !os.IsNotExist(err) {
+ return "", errors.Wrapf(err, "failed to stat %s", definedPath)
+ }
+ }
+ break
+ }
+ }
+
// Determine the endpoint for resolv.conf in case it is a symlink
- resolvPath, err := filepath.EvalSymlinks("/etc/resolv.conf")
+ resolvPath, err := filepath.EvalSymlinks(resolvConf)
if err != nil {
return "", err
}
@@ -810,7 +837,7 @@ func (c *Container) generateResolvConf() (string, error) {
}
// Relabel resolv.conf for the container
- if err := label.Relabel(destPath, c.config.MountLabel, false); err != nil {
+ if err := label.Relabel(destPath, c.config.MountLabel, true); err != nil {
return "", err
}
diff --git a/libpod/errors.go b/libpod/errors.go
index d6614141c..dd82d0796 100644
--- a/libpod/errors.go
+++ b/libpod/errors.go
@@ -2,15 +2,21 @@ package libpod
import (
"errors"
+
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/utils"
)
var (
// ErrNoSuchCtr indicates the requested container does not exist
- ErrNoSuchCtr = errors.New("no such container")
+ ErrNoSuchCtr = image.ErrNoSuchCtr
+
// ErrNoSuchPod indicates the requested pod does not exist
- ErrNoSuchPod = errors.New("no such pod")
+ ErrNoSuchPod = image.ErrNoSuchPod
+
// ErrNoSuchImage indicates the requested image does not exist
- ErrNoSuchImage = errors.New("no such image")
+ ErrNoSuchImage = image.ErrNoSuchImage
+
// ErrNoSuchVolume indicates the requested volume does not exist
ErrNoSuchVolume = errors.New("no such volume")
@@ -51,6 +57,10 @@ var (
// ErrInternal indicates an internal library error
ErrInternal = errors.New("internal libpod error")
+ // ErrDetach indicates that an attach session was manually detached by
+ // the user.
+ ErrDetach = utils.ErrDetach
+
// ErrRuntimeStopped indicates that the runtime has already been shut
// down and no further operations can be performed on it
ErrRuntimeStopped = errors.New("runtime has already been stopped")
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 739372e77..b20419d7b 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -5,14 +5,18 @@ import (
"encoding/json"
"fmt"
"io"
+ "os"
"strings"
"syscall"
"time"
types2 "github.com/containernetworking/cni/pkg/types"
cp "github.com/containers/image/copy"
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ ociarchive "github.com/containers/image/oci/archive"
is "github.com/containers/image/storage"
"github.com/containers/image/tarball"
"github.com/containers/image/transports"
@@ -26,7 +30,9 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/reexec"
digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -126,6 +132,10 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) {
// New creates a new image object where the image could be local
// or remote
func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, forcePull bool, label *string) (*Image, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "newImage")
+ span.SetTag("type", "runtime")
+ defer span.Finish()
+
// We don't know if the image is local or not ... check local first
newImage := Image{
InputName: name,
@@ -805,6 +815,10 @@ func (i *Image) imageInspectInfo(ctx context.Context) (*types.ImageInspectInfo,
// Inspect returns an image's inspect data
func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "imageInspect")
+ span.SetTag("type", "image")
+ defer span.Finish()
+
ociv1Img, err := i.ociv1Image(ctx)
if err != nil {
return nil, err
@@ -1075,3 +1089,65 @@ func (i *Image) Comment(ctx context.Context, manifestType string) (string, error
}
return ociv1Img.History[0].Comment, nil
}
+
+// Save writes a container image to the filesystem
+func (i *Image) Save(ctx context.Context, source, format, output string, moreTags []string, quiet, compress bool) error {
+ var (
+ writer io.Writer
+ destRef types.ImageReference
+ manifestType string
+ err error
+ )
+
+ if quiet {
+ writer = os.Stderr
+ }
+ switch format {
+ case "oci-archive":
+ destImageName := imageNameForSaveDestination(i, source)
+ destRef, err = ociarchive.NewReference(output, destImageName) // destImageName may be ""
+ if err != nil {
+ return errors.Wrapf(err, "error getting OCI archive ImageReference for (%q, %q)", output, destImageName)
+ }
+ case "oci-dir":
+ destRef, err = directory.NewReference(output)
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
+ }
+ manifestType = imgspecv1.MediaTypeImageManifest
+ case "docker-dir":
+ destRef, err = directory.NewReference(output)
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
+ }
+ manifestType = manifest.DockerV2Schema2MediaType
+ case "docker-archive", "":
+ dst := output
+ destImageName := imageNameForSaveDestination(i, source)
+ if destImageName != "" {
+ dst = fmt.Sprintf("%s:%s", dst, destImageName)
+ }
+ destRef, err = dockerarchive.ParseReference(dst) // FIXME? Add dockerarchive.NewReference
+ if err != nil {
+ return errors.Wrapf(err, "error getting Docker archive ImageReference for %q", dst)
+ }
+ default:
+ return errors.Errorf("unknown format option %q", format)
+ }
+ // supports saving multiple tags to the same tar archive
+ var additionaltags []reference.NamedTagged
+ if len(moreTags) > 0 {
+ additionaltags, err = GetAdditionalTags(moreTags)
+ if err != nil {
+ return err
+ }
+ }
+ if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil {
+ if err2 := os.Remove(output); err2 != nil {
+ logrus.Errorf("error deleting %q: %v", output, err)
+ }
+ return errors.Wrapf(err, "unable to save %q", source)
+ }
+
+ return nil
+}
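Save's switch maps the save command's --format values onto containers/image transports (oci-archive, oci-dir, docker-dir, docker-archive). A hedged usage sketch, assuming img is an *image.Image already resolved by the image runtime; the image name and output path are placeholders.

package example

import (
	"context"

	"github.com/containers/libpod/libpod/image"
)

// saveAsDockerArchive writes img to a docker-archive tarball, the default
// format in the switch above. Extra tags could be passed via moreTags.
func saveAsDockerArchive(ctx context.Context, img *image.Image) error {
	// Arguments: source name, format, output path, moreTags, quiet, compress.
	return img.Save(ctx, "alpine", "docker-archive", "/tmp/alpine.tar", nil, false, false)
}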
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 6fef96e37..6b9f7fc67 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -19,6 +19,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/pkg/registries"
multierror "github.com/hashicorp/go-multierror"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -109,6 +110,9 @@ func (ir *Runtime) getSinglePullRefPairGoal(srcRef types.ImageReference, destNam
// pullGoalFromImageReference returns a pull goal for a single ImageReference, depending on the used transport.
func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.ImageReference, imgName string, sc *types.SystemContext) (*pullGoal, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "pullGoalFromImageReference")
+ defer span.Finish()
+
// supports pulling from docker-archive, oci, and registries
switch srcRef.Transport().Name() {
case DockerArchive:
@@ -194,6 +198,9 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
// pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries.
// Use pullImageFromReference if the source is known precisely.
func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, label *string) ([]string, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromHeuristicSource")
+ defer span.Finish()
+
var goal *pullGoal
sc := GetSystemContext(signaturePolicyPath, authfile, false)
srcRef, err := alltransports.ParseImageName(inputName)
@@ -214,6 +221,9 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
// pullImageFromReference pulls an image from a types.imageReference.
func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromReference")
+ defer span.Finish()
+
sc := GetSystemContext(signaturePolicyPath, authfile, false)
goal, err := ir.pullGoalFromImageReference(ctx, srcRef, transports.ImageName(srcRef), sc)
if err != nil {
@@ -224,6 +234,9 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag
// doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, label *string) ([]string, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "doPullImage")
+ defer span.Finish()
+
policyContext, err := getPolicyContext(sc)
if err != nil {
return nil, err
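The tracing added in this and the preceding files always follows the same span-per-function shape. A minimal sketch of that pattern is below; it relies only on the opentracing-go calls shown above and assumes a global tracer has been registered elsewhere (otherwise the no-op tracer is used).

package example

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

// tracedStep derives a span from the incoming context, tags it, and finishes
// it on return, matching the pattern used throughout this diff.
func tracedStep(ctx context.Context) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, "tracedStep")
	span.SetTag("struct", "example")
	defer span.Finish()

	// Do the real work with ctx so child calls nest under this span.
	_ = ctx
	return nil
}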
diff --git a/libpod/image/search.go b/libpod/image/search.go
new file mode 100644
index 000000000..212eff00b
--- /dev/null
+++ b/libpod/image/search.go
@@ -0,0 +1,277 @@
+package image
+
+import (
+ "context"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/containers/image/docker"
+ "github.com/containers/image/types"
+ "github.com/containers/libpod/libpod/common"
+ sysreg "github.com/containers/libpod/pkg/registries"
+ "github.com/fatih/camelcase"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+)
+
+const (
+ descriptionTruncLength = 44
+ maxQueries = 25
+ maxParallelSearches = int64(6)
+)
+
+// SearchResult is holding image-search related data.
+type SearchResult struct {
+ // Index is the image index (e.g., "docker.io" or "quay.io")
+ Index string
+ // Name is the canonical name of the image (e.g., "docker.io/library/alpine").
+ Name string
+ // Description of the image.
+ Description string
+ // Stars is the number of stars of the image.
+ Stars int
+ // Official indicates if it's an official image.
+ Official string
+ // Automated indicates if the image was created by an automated build.
+ Automated string
+}
+
+// SearchOptions are used to control the behaviour of SearchImages.
+type SearchOptions struct {
+ // Filter allows filtering the results.
+ Filter SearchFilter
+ // Limit limits the number of queries per index (default: 25). Must be
+ // greater than 0 to override the default value.
+ Limit int
+ // NoTrunc prevents the output from being truncated.
+ NoTrunc bool
+ // Authfile is the path to the authentication file.
+ Authfile string
+ // InsecureSkipTLSVerify allows skipping TLS verification.
+ InsecureSkipTLSVerify types.OptionalBool
+}
+
+// SearchFilter allows filtering the results of SearchImages.
+type SearchFilter struct {
+ // Stars is the minimum number of stars an image must have to be included.
+ Stars int
+ // IsAutomated decides if only images from automated builds are displayed.
+ IsAutomated types.OptionalBool
+ // IsOfficial decides if only official images are displayed.
+ IsOfficial types.OptionalBool
+}
+
+func splitCamelCase(src string) string {
+ entries := camelcase.Split(src)
+ return strings.Join(entries, " ")
+}
+
+// HeaderMap returns the headers of a SearchResult.
+func (s *SearchResult) HeaderMap() map[string]string {
+ v := reflect.Indirect(reflect.ValueOf(s))
+ values := make(map[string]string, v.NumField())
+
+ for i := 0; i < v.NumField(); i++ {
+ key := v.Type().Field(i).Name
+ value := key
+ values[key] = strings.ToUpper(splitCamelCase(value))
+ }
+ return values
+}
+
+// SearchImages searches images based on term and the specified SearchOptions
+// in all registries.
+func SearchImages(term string, options SearchOptions) ([]SearchResult, error) {
+ // Check if search term has a registry in it
+ registry, err := sysreg.GetRegistry(term)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting registry from %q", term)
+ }
+ if registry != "" {
+ term = term[len(registry)+1:]
+ }
+
+ registries, err := getRegistries(registry)
+ if err != nil {
+ return nil, err
+ }
+
+ // searchOutputData is used as a return value for searching in parallel.
+ type searchOutputData struct {
+ data []SearchResult
+ err error
+ }
+
+ // Follow Firefox's example and limit parallel searches to 6.
+ sem := semaphore.NewWeighted(maxParallelSearches)
+ wg := sync.WaitGroup{}
+ wg.Add(len(registries))
+ data := make([]searchOutputData, len(registries))
+
+ searchImageInRegistryHelper := func(index int, registry string) {
+ defer sem.Release(1)
+ defer wg.Done()
+ searchOutput, err := searchImageInRegistry(term, registry, options)
+ data[index] = searchOutputData{data: searchOutput, err: err}
+ }
+
+ ctx := context.Background()
+ for i := range registries {
+ sem.Acquire(ctx, 1)
+ go searchImageInRegistryHelper(i, registries[i])
+ }
+
+ wg.Wait()
+ results := []SearchResult{}
+ for _, d := range data {
+ if d.err != nil {
+ return nil, d.err
+ }
+ results = append(results, d.data...)
+ }
+ return results, nil
+}
+
+// getRegistries returns the list of registries to search, depending on an optional registry specification
+func getRegistries(registry string) ([]string, error) {
+ var registries []string
+ if registry != "" {
+ registries = append(registries, registry)
+ } else {
+ var err error
+ registries, err = sysreg.GetRegistries()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting registries to search")
+ }
+ }
+ return registries, nil
+}
+
+func searchImageInRegistry(term string, registry string, options SearchOptions) ([]SearchResult, error) {
+ // Max number of queries by default is 25
+ limit := maxQueries
+ if options.Limit > 0 {
+ limit = options.Limit
+ }
+
+ sc := common.GetSystemContext("", options.Authfile, false)
+ sc.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+ // FIXME: Set this more globally. Probably no reason not to have it in
+ // every types.SystemContext, and to compute the value just once in one
+ // place.
+ sc.SystemRegistriesConfPath = sysreg.SystemRegistriesConfPath()
+ results, err := docker.SearchRegistry(context.TODO(), sc, registry, term, limit)
+ if err != nil {
+ logrus.Errorf("error searching registry %q: %v", registry, err)
+ return []SearchResult{}, nil
+ }
+ index := registry
+ arr := strings.Split(registry, ".")
+ if len(arr) > 2 {
+ index = strings.Join(arr[len(arr)-2:], ".")
+ }
+
+ // limit is the number of results to output
+ // if the total number of results is less than the limit, output all of them
+ // if the limit has been set by the user, output at most that many results
+ limit = maxQueries
+ if len(results) < limit {
+ limit = len(results)
+ }
+ if options.Limit != 0 && options.Limit < len(results) {
+ limit = options.Limit
+ }
+
+ paramsArr := []SearchResult{}
+ for i := 0; i < limit; i++ {
+ // Check whether query matches filters
+ if !(options.Filter.matchesAutomatedFilter(results[i]) && options.Filter.matchesOfficialFilter(results[i]) && options.Filter.matchesStarFilter(results[i])) {
+ continue
+ }
+ official := ""
+ if results[i].IsOfficial {
+ official = "[OK]"
+ }
+ automated := ""
+ if results[i].IsAutomated {
+ automated = "[OK]"
+ }
+ description := strings.Replace(results[i].Description, "\n", " ", -1)
+ if len(description) > descriptionTruncLength && !options.NoTrunc {
+ description = description[:descriptionTruncLength] + "..."
+ }
+ name := registry + "/" + results[i].Name
+ if index == "docker.io" && !strings.Contains(results[i].Name, "/") {
+ name = index + "/library/" + results[i].Name
+ }
+ params := SearchResult{
+ Index: index,
+ Name: name,
+ Description: description,
+ Official: official,
+ Automated: automated,
+ Stars: results[i].StarCount,
+ }
+ paramsArr = append(paramsArr, params)
+ }
+ return paramsArr, nil
+}
+
+// ParseSearchFilter turns the filter into a SearchFilter that can be used for
+// searching images.
+func ParseSearchFilter(filter []string) (*SearchFilter, error) {
+ sFilter := new(SearchFilter)
+ for _, f := range filter {
+ arr := strings.Split(f, "=")
+ switch arr[0] {
+ case "stars":
+ if len(arr) < 2 {
+ return nil, errors.Errorf("invalid `stars` filter %q, should be stars=<value>", filter)
+ }
+ stars, err := strconv.Atoi(arr[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "incorrect value type for stars filter")
+ }
+ sFilter.Stars = stars
+ break
+ case "is-automated":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsAutomated = types.OptionalBoolFalse
+ } else {
+ sFilter.IsAutomated = types.OptionalBoolTrue
+ }
+ break
+ case "is-official":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsOfficial = types.OptionalBoolFalse
+ } else {
+ sFilter.IsOfficial = types.OptionalBoolTrue
+ }
+ break
+ default:
+ return nil, errors.Errorf("invalid filter type %q", f)
+ }
+ }
+ return sFilter, nil
+}
+
+func (f *SearchFilter) matchesStarFilter(result docker.SearchResult) bool {
+ return result.StarCount >= f.Stars
+}
+
+func (f *SearchFilter) matchesAutomatedFilter(result docker.SearchResult) bool {
+ if f.IsAutomated != types.OptionalBoolUndefined {
+ return result.IsAutomated == (f.IsAutomated == types.OptionalBoolTrue)
+ }
+ return true
+}
+
+func (f *SearchFilter) matchesOfficialFilter(result docker.SearchResult) bool {
+ if f.IsOfficial != types.OptionalBoolUndefined {
+ return result.IsOfficial == (f.IsOfficial == types.OptionalBoolTrue)
+ }
+ return true
+}
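SearchImages fans out one goroutine per registry but caps concurrency with a weighted semaphore, collecting per-registry results by index. A standalone sketch of that throttled fan-out follows; the registry list and the worker body are placeholders for searchImageInRegistry.

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	registries := []string{"docker.io", "quay.io", "registry.fedoraproject.org"}
	results := make([]string, len(registries))

	// Cap the number of in-flight searches, as SearchImages does.
	sem := semaphore.NewWeighted(6)
	var wg sync.WaitGroup
	wg.Add(len(registries))

	ctx := context.Background()
	for i, reg := range registries {
		if err := sem.Acquire(ctx, 1); err != nil {
			wg.Done()
			continue
		}
		go func(i int, reg string) {
			defer sem.Release(1)
			defer wg.Done()
			results[i] = "searched " + reg // placeholder for the real query
		}(i, reg)
	}

	wg.Wait()
	for _, r := range results {
		fmt.Println(r)
	}
}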
diff --git a/libpod/image/utils.go b/libpod/image/utils.go
index ad027f32a..544796a4b 100644
--- a/libpod/image/utils.go
+++ b/libpod/image/utils.go
@@ -1,6 +1,7 @@
package image
import (
+ "fmt"
"io"
"net/url"
"regexp"
@@ -87,22 +88,29 @@ func hasTransport(image string) bool {
// ReposToMap parses the specified repotags and returns a map with repositories
// as keys and the corresponding arrays of tags as values.
-func ReposToMap(repotags []string) map[string][]string {
+func ReposToMap(repotags []string) (map[string][]string, error) {
// map format is repo -> tag
repos := make(map[string][]string)
for _, repo := range repotags {
var repository, tag string
if len(repo) > 0 {
- li := strings.LastIndex(repo, ":")
- repository = repo[0:li]
- tag = repo[li+1:]
+ named, err := reference.ParseNormalizedNamed(repo)
+ if err != nil {
+ return nil, err
+ }
+ repository = named.Name()
+ if ref, ok := named.(reference.NamedTagged); ok {
+ tag = ref.Tag()
+ } else if ref, ok := named.(reference.Canonical); ok {
+ tag = ref.Digest().String()
+ }
}
repos[repository] = append(repos[repository], tag)
}
if len(repos) == 0 {
repos["<none>"] = []string{"<none>"}
}
- return repos
+ return repos, nil
}
// GetAdditionalTags returns a list of reference.NamedTagged for the
@@ -141,3 +149,28 @@ func IsValidImageURI(imguri string) (bool, error) {
}
return true, nil
}
+
+// imageNameForSaveDestination returns a Docker-like reference appropriate for saving img,
+// which the user referred to as imgUserInput; or an empty string, if there is no appropriate
+// reference.
+func imageNameForSaveDestination(img *Image, imgUserInput string) string {
+ if strings.Contains(img.ID(), imgUserInput) {
+ return ""
+ }
+
+ prepend := ""
+ localRegistryPrefix := fmt.Sprintf("%s/", DefaultLocalRegistry)
+ if !strings.HasPrefix(imgUserInput, localRegistryPrefix) {
+ // we need to check if localhost was added to the image name in NewFromLocal
+ for _, name := range img.Names() {
+ // If the user is saving an image from the localhost registry, getLocalImage needs
+ // a name that matches the format localhost/<name>:<tag> or localhost/<name>:latest to correctly
+ // set up the manifest and save.
+ if strings.HasPrefix(name, localRegistryPrefix) && (strings.HasSuffix(name, imgUserInput) || strings.HasSuffix(name, fmt.Sprintf("%s:latest", imgUserInput))) {
+ prepend = localRegistryPrefix
+ break
+ }
+ }
+ }
+ return fmt.Sprintf("%s%s", prepend, imgUserInput)
+}
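A small, hypothetical example of the mapping ReposToMap now produces, assuming the usual docker.io normalization performed by reference.ParseNormalizedNamed and an imported fmt; the image names are illustrative only.

    // Sketch (same package): tags are grouped under their normalized repository.
    func ExampleReposToMap() {
        repos, err := ReposToMap([]string{"alpine:3.9", "quay.io/podman/stable:latest"})
        if err != nil {
            panic(err)
        }
        fmt.Println(repos["docker.io/library/alpine"]) // [3.9]
        fmt.Println(repos["quay.io/podman/stable"])    // [latest]
    }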
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 314799309..ab4fc8ba7 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -378,6 +378,58 @@ func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
return arr, nil
}
+// AllContainers retrieves all containers from the state
+func (s *InMemoryState) AllContainers() ([]*Container, error) {
+ ctrs := make([]*Container, 0, len(s.containers))
+ for _, ctr := range s.containers {
+ if s.namespace == "" || ctr.config.Namespace == s.namespace {
+ ctrs = append(ctrs, ctr)
+ }
+ }
+
+ return ctrs, nil
+}
+
+// RewriteContainerConfig rewrites a container's configuration.
+// This function is DANGEROUS, even with an in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+ if !ctr.valid {
+ return ErrCtrRemoved
+ }
+
+ // If the container does not exist, return error
+ stateCtr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ }
+
+ stateCtr.config = newCfg
+
+ return nil
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// This function is DANGEROUS, even with in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+ if !pod.valid {
+ return ErrPodRemoved
+ }
+
+ // If the pod does not exist, return error
+ statePod, ok := s.pods[pod.ID()]
+ if !ok {
+ pod.valid = false
+ return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found in state", pod.ID())
+ }
+
+ statePod.config = newCfg
+
+ return nil
+}
+
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
@@ -486,18 +538,6 @@ func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
return allVols, nil
}
-// AllContainers retrieves all containers from the state
-func (s *InMemoryState) AllContainers() ([]*Container, error) {
- ctrs := make([]*Container, 0, len(s.containers))
- for _, ctr := range s.containers {
- if s.namespace == "" || ctr.config.Namespace == s.namespace {
- ctrs = append(ctrs, ctr)
- }
- }
-
- return ctrs, nil
-}
-
// Pod retrieves a pod from the state from its full ID
func (s *InMemoryState) Pod(id string) (*Pod, error) {
if id == "" {
diff --git a/libpod/kube.go b/libpod/kube.go
index f34805e39..484127870 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -228,7 +228,11 @@ func containerToV1Container(c *Container) (v1.Container, error) {
return kubeContainer, nil
}
- ports, err := ocicniPortMappingToContainerPort(c.PortMappings())
+ portmappings, err := c.PortMappings()
+ if err != nil {
+ return kubeContainer, err
+ }
+ ports, err := ocicniPortMappingToContainerPort(portmappings)
if err != nil {
return kubeContainer, nil
}
@@ -401,7 +405,7 @@ func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) {
func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
priv := c.Privileged()
ro := c.IsReadOnly()
- allowPrivEscalation := !c.Spec().Process.NoNewPrivileges
+ allowPrivEscalation := !c.config.Spec.Process.NoNewPrivileges
newCaps, err := capAddDrop(c.config.Spec.Process.Capabilities)
if err != nil {
@@ -421,7 +425,13 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
}
if c.User() != "" {
- // It is *possible* that
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ }
+ if err := c.syncContainer(); err != nil {
+ return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
+ }
logrus.Debugf("Looking in container for user: %s", c.User())
u, err := lookup.GetUser(c.state.Mountpoint, c.User())
if err != nil {
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index db8f20e95..7c9605917 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -89,3 +89,14 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
+
+// FreeAllLocks frees all locks.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *InMemoryManager) FreeAllLocks() error {
+ for _, lock := range m.locks {
+ lock.allocated = false
+ }
+
+ return nil
+}
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index 1f94171fe..d6841646b 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,20 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
+ // FreeAllLocks frees all allocated locks, in preparation for lock
+ // reallocation.
+ // As this deallocates all presently-held locks, this can be very
+ // dangerous - if there are other processes running that might be
+ // attempting to allocate new locks and free existing locks, we may
+ // encounter races leading to an inconsistent state.
+ // (This is in addition to the fact that FreeAllLocks instantly makes
+ // the state inconsistent simply by using it, and requires a full
+ // lock renumbering to restore consistency!).
+ // In short, this should only be used as part of unit tests, or lock
+ // renumbering, where reasonable guarantees about other processes can be
+ // made.
+ FreeAllLocks() error
}
// Locker is similar to sync.Locker, but provides a method for freeing the lock
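A minimal sketch of the renumbering pattern FreeAllLocks exists for, assuming the Manager interface also exposes AllocateLock() (Locker, error), as the runtime code later in this patch relies on; reallocateAll is illustrative only.

    // Free every existing allocation, then hand out one fresh lock per object.
    func reallocateAll(m Manager, numObjects int) ([]Locker, error) {
        if err := m.FreeAllLocks(); err != nil {
            return nil, err
        }
        locks := make([]Locker, 0, numObjects)
        for i := 0; i < numObjects; i++ {
            l, err := m.AllocateLock()
            if err != nil {
                return nil, err
            }
            locks = append(locks, l)
        }
        return locks, nil
    }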
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index 4af58d857..d11fce71a 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -203,6 +203,8 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// terminating NULL byte.
// Returns a valid pointer on success or NULL on error.
// If an error occurs, negative ERRNO values will be written to error_code.
+// ERANGE is returned for a mismatch between num_locks and the number of locks
+// available in the SHM lock struct.
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
int shm_fd;
shm_struct_t *shm;
@@ -255,11 +257,11 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// Need to check the SHM to see if it's actually our locks
if (shm->magic != MAGIC) {
- *error_code = -1 * errno;
+ *error_code = -1 * EBADF;
goto CLEANUP;
}
if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
- *error_code = -1 * errno;
+ *error_code = -1 * ERANGE;
goto CLEANUP;
}
@@ -407,6 +409,36 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
return 0;
}
+// Deallocate all semaphores unconditionally.
+// Returns negative ERRNO values.
+int32_t deallocate_all_semaphores(shm_struct_t *shm) {
+ int ret_code;
+ uint i;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Iterate through all bitmaps and reset to unused
+ for (i = 0; i < shm->num_bitmaps; i++) {
+ shm->locks[i].bitmap = 0;
+ }
+
+ // Unlock the allocation control mutex
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
// Lock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 87d28e5c1..e70ea8743 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -155,6 +155,22 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
return nil
}
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.deallocate_all_semaphores(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno return from C
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 8e7e23fb7..58e4297e2 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -40,6 +40,7 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
index 594eb5d8e..830035881 100644
--- a/libpod/lock/shm/shm_lock_test.go
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"runtime"
- "syscall"
"testing"
"time"
@@ -53,12 +52,8 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
}
defer func() {
// Deallocate all locks
- // Ignore ENOENT (lock is not allocated)
- var i uint32
- for i = 0; i < numLocks; i++ {
- if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
- t.Fatalf("Error deallocating semaphore %d: %v", i, err)
- }
+ if err := locks.DeallocateAllSemaphores(); err != nil {
+ t.Fatalf("Error deallocating semaphores: %v", err)
}
if err := locks.Close(); err != nil {
@@ -212,6 +207,25 @@ func TestAllocateDeallocateCycle(t *testing.T) {
})
}
+// Test that DeallocateAllSemaphores deallocates all semaphores
+func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate a lock
+ locks1, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ // Free all locks
+ err = locks.DeallocateAllSemaphores()
+ assert.NoError(t, err)
+
+ // Allocate another lock
+ locks2, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ assert.Equal(t, locks1, locks2)
+ })
+}
+
// Test that locks actually lock
func TestLockSemaphoreActuallyLocks(t *testing.T) {
runLockTest(t, func(t *testing.T, locks *SHMLocks) {
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 94dfd7dd7..8678958ee 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -71,6 +71,13 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
return lock, nil
}
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *SHMLockManager) FreeAllLocks() error {
+ return m.locks.DeallocateAllSemaphores()
+}
+
// SHMLock is an individual shared memory lock.
type SHMLock struct {
lockID uint32
diff --git a/libpod/lock/shm_lock_manager_unsupported.go b/libpod/lock/shm_lock_manager_unsupported.go
index cbdb2f7bc..1d6e3fcbd 100644
--- a/libpod/lock/shm_lock_manager_unsupported.go
+++ b/libpod/lock/shm_lock_manager_unsupported.go
@@ -27,3 +27,8 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
return nil, fmt.Errorf("not supported")
}
+
+// FreeAllLocks is not supported on this platform
+func (m *SHMLockManager) FreeAllLocks() error {
+ return fmt.Errorf("not supported")
+}
diff --git a/libpod/oci.go b/libpod/oci.go
index e55bd57dc..26d2c6ef1 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -321,7 +321,6 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBPOD_USERNS_CONFIGURED=%s", os.Getenv("_LIBPOD_USERNS_CONFIGURED")))
cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%s", os.Getenv("_LIBPOD_ROOTLESS_UID")))
cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME")))
- cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() {
ports, err := bindPorts(ctr.config.PortMappings)
diff --git a/libpod/options.go b/libpod/options.go
index d965c058e..e22c81f91 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@@ -148,6 +149,7 @@ func WithOCIRuntime(runtime string) RuntimeOption {
}
rt.config.OCIRuntime = runtime
+ rt.config.RuntimePath = nil
return nil
}
@@ -393,6 +395,22 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
}
}
+// WithRenumber instructs libpod to perform a lock renumbering while
+// initializing. This will handle migrations from early versions of libpod with
+// file locks to newer versions with SHM locking, as well as changes in the
+// number of configured locks.
+func WithRenumber() RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return ErrRuntimeFinalized
+ }
+
+ rt.doRenumber = true
+
+ return nil
+ }
+}
+
// Container Creation Options
// WithShmDir sets the directory that should be mounted on /dev/shm.
@@ -886,10 +904,10 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmo
}
ctr.config.PostConfigureNetNS = postConfigureNetNS
- ctr.config.CreateNetNS = true
+ ctr.config.NetMode = namespaces.NetworkMode(netmode)
+ ctr.config.CreateNetNS = !ctr.config.NetMode.IsUserDefined()
ctr.config.PortMappings = portMappings
ctr.config.Networks = networks
- ctr.config.NetMode = namespaces.NetworkMode(netmode)
return nil
}
@@ -1058,7 +1076,7 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
// from a container passed in to the --volumes-from flag.
// This stores the built-in volume information in the Config so we can
// add them when creating the container.
-func WithLocalVolumes(volumes []string) CtrCreateOption {
+func WithLocalVolumes(volumes []spec.Mount) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return ErrCtrFinalized
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 0f1f115e8..25e4e77d7 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -1,10 +1,8 @@
package libpod
import (
- "context"
"fmt"
"path/filepath"
- "strings"
"time"
"github.com/containers/storage/pkg/stringid"
@@ -85,97 +83,3 @@ func (p *Pod) refresh() error {
// Save changes
return p.save()
}
-
-// Visit a node on a container graph and start the container, or set an error if
-// a dependency failed to start. if restart is true, startNode will restart the node instead of starting it.
-func startNode(ctx context.Context, node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool, restart bool) {
- // First, check if we have already visited the node
- if ctrsVisited[node.id] {
- return
- }
-
- // If setError is true, a dependency of us failed
- // Mark us as failed and recurse
- if setError {
- // Mark us as visited, and set an error
- ctrsVisited[node.id] = true
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
-
- // Hit anyone who depends on us, and set errors on them too
- for _, successor := range node.dependedOn {
- startNode(ctx, successor, true, ctrErrors, ctrsVisited, restart)
- }
-
- return
- }
-
- // Have all our dependencies started?
- // If not, don't visit the node yet
- depsVisited := true
- for _, dep := range node.dependsOn {
- depsVisited = depsVisited && ctrsVisited[dep.id]
- }
- if !depsVisited {
- // Don't visit us yet, all dependencies are not up
- // We'll hit the dependencies eventually, and when we do it will
- // recurse here
- return
- }
-
- // Going to try to start the container, mark us as visited
- ctrsVisited[node.id] = true
-
- ctrErrored := false
-
- // Check if dependencies are running
- // Graph traversal means we should have started them
- // But they could have died before we got here
- // Does not require that the container be locked, we only need to lock
- // the dependencies
- depsStopped, err := node.container.checkDependenciesRunning()
- if err != nil {
- ctrErrors[node.id] = err
- ctrErrored = true
- } else if len(depsStopped) > 0 {
- // Our dependencies are not running
- depsList := strings.Join(depsStopped, ",")
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
- ctrErrored = true
- }
-
- // Lock before we start
- node.container.lock.Lock()
-
- // Sync the container to pick up current state
- if !ctrErrored {
- if err := node.container.syncContainer(); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
-
- // Start the container (only if it is not running)
- if !ctrErrored {
- if !restart && node.container.state.State != ContainerStateRunning {
- if err := node.container.initAndStart(ctx); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
- if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
- if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
- }
-
- node.container.lock.Unlock()
-
- // Recurse to anyone who depends on us and start them
- for _, successor := range node.dependedOn {
- startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
- }
-
- return
-}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index c975f628b..52f4523ba 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -7,6 +7,7 @@ import (
"os/exec"
"path/filepath"
"sync"
+ "syscall"
"github.com/BurntSushi/toml"
is "github.com/containers/image/storage"
@@ -79,7 +80,8 @@ type RuntimeOption func(*Runtime) error
// Runtime is the core libpod runtime
type Runtime struct {
- config *RuntimeConfig
+ config *RuntimeConfig
+
state State
store storage.Store
storageService *storageService
@@ -88,12 +90,23 @@ type Runtime struct {
netPlugin ocicni.CNIPlugin
ociRuntimePath OCIRuntimePath
conmonPath string
- valid bool
- lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
lockManager lock.Manager
configuredFrom *runtimeConfiguredFrom
+
+ // doRenumber indicates that the runtime should perform a lock renumber
+ // during initialization.
+ // Once the runtime has been initialized and returned, this variable is
+ // unused.
+ doRenumber bool
+
+ // valid indicates whether the runtime is ready to use.
+ // valid is set to true when a runtime is returned from GetRuntime(),
+ // and remains true until the runtime is shut down (rendering its
+ // storage unusable). When valid is false, the runtime cannot be used.
+ valid bool
+ lock sync.RWMutex
}
// OCIRuntimePath contains information about an OCI runtime.
@@ -130,6 +143,12 @@ type RuntimeConfig struct {
OCIRuntime string `toml:"runtime"`
// OCIRuntimes are the set of configured OCI runtimes (default is runc)
OCIRuntimes map[string][]string `toml:"runtimes"`
+ // RuntimePath is the path to OCI runtime binary for launching
+ // containers.
+ // The first path pointing to a valid file will be used
+ // This is used only when no OCIRuntime/OCIRuntimes are defined, and exists
+ // solely for backward compatibility with older versions of Podman.
+ RuntimePath []string `toml:"runtime_path"`
// ConmonPath is the path to the Conmon binary used for managing
// containers
// The first path pointing to a valid file will be used
@@ -389,7 +408,7 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
// If the configuration file was not found but we are running in rootless, a subset of the
// global config file is used.
for _, path := range []string{OverrideConfigPath, ConfigPath} {
- contents, err := ioutil.ReadFile(OverrideConfigPath)
+ contents, err := ioutil.ReadFile(path)
if err != nil {
// Ignore any error, the file might not be readable by us.
continue
@@ -403,6 +422,7 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
runtime.config.ConmonPath = tmpConfig.ConmonPath
runtime.config.ConmonEnvVars = tmpConfig.ConmonEnvVars
runtime.config.OCIRuntimes = tmpConfig.OCIRuntimes
+ runtime.config.RuntimePath = tmpConfig.RuntimePath
runtime.config.CNIPluginDir = tmpConfig.CNIPluginDir
runtime.config.NoPivotRoot = tmpConfig.NoPivotRoot
break
@@ -485,12 +505,37 @@ func NewRuntimeFromConfig(configPath string, options ...RuntimeOption) (runtime
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(runtime *Runtime) (err error) {
+
+ // Backward compatibility for `runtime_path`
+ if len(runtime.config.RuntimePath) > 0 {
+ // Don't print twice in rootless mode.
+ if os.Geteuid() == 0 {
+ logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`")
+ logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used")
+ }
+
+ // Transform `runtime_path` into `runtimes` and `runtime`.
+ name := filepath.Base(runtime.config.RuntimePath[0])
+ runtime.config.OCIRuntime = name
+ runtime.config.OCIRuntimes = map[string][]string{name: runtime.config.RuntimePath}
+ }
+
// Find a working OCI runtime binary
foundRuntime := false
// If runtime is an absolute path, then use it as it is.
- if runtime.config.OCIRuntime[0] == '/' {
+ if runtime.config.OCIRuntime != "" && runtime.config.OCIRuntime[0] == '/' {
foundRuntime = true
runtime.ociRuntimePath = OCIRuntimePath{Name: filepath.Base(runtime.config.OCIRuntime), Paths: []string{runtime.config.OCIRuntime}}
+ stat, err := os.Stat(runtime.config.OCIRuntime)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return errors.Wrapf(err, "the specified OCI runtime %s does not exist", runtime.config.OCIRuntime)
+ }
+ return errors.Wrapf(err, "cannot stat the OCI runtime path %s", runtime.config.OCIRuntime)
+ }
+ if !stat.Mode().IsRegular() {
+ return fmt.Errorf("the specified OCI runtime %s is not a valid file", runtime.config.OCIRuntime)
+ }
} else {
// If not, look it up in the configuration.
paths := runtime.config.OCIRuntimes[runtime.config.OCIRuntime]
@@ -731,6 +776,7 @@ func makeRuntime(runtime *Runtime) (err error) {
aliveLock.Unlock()
}
}()
+
_, err = os.Stat(runtimeAliveFile)
if err != nil {
// If the file doesn't exist, we need to refresh the state
@@ -756,12 +802,35 @@ func makeRuntime(runtime *Runtime) (err error) {
if err != nil {
return err
}
+ } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+ logrus.Debugf("Number of locks does not match - removing old locks")
+
+ // ERANGE indicates a lock numbering mismatch.
+ // Since we're renumbering, this is not fatal.
+ // Remove the earlier set of locks and recreate.
+ if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+ return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ }
+
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return err
+ }
} else {
return err
}
}
runtime.lockManager = manager
+ // If we're renumbering locks, do it now.
+ // It breaks out of normal runtime init, and will not return a valid
+ // runtime.
+ if runtime.doRenumber {
+ if err := runtime.renumberLocks(); err != nil {
+ return err
+ }
+ }
+
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 9afdef7b6..2ec8d0795 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -10,9 +10,12 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulule/deepcopier"
@@ -44,6 +47,10 @@ func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
+ span.SetTag("type", "runtime")
+ defer span.Finish()
+
if rSpec == nil {
return nil, errors.Wrapf(ErrInvalidArg, "must provide a valid runtime spec to create container")
}
@@ -175,9 +182,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
if err != nil {
newVol, err := r.newVolume(ctx, WithVolumeName(vol.Source))
if err != nil {
- logrus.Errorf("error creating named volume %q: %v", vol.Source, err)
+ return nil, errors.Wrapf(err, "error creating named volume %q", vol.Source)
}
ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
+ if err := ctr.copyWithTarFromImage(ctr.config.Spec.Mounts[i].Destination, ctr.config.Spec.Mounts[i].Source); err != nil && !os.IsNotExist(err) {
+ return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Source)
+ }
continue
}
ctr.config.Spec.Mounts[i].Source = volInfo.MountPoint()
@@ -223,17 +233,23 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
// RemoveContainer removes the given container
// If force is specified, the container will be stopped first
+// If removeVolume is specified, named volumes used by the container will
+// also be removed, but only if the container is their sole user
// Otherwise, RemoveContainer will return an error if the container is running
-func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool) error {
+func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
r.lock.Lock()
defer r.lock.Unlock()
- return r.removeContainer(ctx, c, force)
+ return r.removeContainer(ctx, c, force, removeVolume)
}
// Internal function to remove a container
// Locks the container, but does not lock the runtime
-func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool) error {
+func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "removeContainer")
+ span.SetTag("type", "runtime")
+ defer span.Finish()
+
if !c.valid {
if ok, _ := r.state.HasContainer(c.ID()); !ok {
// Container probably already removed
@@ -246,6 +262,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
// To avoid races around removing a container and the pod it is in
var pod *Pod
var err error
+ runtime := c.runtime
if c.config.Pod != "" {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
@@ -331,6 +348,13 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
return errors.Wrapf(ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
}
+ var volumes []string
+ if removeVolume {
+ volumes, err = c.namedVolumes()
+ if err != nil {
+ logrus.Errorf("unable to retrieve builtin volumes for container %v: %v", c.ID(), err)
+ }
+ }
var cleanupErr error
// Remove the container from the state
if c.config.Pod != "" {
@@ -395,6 +419,14 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
}
}
+ for _, v := range volumes {
+ if volume, err := runtime.state.Volume(v); err == nil {
+ if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+ logrus.Errorf("cleanup volume (%s): %v", v, err)
+ }
+ }
+ }
+
return cleanupErr
}
@@ -564,3 +596,16 @@ func (r *Runtime) Export(name string, path string) error {
return ctr.Export(path)
}
+
+// RemoveContainersFromStorage attempts to remove containers from storage that do not exist in the libpod database
+func (r *Runtime) RemoveContainersFromStorage(ctrs []string) {
+ for _, i := range ctrs {
+ // if the container does not exist in the database, attempt to remove it from storage
+ if _, err := r.LookupContainer(i); err != nil && errors.Cause(err) == image.ErrNoSuchCtr {
+ r.storageService.UnmountContainerImage(i, true)
+ if err := r.storageService.DeleteContainer(i); err != nil && errors.Cause(err) != storage.ErrContainerUnknown {
+ logrus.Errorf("Failed to remove container %q from storage: %s", i, err)
+ }
+ }
+ }
+}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index c20aa77a3..451c2ebe7 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -3,7 +3,6 @@ package libpod
import (
"context"
"fmt"
- "github.com/opencontainers/image-spec/specs-go/v1"
"io"
"io/ioutil"
"net/http"
@@ -15,6 +14,11 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
"github.com/pkg/errors"
+
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
+ ociarchive "github.com/containers/image/oci/archive"
+ "github.com/opencontainers/image-spec/specs-go/v1"
)
// Runtime API
@@ -43,7 +47,7 @@ func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool)
if len(imageCtrs) > 0 && len(img.Names()) <= 1 {
if force {
for _, ctr := range imageCtrs {
- if err := r.removeContainer(ctx, ctr, true); err != nil {
+ if err := r.removeContainer(ctx, ctr, true, false); err != nil {
return "", errors.Wrapf(err, "error removing image %s: container %s using image could not be removed", img.ID(), ctr.ID())
}
}
@@ -211,3 +215,41 @@ func downloadFromURL(source string) (string, error) {
return outFile.Name(), nil
}
+
+// LoadImage loads a container image into local storage
+func (r *Runtime) LoadImage(ctx context.Context, name, inputFile string, writer io.Writer, signaturePolicy string) (string, error) {
+ var newImages []*image.Image
+ src, err := dockerarchive.ParseReference(inputFile) // FIXME? We should add dockerarchive.NewReference()
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ // generate full src name with specified image:tag
+ src, err := ociarchive.NewReference(inputFile, name) // imageName may be ""
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ src, err := directory.NewReference(inputFile)
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ return "", errors.Wrapf(err, "error pulling %q", name)
+ }
+ }
+ }
+ return getImageNames(newImages), nil
+}
+
+func getImageNames(images []*image.Image) string {
+ var names string
+ for i := range images {
+ if i == 0 {
+ names = images[i].InputName
+ } else {
+ names += ", " + images[i].InputName
+ }
+ }
+ return names
+}
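A minimal usage sketch for the fallback loader above (docker-archive, then oci-archive, then directory); runtime, ctx and the archive path are illustrative, and os/fmt are assumed to be imported by the caller.

    names, err := runtime.LoadImage(ctx, "", "/tmp/alpine.tar", os.Stdout, "")
    if err != nil {
        return err
    }
    fmt.Println("Loaded image(s): " + names)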
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index c6d497c0c..c378d18e4 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -117,15 +117,6 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
if err := pod.save(); err != nil {
return nil, err
}
-
- // Once the pod infra container has been created, we start it
- if err := ctr.Start(ctx); err != nil {
- // If the infra container does not start, we need to tear the pod down.
- if err2 := r.removePod(ctx, pod, true, true); err2 != nil {
- logrus.Errorf("Error removing pod after infra container failed to start: %v", err2)
- }
- return nil, errors.Wrapf(err, "error starting Infra Container")
- }
}
return pod, nil
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
new file mode 100644
index 000000000..125cf0825
--- /dev/null
+++ b/libpod/runtime_renumber.go
@@ -0,0 +1,57 @@
+package libpod
+
+import (
+ "github.com/pkg/errors"
+)
+
+// renumberLocks reassigns lock numbers for all containers and pods in the
+// state.
+// TODO: It would be desirable to make it impossible to call this until all
+// other libpod sessions are dead.
+// Possibly use a read-write file lock, with all non-renumber podmans owning the
+// lock as read, renumber attempting to take a write lock?
+// The alternative is some sort of session tracking, and I don't know how
+// reliable that can be.
+func (r *Runtime) renumberLocks() error {
+ // Start off by deallocating all locks
+ if err := r.lockManager.FreeAllLocks(); err != nil {
+ return err
+ }
+
+ allCtrs, err := r.state.AllContainers()
+ if err != nil {
+ return err
+ }
+ for _, ctr := range allCtrs {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+ }
+
+ ctr.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
+ return err
+ }
+ }
+ allPods, err := r.state.AllPods()
+ if err != nil {
+ return err
+ }
+ for _, pod := range allPods {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
+ }
+
+ pod.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewritePodConfig(pod, pod.config); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
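A minimal sketch of how a caller might request the one-shot renumbering added here, assuming only the NewRuntime and WithRenumber signatures shown in this patch; the runtime is created solely to drive the renumber during init.

    if _, err := libpod.NewRuntime(libpod.WithRenumber()); err != nil {
        return errors.Wrapf(err, "error renumbering locks")
    }
    // Renumbering happens during NewRuntime; nothing else needs to be done here.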
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index 3921758ee..11f37ad4b 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -2,6 +2,9 @@ package libpod
import (
"context"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "strings"
)
// Contains the public Runtime API for volumes
@@ -16,7 +19,7 @@ type VolumeCreateOption func(*Volume) error
type VolumeFilter func(*Volume) bool
// RemoveVolume removes a volume
-func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
@@ -32,10 +35,39 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- v.lock.Lock()
- defer v.lock.Unlock()
+ return r.removeVolume(ctx, v, force)
+}
+
+// RemoveVolumes removes the given volumes (or all volumes, if all is set), optionally forcing removal
+func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, force bool) ([]string, error) {
+ var (
+ vols []*Volume
+ err error
+ deletedVols []string
+ )
+ if all {
+ vols, err = r.Volumes()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to get all volumes")
+ }
+ } else {
+ for _, i := range volumes {
+ vol, err := r.GetVolume(i)
+ if err != nil {
+ return nil, err
+ }
+ vols = append(vols, vol)
+ }
+ }
- return r.removeVolume(ctx, v, force, prune)
+ for _, vol := range vols {
+ if err := r.RemoveVolume(ctx, vol, force); err != nil {
+ return deletedVols, err
+ }
+ logrus.Debugf("removed volume %s", vol.Name())
+ deletedVols = append(deletedVols, vol.Name())
+ }
+ return deletedVols, nil
}
// GetVolume retrieves a volume by its name
@@ -47,7 +79,21 @@ func (r *Runtime) GetVolume(name string) (*Volume, error) {
return nil, ErrRuntimeStopped
}
- return r.state.Volume(name)
+ vol, err := r.state.Volume(name)
+ if err == nil {
+ return vol, err
+ }
+
+ vols, err := r.GetAllVolumes()
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range vols {
+ if strings.HasPrefix(v.Name(), name) {
+ return v, nil
+ }
+ }
+ return nil, errors.Errorf("unable to find volume %s", name)
}
// HasVolume checks to see if a volume with the given name exists
@@ -105,3 +151,27 @@ func (r *Runtime) GetAllVolumes() ([]*Volume, error) {
return r.state.AllVolumes()
}
+
+// PruneVolumes removes unused volumes from the system
+func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
+ var (
+ prunedIDs []string
+ pruneErrors []error
+ )
+ vols, err := r.GetAllVolumes()
+ if err != nil {
+ pruneErrors = append(pruneErrors, err)
+ return nil, pruneErrors
+ }
+
+ for _, vol := range vols {
+ if err := r.RemoveVolume(ctx, vol, false); err != nil {
+ if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
+ pruneErrors = append(pruneErrors, err)
+ }
+ continue
+ }
+ prunedIDs = append(prunedIDs, vol.Name())
+ }
+ return prunedIDs, pruneErrors
+}
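A minimal sketch of a CLI-style caller driving the two helpers above; runtime, ctx, args and the boolean flags are illustrative names, not part of the patch.

    // Remove explicitly named volumes (or all of them), then report what was deleted.
    deleted, err := runtime.RemoveVolumes(ctx, args, removeAll, force)
    for _, name := range deleted {
        fmt.Println(name)
    }
    if err != nil {
        return err
    }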
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 0727cfedf..838c0167a 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
- lock, err := r.lockManager.AllocateLock()
- if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new volume")
- }
- volume.lock = lock
- volume.config.LockID = volume.lock.ID()
-
volume.valid = true
// Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
// removeVolume removes the specified volume from state as well tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
if !v.valid {
- return ErrNoSuchVolume
+ if ok, _ := r.state.HasVolume(v.Name()); !ok {
+ return nil
+ }
+ return ErrVolumeRemoved
}
deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
return err
}
if len(deps) != 0 {
- if prune {
- return ErrVolumeBeingUsed
- }
depsStr := strings.Join(deps, ", ")
if !force {
return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
- if err := v.teardownStorage(); err != nil {
- return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
- }
+ // Set volume as invalid so it can no longer be used
+ v.valid = false
// Remove the volume from the state
if err := r.state.RemoveVolume(v); err != nil {
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
- // Set volume as invalid so it can no longer be used
- v.valid = false
+ // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+ if err := v.teardownStorage(); err != nil {
+ return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ }
+
+ logrus.Debugf("Removed volume %s", v.Name())
return nil
}
diff --git a/libpod/state.go b/libpod/state.go
index 88d89f673..98282fc83 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -97,6 +97,30 @@ type State interface {
// returned.
AllContainers() ([]*Container, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
+ // Rewrite a container's configuration.
+ // This function breaks libpod's normal rule that container configuration
+ // is read-only, and as such should be used EXTREMELY SPARINGLY and
+ // only in very specific circumstances.
+ // Specifically, it is ONLY safe to use this function to make changes
+ // that result in a functionally identical configuration (migrating to
+ // newer, but identical, configuration fields), or during libpod init
+ // WHILE HOLDING THE ALIVE LOCK (to prevent other libpod instances from
+ // being initialized).
+ // Most things in config can be changed by this, but container ID and
+ // name ABSOLUTELY CANNOT BE ALTERED. If you do so, there is a high
+ // potential for database corruption.
+ // There are a lot of capital letters and conditions here, but the short
+ // answer is this: use this only very sparingly, and only if you really
+ // know what you're doing.
+ RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+ // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
+ // This function is identical to RewriteContainerConfig, save for the
+ // fact that it is used with pods instead.
+ // It is subject to the same conditions as RewriteContainerConfig.
+ // Please do not use this unless you know what you're doing.
+ RewritePodConfig(pod *Pod, newCfg *PodConfig) error
+
// Accepts full ID of pod.
// If the pod given is not in the set namespace, an error will be
// returned.
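A minimal in-package sketch of the only kind of change the warning above allows, modeled on the renumbering code earlier in this patch: mutate a non-identity field and write the otherwise-identical config back; ctr, state and newLock are illustrative.

    ctr.config.LockID = newLock.ID() // ID and Name are left untouched
    if err := state.RewriteContainerConfig(ctr, ctr.config); err != nil {
        return err
    }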
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 4bd00ab55..be68a2d69 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -1298,6 +1298,78 @@ func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
})
}
+func TestRewriteContainerConfigDoesNotExist(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ err := state.RewriteContainerConfig(&Container{}, &ContainerConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewriteContainerConfigNotInState(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+ err = state.RewriteContainerConfig(testCtr, &ContainerConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewriteContainerConfigRewritesConfig(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ err = state.AddContainer(testCtr)
+ assert.NoError(t, err)
+
+ testCtr.config.LogPath = "/another/path/"
+
+ err = state.RewriteContainerConfig(testCtr, testCtr.config)
+ assert.NoError(t, err)
+
+ testCtrFromState, err := state.Container(testCtr.ID())
+ assert.NoError(t, err)
+
+ testContainersEqual(t, testCtrFromState, testCtr, true)
+ })
+}
+
+func TestRewritePodConfigDoesNotExist(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ err := state.RewritePodConfig(&Pod{}, &PodConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewritePodConfigNotInState(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
+ assert.NoError(t, err)
+ err = state.RewritePodConfig(testPod, &PodConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewritePodConfigRewritesConfig(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
+ assert.NoError(t, err)
+
+ err = state.AddPod(testPod)
+ assert.NoError(t, err)
+
+ testPod.config.CgroupParent = "/another_cgroup_parent"
+
+ err = state.RewritePodConfig(testPod, testPod.config)
+ assert.NoError(t, err)
+
+ testPodFromState, err := state.Pod(testPod.ID())
+ assert.NoError(t, err)
+
+ testPodsEqual(t, testPodFromState, testPod, true)
+ })
+}
+
func TestGetPodDoesNotExist(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Pod("doesnotexist")
diff --git a/libpod/storage.go b/libpod/storage.go
index 17d231171..1a7b13da6 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/opencontainers/image-spec/specs-go/v1"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -62,6 +63,10 @@ func (metadata *RuntimeContainerMetadata) SetMountLabel(mountLabel string) {
// CreateContainerStorage creates the storage end of things. We already have the container spec created
// TO-DO We should be passing in an Image object in the future.
func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID string, options storage.ContainerOptions) (cinfo ContainerInfo, err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "createContainerStorage")
+ span.SetTag("type", "storageService")
+ defer span.Finish()
+
var imageConfig *v1.Image
if imageName != "" {
var ref types.ImageReference
diff --git a/libpod/volume.go b/libpod/volume.go
index 026a3bf49..74878b6a4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,7 +1,5 @@
package libpod
-import "github.com/containers/libpod/libpod/lock"
-
// Volume is the type used to create named volumes
// TODO: all volumes should be created using this and the Volume API
type Volume struct {
@@ -9,7 +7,6 @@ type Volume struct {
valid bool
runtime *Runtime
- lock lock.Locker
}
// VolumeConfig holds the volume's config information
@@ -17,8 +14,6 @@ type Volume struct {
type VolumeConfig struct {
// Name of the volume
Name string `json:"name"`
- // ID of this volume's lock
- LockID uint32 `json:"lockID"`
Labels map[string]string `json:"labels"`
MountPoint string `json:"mountPoint"`
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 800e6d106..35f0ca19d 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -5,10 +5,6 @@ import (
"path/filepath"
)
-// VolumePath is the path under which all volumes that are created using the
-// local driver will be created
-// const VolumePath = "/var/lib/containers/storage/volumes"
-
// Creates a new volume
func newVolume(runtime *Runtime) (*Volume, error) {
volume := new(Volume)
@@ -22,8 +18,5 @@ func newVolume(runtime *Runtime) (*Volume, error) {
// teardownStorage deletes the volume from volumePath
func (v *Volume) teardownStorage() error {
- if !v.valid {
- return ErrNoSuchVolume
- }
return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
}
diff --git a/libpod/adapter/client.go b/pkg/adapter/client.go
index 6512a5952..6512a5952 100644
--- a/libpod/adapter/client.go
+++ b/pkg/adapter/client.go
diff --git a/libpod/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index 9623304e5..3f43a6905 100644
--- a/libpod/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -4,6 +4,7 @@ package adapter
import (
"encoding/json"
+ "github.com/containers/libpod/cmd/podman/shared"
iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
@@ -48,3 +49,33 @@ func (c *Container) Config() *libpod.ContainerConfig {
}
return c.Runtime.Config(c.ID())
}
+
+// Name returns the name of the container
+func (c *Container) Name() string {
+ return c.config.Name
+}
+
+// BatchContainerOp is a wrapper func that mimics the similarly named shared function meant for libpod
+func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContainerStruct, error) {
+ // TODO If pod ps ever shows container's sizes, re-enable this code; otherwise it isn't needed
+ // and would be a perf hit
+ //data, err := ctr.Inspect(true)
+ //if err != nil {
+ // return shared.BatchContainerStruct{}, err
+ //}
+ //
+ //size := new(shared.ContainerSize)
+ //size.RootFsSize = data.SizeRootFs
+ //size.RwSize = data.SizeRw
+
+ bcs := shared.BatchContainerStruct{
+ ConConfig: ctr.config,
+ ConState: ctr.state.State,
+ ExitCode: ctr.state.ExitCode,
+ Pid: ctr.state.PID,
+ StartedTime: ctr.state.StartedTime,
+ ExitedTime: ctr.state.FinishedTime,
+ //Size: size,
+ }
+ return bcs, nil
+}
diff --git a/libpod/adapter/images_remote.go b/pkg/adapter/images_remote.go
index e7b38dccc..e7b38dccc 100644
--- a/libpod/adapter/images_remote.go
+++ b/pkg/adapter/images_remote.go
diff --git a/libpod/adapter/info_remote.go b/pkg/adapter/info_remote.go
index 3b691ed17..3b691ed17 100644
--- a/libpod/adapter/info_remote.go
+++ b/pkg/adapter/info_remote.go
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
new file mode 100644
index 000000000..706a8fe96
--- /dev/null
+++ b/pkg/adapter/pods.go
@@ -0,0 +1,323 @@
+// +build !remoteclient
+
+package adapter
+
+import (
+ "context"
+ "strings"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/adapter/shortcuts"
+)
+
+// Pod ...
+type Pod struct {
+ *libpod.Pod
+}
+
+// RemovePods ...
+func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) {
+ var (
+ errs []error
+ podids []string
+ )
+ pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ errs = append(errs, err)
+ return nil, errs
+ }
+
+ for _, p := range pods {
+ if err := r.RemovePod(ctx, p, cli.Force, cli.Force); err != nil {
+ errs = append(errs, err)
+ } else {
+ podids = append(podids, p.ID())
+ }
+ }
+ return podids, errs
+}
+
+// GetLatestPod gets the latest pod and wraps it in an adapter pod
+func (r *LocalRuntime) GetLatestPod() (*Pod, error) {
+ pod := Pod{}
+ p, err := r.Runtime.GetLatestPod()
+ pod.Pod = p
+ return &pod, err
+}
+
+// GetAllPods gets all pods and wraps it in an adapter pod
+func (r *LocalRuntime) GetAllPods() ([]*Pod, error) {
+ var pods []*Pod
+ allPods, err := r.Runtime.GetAllPods()
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range allPods {
+ pod := Pod{}
+ pod.Pod = p
+ pods = append(pods, &pod)
+ }
+ return pods, nil
+}
+
+// LookupPod gets a pod by name or id and wraps it in an adapter pod
+func (r *LocalRuntime) LookupPod(nameOrID string) (*Pod, error) {
+ pod := Pod{}
+ p, err := r.Runtime.LookupPod(nameOrID)
+ pod.Pod = p
+ return &pod, err
+}
+
+// StopPods is a wrapper to libpod to stop pods based on a cli context
+func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValues) ([]string, []error) {
+ timeout := -1
+ if cli.Flags().Changed("timeout") {
+ timeout = int(cli.Timeout)
+ }
+ var (
+ errs []error
+ podids []string
+ )
+ pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ errs = append(errs, err)
+ return nil, errs
+ }
+
+ for _, p := range pods {
+ stopped := true
+ conErrs, stopErr := p.StopWithTimeout(ctx, true, int(timeout))
+ if stopErr != nil {
+ errs = append(errs, stopErr)
+ stopped = false
+ }
+ if conErrs != nil {
+ stopped = false
+ for _, err := range conErrs {
+ errs = append(errs, err)
+ }
+ }
+ if stopped {
+ podids = append(podids, p.ID())
+ }
+ }
+ return podids, errs
+}
+
+// KillPods is a wrapper to libpod to kill pods based on the cli context
+func (r *LocalRuntime) KillPods(ctx context.Context, cli *cliconfig.PodKillValues, signal uint) ([]string, []error) {
+ var (
+ errs []error
+ podids []string
+ )
+ pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ errs = append(errs, err)
+ return nil, errs
+ }
+ for _, p := range pods {
+ killed := true
+ conErrs, killErr := p.Kill(signal)
+ if killErr != nil {
+ errs = append(errs, killErr)
+ killed = false
+ }
+ if conErrs != nil {
+ killed = false
+ for _, err := range conErrs {
+ errs = append(errs, err)
+ }
+ }
+ if killed {
+ podids = append(podids, p.ID())
+ }
+ }
+ return podids, errs
+}
+
+// StartPods is a wrapper to start pods based on the cli context
+func (r *LocalRuntime) StartPods(ctx context.Context, cli *cliconfig.PodStartValues) ([]string, []error) {
+ var (
+ errs []error
+ podids []string
+ )
+ pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ errs = append(errs, err)
+ return nil, errs
+ }
+ for _, p := range pods {
+ started := true
+ conErrs, startErr := p.Start(ctx)
+ if startErr != nil {
+ errs = append(errs, startErr)
+ started = false
+ }
+ if conErrs != nil {
+ started = false
+ for _, err := range conErrs {
+ errs = append(errs, err)
+ }
+ }
+ if started {
+ podids = append(podids, p.ID())
+ }
+ }
+ return podids, errs
+}
+
+// CreatePod is a wrapper around libpod for creating a new pod from the cli context
+func (r *LocalRuntime) CreatePod(ctx context.Context, cli *cliconfig.PodCreateValues, labels map[string]string) (string, error) {
+ var (
+ options []libpod.PodCreateOption
+ err error
+ )
+
+ if cli.Flag("cgroup-parent").Changed {
+ options = append(options, libpod.WithPodCgroupParent(cli.CgroupParent))
+ }
+
+ if len(labels) != 0 {
+ options = append(options, libpod.WithPodLabels(labels))
+ }
+
+ if cli.Flag("name").Changed {
+ options = append(options, libpod.WithPodName(cli.Name))
+ }
+
+ if cli.Infra {
+ options = append(options, libpod.WithInfraContainer())
+ nsOptions, err := shared.GetNamespaceOptions(strings.Split(cli.Share, ","))
+ if err != nil {
+ return "", err
+ }
+ options = append(options, nsOptions...)
+ }
+
+ if len(cli.Publish) > 0 {
+ portBindings, err := shared.CreatePortBindings(cli.Publish)
+ if err != nil {
+ return "", err
+ }
+ options = append(options, libpod.WithInfraContainerPorts(portBindings))
+
+ }
+ // always have containers use pod cgroups
+ // user opt-out is not yet supported
+ options = append(options, libpod.WithPodCgroups())
+
+ pod, err := r.NewPod(ctx, options...)
+ if err != nil {
+ return "", err
+ }
+ return pod.ID(), nil
+}
+
+// GetPodStatus is a wrapper to get the status of a local libpod pod
+func (p *Pod) GetPodStatus() (string, error) {
+ return shared.GetPodStatus(p.Pod)
+}
+
+// BatchContainerOp is a wrapper for the shared function of the same name
+func BatchContainerOp(ctr *libpod.Container, opts shared.PsOptions) (shared.BatchContainerStruct, error) {
+ return shared.BatchContainerOp(ctr, opts)
+}
+
+// PausePods is a wrapper for pausing pods via libpod
+func (r *LocalRuntime) PausePods(c *cliconfig.PodPauseValues) ([]string, map[string]error, []error) {
+ var (
+ pauseIDs []string
+ pauseErrors []error
+ )
+ containerErrors := make(map[string]error)
+
+ pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime)
+ if err != nil {
+ pauseErrors = append(pauseErrors, err)
+ return nil, containerErrors, pauseErrors
+ }
+
+ for _, pod := range pods {
+ ctrErrs, err := pod.Pause()
+ if err != nil {
+ pauseErrors = append(pauseErrors, err)
+ continue
+ }
+ if ctrErrs != nil {
+ for ctr, err := range ctrErrs {
+ containerErrors[ctr] = err
+ }
+ continue
+ }
+ pauseIDs = append(pauseIDs, pod.ID())
+
+ }
+ return pauseIDs, containerErrors, pauseErrors
+}
+
+// UnpausePods is a wrapper for unpausing pods via libpod
+func (r *LocalRuntime) UnpausePods(c *cliconfig.PodUnpauseValues) ([]string, map[string]error, []error) {
+ var (
+ unpauseIDs []string
+ unpauseErrors []error
+ )
+ containerErrors := make(map[string]error)
+
+ pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime)
+ if err != nil {
+ unpauseErrors = append(unpauseErrors, err)
+ return nil, containerErrors, unpauseErrors
+ }
+
+ for _, pod := range pods {
+ ctrErrs, err := pod.Unpause()
+ if err != nil {
+ unpauseErrors = append(unpauseErrors, err)
+ continue
+ }
+ if ctrErrs != nil {
+ for ctr, err := range ctrErrs {
+ containerErrors[ctr] = err
+ }
+ continue
+ }
+ unpauseIDs = append(unpauseIDs, pod.ID())
+
+ }
+ return unpauseIDs, containerErrors, unpauseErrors
+}
+
+// RestartPods is a wrapper to restart pods via libpod
+func (r *LocalRuntime) RestartPods(ctx context.Context, c *cliconfig.PodRestartValues) ([]string, map[string]error, []error) {
+ var (
+ restartIDs []string
+ restartErrors []error
+ )
+ containerErrors := make(map[string]error)
+
+ pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime)
+ if err != nil {
+ restartErrors = append(restartErrors, err)
+ return nil, containerErrors, restartErrors
+ }
+
+ for _, pod := range pods {
+ ctrErrs, err := pod.Restart(ctx)
+ if err != nil {
+ restartErrors = append(restartErrors, err)
+ continue
+ }
+ if ctrErrs != nil {
+ for ctr, err := range ctrErrs {
+ containerErrors[ctr] = err
+ }
+ continue
+ }
+ restartIDs = append(restartIDs, pod.ID())
+
+ }
+ return restartIDs, containerErrors, restartErrors
+
+}
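
A minimal usage sketch for the local pod wrappers above, assuming the command struct embeds cliconfig.PodmanCommand the way the other podman commands do (the handler itself is hypothetical, not part of this change):

package example

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/libpod/cmd/podman/cliconfig"
	"github.com/containers/libpod/pkg/adapter"
)

// podStartCmd shows how StartPods is meant to be consumed: print the IDs that
// started and surface the collected errors afterwards.
func podStartCmd(c *cliconfig.PodStartValues) error {
	runtime, err := adapter.GetRuntime(&c.PodmanCommand)
	if err != nil {
		return err
	}
	defer runtime.Shutdown(false)

	podIDs, errs := runtime.StartPods(context.TODO(), c)
	for _, id := range podIDs {
		fmt.Println(id)
	}
	var lastErr error
	for _, e := range errs {
		if lastErr != nil {
			fmt.Fprintln(os.Stderr, "error:", lastErr)
		}
		lastErr = e
	}
	return lastErr
}
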
diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go
new file mode 100644
index 000000000..220f7163f
--- /dev/null
+++ b/pkg/adapter/pods_remote.go
@@ -0,0 +1,401 @@
+// +build remoteclient
+
+package adapter
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+ "time"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod"
+ "github.com/pkg/errors"
+ "github.com/ulule/deepcopier"
+)
+
+// Pod ...
+type Pod struct {
+ remotepod
+}
+
+type remotepod struct {
+ config *libpod.PodConfig
+ state *libpod.PodInspectState
+ containers []libpod.PodContainerInfo
+ Runtime *LocalRuntime
+}
+
+// RemovePods removes one or more pods based on the cli context.
+func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) {
+ var (
+ rmErrs []error
+ rmPods []string
+ )
+ podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ rmErrs = append(rmErrs, err)
+ return nil, rmErrs
+ }
+
+ for _, p := range podIDs {
+ reply, err := iopodman.RemovePod().Call(r.Conn, p, cli.Force)
+ if err != nil {
+ rmErrs = append(rmErrs, err)
+ } else {
+ rmPods = append(rmPods, reply)
+ }
+ }
+ return rmPods, rmErrs
+}
+
+// Inspect looks up a pod by name or id and embeds its data into a remote pod
+// object.
+func (r *LocalRuntime) Inspect(nameOrID string) (*Pod, error) {
+ reply, err := iopodman.PodStateData().Call(r.Conn, nameOrID)
+ if err != nil {
+ return nil, err
+ }
+ data := libpod.PodInspect{}
+ if err := json.Unmarshal([]byte(reply), &data); err != nil {
+ return nil, err
+ }
+ pod := Pod{}
+ pod.Runtime = r
+ pod.config = data.Config
+ pod.state = data.State
+ pod.containers = data.Containers
+ return &pod, nil
+}
+
+// GetLatestPod gets the latest pod and wraps it in an adapter pod
+func (r *LocalRuntime) GetLatestPod() (*Pod, error) {
+ reply, err := iopodman.GetPodsByContext().Call(r.Conn, false, true, nil)
+ if err != nil {
+ return nil, err
+ }
+ if len(reply) > 0 {
+ return r.Inspect(reply[0])
+ }
+ return nil, errors.New("no pods exist")
+}
+
+// LookupPod gets a pod by name or ID and wraps it in an adapter pod
+func (r *LocalRuntime) LookupPod(nameOrID string) (*Pod, error) {
+ return r.Inspect(nameOrID)
+}
+
+// Inspect, like libpod pod inspect, returns a libpod.PodInspect object built
+// from the remotepod data struct
+func (p *Pod) Inspect() (*libpod.PodInspect, error) {
+ config := new(libpod.PodConfig)
+ deepcopier.Copy(p.remotepod.config).To(config)
+ inspectData := libpod.PodInspect{
+ Config: config,
+ State: p.remotepod.state,
+ Containers: p.containers,
+ }
+ return &inspectData, nil
+}
+
+// StopPods stops pods based on the cli context from the remote client.
+func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValues) ([]string, []error) {
+ var (
+ stopErrs []error
+ stopPods []string
+ )
+ var timeout int64 = -1
+ if cli.Flags().Changed("timeout") {
+ timeout = int64(cli.Timeout)
+ }
+ podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ for _, p := range podIDs {
+ podID, err := iopodman.StopPod().Call(r.Conn, p, timeout)
+ if err != nil {
+ stopErrs = append(stopErrs, err)
+ } else {
+ stopPods = append(stopPods, podID)
+ }
+ }
+ return stopPods, stopErrs
+}
+
+// KillPods kills pods over varlink for the remoteclient
+func (r *LocalRuntime) KillPods(ctx context.Context, cli *cliconfig.PodKillValues, signal uint) ([]string, []error) {
+ var (
+ killErrs []error
+ killPods []string
+ )
+
+ podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ for _, p := range podIDs {
+ podID, err := iopodman.KillPod().Call(r.Conn, p, int64(signal))
+ if err != nil {
+ killErrs = append(killErrs, err)
+ } else {
+ killPods = append(killPods, podID)
+ }
+ }
+ return killPods, killErrs
+}
+
+// StartPods starts pods for the remote client over varlink
+func (r *LocalRuntime) StartPods(ctx context.Context, cli *cliconfig.PodStartValues) ([]string, []error) {
+ var (
+ startErrs []error
+ startPods []string
+ )
+
+ podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ for _, p := range podIDs {
+ podID, err := iopodman.StartPod().Call(r.Conn, p)
+ if err != nil {
+ startErrs = append(startErrs, err)
+ } else {
+ startPods = append(startPods, podID)
+ }
+ }
+ return startPods, startErrs
+}
+
+// CreatePod creates a pod for the remote client over a varlink connection
+func (r *LocalRuntime) CreatePod(ctx context.Context, cli *cliconfig.PodCreateValues, labels map[string]string) (string, error) {
+ pc := iopodman.PodCreate{
+ Name: cli.Name,
+ CgroupParent: cli.CgroupParent,
+ Labels: labels,
+ Share: strings.Split(cli.Share, ","),
+ Infra: cli.Infra,
+ InfraCommand: cli.InfraCommand,
+ InfraImage: cli.InfraImage,
+ Publish: cli.Publish,
+ }
+
+ return iopodman.CreatePod().Call(r.Conn, pc)
+}
+
+// GetAllPods is a helper function that gets all pods for the remote client
+func (r *LocalRuntime) GetAllPods() ([]*Pod, error) {
+ var pods []*Pod
+ podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, true, false, []string{})
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range podIDs {
+ pod, err := r.LookupPod(p)
+ if err != nil {
+ return nil, err
+ }
+ pods = append(pods, pod)
+ }
+ return pods, nil
+}
+
+// ID returns the id of a remote pod
+func (p *Pod) ID() string {
+ return p.config.ID
+}
+
+// Name returns the name of the remote pod
+func (p *Pod) Name() string {
+ return p.config.Name
+}
+
+// AllContainersByID returns a slice of a pod's container IDs
+func (p *Pod) AllContainersByID() ([]string, error) {
+ var containerIDs []string
+ for _, ctr := range p.containers {
+ containerIDs = append(containerIDs, ctr.ID)
+ }
+ return containerIDs, nil
+}
+
+// AllContainers returns a pod's containers
+func (p *Pod) AllContainers() ([]*Container, error) {
+ var containers []*Container
+ for _, ctr := range p.containers {
+ container, err := p.Runtime.LookupContainer(ctr.ID)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, container)
+ }
+ return containers, nil
+}
+
+// Status ...
+func (p *Pod) Status() (map[string]libpod.ContainerStatus, error) {
+ ctrs := make(map[string]libpod.ContainerStatus)
+ for _, i := range p.containers {
+ var status libpod.ContainerStatus
+ switch i.State {
+ case "exited":
+ status = libpod.ContainerStateExited
+ case "stopped":
+ status = libpod.ContainerStateStopped
+ case "running":
+ status = libpod.ContainerStateRunning
+ case "paused":
+ status = libpod.ContainerStatePaused
+ case "created":
+ status = libpod.ContainerStateCreated
+ case "configured":
+ status = libpod.ContainerStateConfigured
+ default:
+ status = libpod.ContainerStateUnknown
+ }
+ ctrs[i.ID] = status
+ }
+ return ctrs, nil
+}
+
+// GetPodStatus is a wrapper to get the string version of the status
+func (p *Pod) GetPodStatus() (string, error) {
+ ctrStatuses, err := p.Status()
+ if err != nil {
+ return "", err
+ }
+ return shared.CreatePodStatusResults(ctrStatuses)
+}
+
+// InfraContainerID returns the ID of the infra container in a pod
+func (p *Pod) InfraContainerID() (string, error) {
+ return p.state.InfraContainerID, nil
+}
+
+// CreatedTime returns the time the pod was created as a time.Time
+func (p *Pod) CreatedTime() time.Time {
+ return p.config.CreatedTime
+}
+
+// SharesPID ....
+func (p *Pod) SharesPID() bool {
+ return p.config.UsePodPID
+}
+
+// SharesIPC returns whether containers in pod
+// default to use IPC namespace of first container in pod
+func (p *Pod) SharesIPC() bool {
+ return p.config.UsePodIPC
+}
+
+// SharesNet returns whether containers in pod
+// default to use network namespace of first container in pod
+func (p *Pod) SharesNet() bool {
+ return p.config.UsePodNet
+}
+
+// SharesMount returns whether containers in pod
+// default to use the mount namespace of the first container in the pod
+func (p *Pod) SharesMount() bool {
+ return p.config.UsePodMount
+}
+
+// SharesUser returns whether containers in pod
+// default to use user namespace of first container in pod
+func (p *Pod) SharesUser() bool {
+ return p.config.UsePodUser
+}
+
+// SharesUTS returns whether containers in pod
+// default to use UTS namespace of first container in pod
+func (p *Pod) SharesUTS() bool {
+ return p.config.UsePodUTS
+}
+
+// SharesCgroup returns whether containers in the pod will default to this pod's
+// cgroup instead of the default libpod parent
+func (p *Pod) SharesCgroup() bool {
+ return p.config.UsePodCgroup
+}
+
+// CgroupParent returns the pod's CGroup parent
+func (p *Pod) CgroupParent() string {
+ return p.config.CgroupParent
+}
+
+// PausePods pauses one or more pods using varlink and the remote client
+func (r *LocalRuntime) PausePods(c *cliconfig.PodPauseValues) ([]string, map[string]error, []error) {
+ var (
+ pauseIDs []string
+ pauseErrors []error
+ )
+ containerErrors := make(map[string]error)
+
+ pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
+ if err != nil {
+ pauseErrors = append(pauseErrors, err)
+ return nil, containerErrors, pauseErrors
+ }
+ for _, pod := range pods {
+ reply, err := iopodman.PausePod().Call(r.Conn, pod)
+ if err != nil {
+ pauseErrors = append(pauseErrors, err)
+ continue
+ }
+ pauseIDs = append(pauseIDs, reply)
+ }
+ return pauseIDs, nil, pauseErrors
+}
+
+// UnpausePods unpauses one or more pods using varlink and the remote client
+func (r *LocalRuntime) UnpausePods(c *cliconfig.PodUnpauseValues) ([]string, map[string]error, []error) {
+ var (
+ unpauseIDs []string
+ unpauseErrors []error
+ )
+ containerErrors := make(map[string]error)
+
+ pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
+ if err != nil {
+ unpauseErrors = append(unpauseErrors, err)
+ return nil, containerErrors, unpauseErrors
+ }
+ for _, pod := range pods {
+ reply, err := iopodman.UnpausePod().Call(r.Conn, pod)
+ if err != nil {
+ unpauseErrors = append(unpauseErrors, err)
+ continue
+ }
+ unpauseIDs = append(unpauseIDs, reply)
+ }
+ return unpauseIDs, nil, unpauseErrors
+}
+
+// RestartPods restarts pods using varlink and the remote client
+func (r *LocalRuntime) RestartPods(ctx context.Context, c *cliconfig.PodRestartValues) ([]string, map[string]error, []error) {
+ var (
+ restartIDs []string
+ restartErrors []error
+ )
+ containerErrors := make(map[string]error)
+
+ pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
+ if err != nil {
+ restartErrors = append(restartErrors, err)
+ return nil, containerErrors, restartErrors
+ }
+ for _, pod := range pods {
+ reply, err := iopodman.RestartPod().Call(r.Conn, pod)
+ if err != nil {
+ restartErrors = append(restartErrors, err)
+ continue
+ }
+ restartIDs = append(restartIDs, reply)
+ }
+ return restartIDs, nil, restartErrors
+}
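
Each of the remote wrappers above repeats the same resolve-then-call shape; purely as an illustration, that shape could be factored like the sketch below (forEachPod is a hypothetical helper inside pkg/adapter, not something this patch adds):

// +build remoteclient

package adapter

import (
	"github.com/containers/libpod/cmd/podman/varlink"
	"github.com/varlink/go/varlink"
)

// forEachPod resolves pod IDs exactly as the wrappers above do and then applies
// op to each one, collecting successes and failures separately. op stands in
// for calls such as iopodman.StartPod or iopodman.KillPod.
func forEachPod(r *LocalRuntime, all, latest bool, args []string,
	op func(conn *varlink.Connection, podID string) (string, error)) ([]string, []error) {
	podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, all, latest, args)
	if err != nil {
		return nil, []error{err}
	}
	var (
		ids  []string
		errs []error
	)
	for _, p := range podIDs {
		id, err := op(r.Conn, p)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		ids = append(ids, id)
	}
	return ids, errs
}
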
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
new file mode 100644
index 000000000..4f5b98dbb
--- /dev/null
+++ b/pkg/adapter/runtime.go
@@ -0,0 +1,335 @@
+// +build !remoteclient
+
+package adapter
+
+import (
+ "context"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/imagebuildah"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/pkg/errors"
+)
+
+// LocalRuntime describes a typical libpod runtime
+type LocalRuntime struct {
+ *libpod.Runtime
+ Remote bool
+}
+
+// ContainerImage ...
+type ContainerImage struct {
+ *image.Image
+}
+
+// Container ...
+type Container struct {
+ *libpod.Container
+}
+
+// Volume ...
+type Volume struct {
+ *libpod.Volume
+}
+
+// VolumeFilter is for filtering volumes on the client
+type VolumeFilter func(*Volume) bool
+
+// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
+func GetRuntime(c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return nil, err
+ }
+ return &LocalRuntime{
+ Runtime: runtime,
+ }, nil
+}
+
+// GetImages returns a slice of images in containerimages
+func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
+ var containerImages []*ContainerImage
+ images, err := r.Runtime.ImageRuntime().GetImages()
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range images {
+ containerImages = append(containerImages, &ContainerImage{i})
+ }
+ return containerImages, nil
+
+}
+
+// NewImageFromLocal returns a containerimage representation of an image from local storage
+func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
+ img, err := r.Runtime.ImageRuntime().NewFromLocal(name)
+ if err != nil {
+ return nil, err
+ }
+ return &ContainerImage{img}, nil
+}
+
+// LoadFromArchiveReference calls into local storage to load an image from an archive
+func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
+ var containerImages []*ContainerImage
+ imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer)
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range imgs {
+ ci := ContainerImage{i}
+ containerImages = append(containerImages, &ci)
+ }
+ return containerImages, nil
+}
+
+// New calls into local storage to look for an image in local storage or to pull it
+func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
+ img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, forcePull, label)
+ if err != nil {
+ return nil, err
+ }
+ return &ContainerImage{img}, nil
+}
+
+// RemoveImage calls into local storage and removes an image
+func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
+ return r.Runtime.RemoveImage(ctx, img.Image, force)
+}
+
+// LookupContainer ...
+func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
+ ctr, err := r.Runtime.LookupContainer(idOrName)
+ if err != nil {
+ return nil, err
+ }
+ return &Container{ctr}, nil
+}
+
+// PruneImages is a wrapper for PruneImages in the image package
+func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
+ return r.ImageRuntime().PruneImages(all)
+}
+
+// Export is a wrapper to container export to a tarfile
+func (r *LocalRuntime) Export(name string, path string) error {
+ ctr, err := r.Runtime.LookupContainer(name)
+ if err != nil {
+ return errors.Wrapf(err, "error looking up container %q", name)
+ }
+ if os.Geteuid() != 0 {
+ state, err := ctr.State()
+ if err != nil {
+ return errors.Wrapf(err, "cannot read container state %q", ctr.ID())
+ }
+ if state == libpod.ContainerStateRunning || state == libpod.ContainerStatePaused {
+ data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
+ if err != nil {
+ return errors.Wrapf(err, "cannot read conmon PID file %q", ctr.Config().ConmonPidFile)
+ }
+ conmonPid, err := strconv.Atoi(string(data))
+ if err != nil {
+ return errors.Wrapf(err, "cannot parse PID %q", data)
+ }
+ became, ret, err := rootless.JoinDirectUserAndMountNS(uint(conmonPid))
+ if err != nil {
+ return err
+ }
+ if became {
+ os.Exit(ret)
+ }
+ } else {
+ became, ret, err := rootless.BecomeRootInUserNS()
+ if err != nil {
+ return err
+ }
+ if became {
+ os.Exit(ret)
+ }
+ }
+ }
+
+ return ctr.Export(path)
+}
+
+// Import is a wrapper to import a container image
+func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
+ return r.Runtime.Import(ctx, source, reference, changes, history, quiet)
+}
+
+// CreateVolume is a wrapper to create volumes
+func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) {
+ var (
+ options []libpod.VolumeCreateOption
+ volName string
+ )
+
+ if len(c.InputArgs) > 0 {
+ volName = c.InputArgs[0]
+ options = append(options, libpod.WithVolumeName(volName))
+ }
+
+ if c.Flag("driver").Changed {
+ options = append(options, libpod.WithVolumeDriver(c.Driver))
+ }
+
+ if len(labels) != 0 {
+ options = append(options, libpod.WithVolumeLabels(labels))
+ }
+
+ if len(opts) != 0 {
+ options = append(options, libpod.WithVolumeOptions(opts))
+ }
+ newVolume, err := r.NewVolume(ctx, options...)
+ if err != nil {
+ return "", err
+ }
+ return newVolume.Name(), nil
+}
+
+// RemoveVolumes is a wrapper to remove volumes
+func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, error) {
+ return r.Runtime.RemoveVolumes(ctx, c.InputArgs, c.All, c.Force)
+}
+
+// Push is a wrapper to push an image to a registry
+func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
+ newImage, err := r.ImageRuntime().NewFromLocal(srcName)
+ if err != nil {
+ return err
+ }
+ return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil)
+}
+
+// InspectVolumes returns a slice of volumes based on an arg list or --all
+func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*Volume, error) {
+ var (
+ volumes []*libpod.Volume
+ err error
+ )
+
+ if c.All {
+ volumes, err = r.GetAllVolumes()
+ } else {
+ for _, v := range c.InputArgs {
+ vol, err := r.GetVolume(v)
+ if err != nil {
+ return nil, err
+ }
+ volumes = append(volumes, vol)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ return libpodVolumeToVolume(volumes), nil
+}
+
+// Volumes returns a slice of localruntime volumes
+func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) {
+ vols, err := r.GetAllVolumes()
+ if err != nil {
+ return nil, err
+ }
+ return libpodVolumeToVolume(vols), nil
+}
+
+// libpodVolumeToVolume converts a slice of libpod volumes to a slice
+// of localruntime volumes (same as libpod)
+func libpodVolumeToVolume(volumes []*libpod.Volume) []*Volume {
+ var vols []*Volume
+ for _, v := range volumes {
+ newVol := Volume{
+ v,
+ }
+ vols = append(vols, &newVol)
+ }
+ return vols
+}
+
+// Build is the wrapper to build images
+func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) error {
+ namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c.PodmanCommand.Command)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing namespace-related options")
+ }
+ usernsOption, idmappingOptions, err := parse.IDMappingOptions(c.PodmanCommand.Command)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing ID mapping options")
+ }
+ namespaceOptions.AddOrReplace(usernsOption...)
+
+ systemContext, err := parse.SystemContextFromOptions(c.PodmanCommand.Command)
+ if err != nil {
+ return errors.Wrapf(err, "error building system context")
+ }
+
+ authfile := c.Authfile
+ if len(c.Authfile) == 0 {
+ authfile = os.Getenv("REGISTRY_AUTH_FILE")
+ }
+
+ systemContext.AuthFilePath = authfile
+ commonOpts, err := parse.CommonBuildOptions(c.PodmanCommand.Command)
+ if err != nil {
+ return err
+ }
+
+ options.NamespaceOptions = namespaceOptions
+ options.ConfigureNetwork = networkPolicy
+ options.IDMappingOptions = idmappingOptions
+ options.CommonBuildOpts = commonOpts
+ options.SystemContext = systemContext
+
+ if c.Flag("runtime").Changed {
+ options.Runtime = r.GetOCIRuntimePath()
+ }
+ if c.Quiet {
+ options.ReportWriter = ioutil.Discard
+ }
+
+ if rootless.IsRootless() {
+ options.Isolation = buildah.IsolationOCIRootless
+ }
+
+ return r.Runtime.Build(ctx, options, dockerfiles...)
+}
+
+// PruneVolumes is a wrapper function for libpod PruneVolumes
+func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) {
+ return r.Runtime.PruneVolumes(ctx)
+}
+
+// SaveImage is a wrapper function for saving an image to the local filesystem
+func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error {
+ source := c.InputArgs[0]
+ additionalTags := c.InputArgs[1:]
+
+ newImage, err := r.Runtime.ImageRuntime().NewFromLocal(source)
+ if err != nil {
+ return err
+ }
+ return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress)
+}
+
+// LoadImage is a wrapper function for libpod LoadImage
+func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) {
+ var (
+ writer io.Writer
+ )
+ if !cli.Quiet {
+ writer = os.Stderr
+ }
+ return r.Runtime.LoadImage(ctx, name, cli.Input, writer, cli.SignaturePolicy)
+}
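
As with the pod wrappers, these image and volume helpers are thin enough that a caller only needs the cliconfig struct; a hedged sketch of a volume-create handler, assuming VolumeCreateValues embeds PodmanCommand and that the label and option maps would normally be parsed from --label/--opt flags:

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/cmd/podman/cliconfig"
	"github.com/containers/libpod/pkg/adapter"
)

// volumeCreateCmd is a hypothetical handler: it hands parsed label and option
// maps to the CreateVolume wrapper and prints the resulting volume name.
func volumeCreateCmd(c *cliconfig.VolumeCreateValues) error {
	runtime, err := adapter.GetRuntime(&c.PodmanCommand)
	if err != nil {
		return err
	}
	defer runtime.Shutdown(false)

	labels := map[string]string{"purpose": "demo"} // placeholder; normally parsed from flags
	opts := map[string]string{}                    // placeholder; normally parsed from flags

	name, err := runtime.CreateVolume(context.TODO(), c, labels, opts)
	if err != nil {
		return err
	}
	fmt.Println(name)
	return nil
}
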
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
new file mode 100644
index 000000000..ca2fad852
--- /dev/null
+++ b/pkg/adapter/runtime_remote.go
@@ -0,0 +1,798 @@
+// +build remoteclient
+
+package adapter
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/imagebuildah"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/utils"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/varlink/go/varlink"
+)
+
+// RemoteImageRuntime is a wrapper for the remote image runtime
+type RemoteImageRuntime struct{}
+
+// RemoteRuntime describes a wrapper runtime struct
+type RemoteRuntime struct {
+ Conn *varlink.Connection
+ Remote bool
+}
+
+// LocalRuntime describes a typical libpod runtime
+type LocalRuntime struct {
+ *RemoteRuntime
+}
+
+// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
+func GetRuntime(c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+ runtime := RemoteRuntime{}
+ conn, err := runtime.Connect()
+ if err != nil {
+ return nil, err
+ }
+ rr := RemoteRuntime{
+ Conn: conn,
+ Remote: true,
+ }
+ foo := LocalRuntime{
+ &rr,
+ }
+ return &foo, nil
+}
+
+// Shutdown is a bogus wrapper for compat with the libpod runtime
+func (r RemoteRuntime) Shutdown(force bool) error {
+ return nil
+}
+
+// ContainerImage is a wrapper for an image known to the remote runtime
+type ContainerImage struct {
+ remoteImage
+}
+
+type remoteImage struct {
+ ID string
+ Labels map[string]string
+ RepoTags []string
+ RepoDigests []string
+ Parent string
+ Size int64
+ Created time.Time
+ InputName string
+ Names []string
+ Digest digest.Digest
+ isParent bool
+ Runtime *LocalRuntime
+}
+
+// Container ...
+type Container struct {
+ remoteContainer
+}
+
+// remoteContainer ....
+type remoteContainer struct {
+ Runtime *LocalRuntime
+ config *libpod.ContainerConfig
+ state *libpod.ContainerState
+}
+
+// VolumeFilter is for filtering volumes on the client
+type VolumeFilter func(*Volume) bool
+
+// Volume is embed for libpod volumes
+type Volume struct {
+ remoteVolume
+}
+
+type remoteVolume struct {
+ Runtime *LocalRuntime
+ config *libpod.VolumeConfig
+}
+
+// GetImages returns a slice of containerimages over a varlink connection
+func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
+ var newImages []*ContainerImage
+ images, err := iopodman.ListImages().Call(r.Conn)
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range images {
+ name := i.Id
+ if len(i.RepoTags) > 0 {
+ name = i.RepoTags[0]
+ }
+ newImage, err := imageInListToContainerImage(i, name, r)
+ if err != nil {
+ return nil, err
+ }
+ newImages = append(newImages, newImage)
+ }
+ return newImages, nil
+}
+
+func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRuntime) (*ContainerImage, error) {
+ created, err := time.ParseInLocation(time.RFC3339, i.Created, time.UTC)
+ if err != nil {
+ return nil, err
+ }
+ ri := remoteImage{
+ InputName: name,
+ ID: i.Id,
+ Labels: i.Labels,
+ RepoTags: i.RepoTags,
+ RepoDigests: i.RepoDigests,
+ Parent: i.ParentId,
+ Size: i.Size,
+ Created: created,
+ Names: i.RepoTags,
+ isParent: i.IsParent,
+ Runtime: runtime,
+ }
+ return &ContainerImage{ri}, nil
+}
+
+// NewImageFromLocal returns a container image representation of a image over varlink
+func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
+ img, err := iopodman.GetImage().Call(r.Conn, name)
+ if err != nil {
+ return nil, err
+ }
+ return imageInListToContainerImage(img, name, r)
+
+}
+
+// LoadFromArchiveReference creates an image from a local archive
+func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
+ var iid string
+ // TODO We need to find a way to pass certDir, creds, and tlsverify into this function; normally these would
+ // come from cli options but we don't want those in here either.
+ tlsverify := true
+ reply, err := iopodman.PullImage().Send(r.Conn, varlink.More, srcRef.DockerReference().String(), "", "", signaturePolicyPath, &tlsverify)
+ if err != nil {
+ return nil, err
+ }
+
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range responses.Logs {
+ fmt.Print(line)
+ }
+ iid = responses.Id
+ if flags&varlink.Continues == 0 {
+ break
+ }
+ }
+
+ newImage, err := r.NewImageFromLocal(iid)
+ if err != nil {
+ return nil, err
+ }
+ return []*ContainerImage{newImage}, nil
+}
+
+// New calls into local storage to look for an image in local storage or to pull it
+func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
+ var iid string
+ if label != nil {
+ return nil, errors.New("the remote client function does not support checking a remote image for a label")
+ }
+ var (
+ tlsVerify bool
+ tlsVerifyPtr *bool
+ )
+ if dockeroptions.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+ tlsVerify = true
+ tlsVerifyPtr = &tlsVerify
+
+ }
+ if dockeroptions.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue {
+ tlsVerify = false
+ tlsVerifyPtr = &tlsVerify
+ }
+
+ reply, err := iopodman.PullImage().Send(r.Conn, varlink.More, name, dockeroptions.DockerCertPath, "", signaturePolicyPath, tlsVerifyPtr)
+ if err != nil {
+ return nil, err
+ }
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range responses.Logs {
+ fmt.Print(line)
+ }
+ iid = responses.Id
+ if flags&varlink.Continues == 0 {
+ break
+ }
+ }
+ newImage, err := r.NewImageFromLocal(iid)
+ if err != nil {
+ return nil, err
+ }
+ return newImage, nil
+}
+
+// IsParent returns whether the image is the parent of another image's layer,
+// as computed and reported by the remote endpoint.
+func (ci *ContainerImage) IsParent() (bool, error) {
+ return ci.remoteImage.isParent, nil
+}
+
+// ID returns the image ID as a string
+func (ci *ContainerImage) ID() string {
+ return ci.remoteImage.ID
+}
+
+// Names returns a string array of names associated with the image
+func (ci *ContainerImage) Names() []string {
+ return ci.remoteImage.Names
+}
+
+// Created returns the time the image was created
+func (ci *ContainerImage) Created() time.Time {
+ return ci.remoteImage.Created
+}
+
+// Size returns the size of the image
+func (ci *ContainerImage) Size(ctx context.Context) (*uint64, error) {
+ usize := uint64(ci.remoteImage.Size)
+ return &usize, nil
+}
+
+// Digest returns the image's digest
+func (ci *ContainerImage) Digest() digest.Digest {
+ return ci.remoteImage.Digest
+}
+
+// Labels returns a map of the image's labels
+func (ci *ContainerImage) Labels(ctx context.Context) (map[string]string, error) {
+ return ci.remoteImage.Labels, nil
+}
+
+// Dangling returns a bool if the image is "dangling"
+func (ci *ContainerImage) Dangling() bool {
+ return len(ci.Names()) == 0
+}
+
+// TagImage ...
+func (ci *ContainerImage) TagImage(tag string) error {
+ _, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag)
+ return err
+}
+
+// RemoveImage calls varlink to remove an image
+func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
+ return iopodman.RemoveImage().Call(r.Conn, img.InputName, force)
+}
+
+// History returns the history of an image and its layers
+func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error) {
+ var imageHistories []*image.History
+
+ reply, err := iopodman.HistoryImage().Call(ci.Runtime.Conn, ci.InputName)
+ if err != nil {
+ return nil, err
+ }
+ for _, h := range reply {
+ created, err := time.ParseInLocation(time.RFC3339, h.Created, time.UTC)
+ if err != nil {
+ return nil, err
+ }
+ ih := image.History{
+ ID: h.Id,
+ Created: &created,
+ CreatedBy: h.CreatedBy,
+ Size: h.Size,
+ Comment: h.Comment,
+ }
+ imageHistories = append(imageHistories, &ih)
+ }
+ return imageHistories, nil
+}
+
+// LookupContainer gets basic information about container over a varlink
+// connection and then translates it to a *Container
+func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
+ state, err := r.ContainerState(idOrName)
+ if err != nil {
+ return nil, err
+ }
+ config := r.Config(idOrName)
+
+ rc := remoteContainer{
+ r,
+ config,
+ state,
+ }
+
+ c := Container{
+ rc,
+ }
+ return &c, nil
+}
+
+// GetLatestContainer is not yet supported by the remote client
+func (r *LocalRuntime) GetLatestContainer() (*Container, error) {
+ return nil, libpod.ErrNotImplemented
+}
+
+// ContainerState returns the "state" of the container.
+func (r *LocalRuntime) ContainerState(name string) (*libpod.ContainerState, error) { //no-lint
+ reply, err := iopodman.ContainerStateData().Call(r.Conn, name)
+ if err != nil {
+ return nil, err
+ }
+ data := libpod.ContainerState{}
+ if err := json.Unmarshal([]byte(reply), &data); err != nil {
+ return nil, err
+ }
+ return &data, err
+
+}
+
+// Config returns a container config
+func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig {
+ // TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer
+ // further looking into it for after devconf.
+ // The libpod function for this has no errors so we are kind of in a tough
+ // spot here. Logging the errors for now.
+ reply, err := iopodman.ContainerConfig().Call(r.Conn, name)
+ if err != nil {
+ logrus.Error("call to container.config failed")
+ }
+ data := libpod.ContainerConfig{}
+ if err := json.Unmarshal([]byte(reply), &data); err != nil {
+ logrus.Error("failed to unmarshal container inspect data")
+ }
+ return &data
+
+}
+
+// PruneImages is the wrapper call for a remote-client to prune images
+func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
+ return iopodman.ImagesPrune().Call(r.Conn, all)
+}
+
+// Export is a wrapper to container export to a tarfile
+func (r *LocalRuntime) Export(name string, path string) error {
+ tempPath, err := iopodman.ExportContainer().Call(r.Conn, name, "")
+ if err != nil {
+ return err
+ }
+ return r.GetFileFromRemoteHost(tempPath, path, true)
+}
+
+// GetFileFromRemoteHost copies a file from the varlink service back to the
+// local host, optionally deleting the remote copy once the transfer finishes
+func (r *LocalRuntime) GetFileFromRemoteHost(remoteFilePath, outputPath string, delete bool) error {
+ outputFile, err := os.Create(outputPath)
+ if err != nil {
+ return err
+ }
+ defer outputFile.Close()
+
+ writer := bufio.NewWriter(outputFile)
+ defer writer.Flush()
+
+ reply, err := iopodman.ReceiveFile().Send(r.Conn, varlink.Upgrade, remoteFilePath, delete)
+ if err != nil {
+ return err
+ }
+
+ length, _, err := reply()
+ if err != nil {
+ return errors.Wrap(err, "unable to get file length for transfer")
+ }
+
+ reader := r.Conn.Reader
+ if _, err := io.CopyN(writer, reader, length); err != nil {
+ return errors.Wrap(err, "file transer failed")
+ }
+ return nil
+}
+
+// Import implements the remote calls required to import a container image to the store
+func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
+ // First we send the file to the host
+ tempFile, err := r.SendFileOverVarlink(source)
+ if err != nil {
+ return "", err
+ }
+ return iopodman.ImportImage().Call(r.Conn, strings.TrimRight(tempFile, ":"), reference, history, changes, true)
+}
+
+// Build sends a build request over varlink for the remote client and streams
+// the build output back to the caller
+func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) error {
+ buildOptions := iopodman.BuildOptions{
+ AddHosts: options.CommonBuildOpts.AddHost,
+ CgroupParent: options.CommonBuildOpts.CgroupParent,
+ CpuPeriod: int64(options.CommonBuildOpts.CPUPeriod),
+ CpuQuota: options.CommonBuildOpts.CPUQuota,
+ CpuShares: int64(options.CommonBuildOpts.CPUShares),
+ CpusetCpus: options.CommonBuildOpts.CPUSetCPUs,
+ CpusetMems: options.CommonBuildOpts.CPUSetMems,
+ Memory: options.CommonBuildOpts.Memory,
+ MemorySwap: options.CommonBuildOpts.MemorySwap,
+ ShmSize: options.CommonBuildOpts.ShmSize,
+ Ulimit: options.CommonBuildOpts.Ulimit,
+ Volume: options.CommonBuildOpts.Volumes,
+ }
+
+ buildinfo := iopodman.BuildInfo{
+ AdditionalTags: options.AdditionalTags,
+ Annotations: options.Annotations,
+ BuildArgs: options.Args,
+ BuildOptions: buildOptions,
+ CniConfigDir: options.CNIConfigDir,
+ CniPluginDir: options.CNIPluginPath,
+ Compression: string(options.Compression),
+ DefaultsMountFilePath: options.DefaultMountsFilePath,
+ Dockerfiles: dockerfiles,
+ //Err: string(options.Err),
+ ForceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ Iidfile: options.IIDFile,
+ Label: options.Labels,
+ Layers: options.Layers,
+ Nocache: options.NoCache,
+ //Out:
+ Output: options.Output,
+ OutputFormat: options.OutputFormat,
+ PullPolicy: options.PullPolicy.String(),
+ Quiet: options.Quiet,
+ RemoteIntermediateCtrs: options.RemoveIntermediateCtrs,
+ //ReportWriter:
+ RuntimeArgs: options.RuntimeArgs,
+ SignaturePolicyPath: options.SignaturePolicyPath,
+ Squash: options.Squash,
+ }
+ // tar the file
+ outputFile, err := ioutil.TempFile("", "varlink_tar_send")
+ if err != nil {
+ return err
+ }
+ defer outputFile.Close()
+ defer os.Remove(outputFile.Name())
+
+ // Create the tarball of the context dir to a tempfile
+ if err := utils.TarToFilesystem(options.ContextDirectory, outputFile); err != nil {
+ return err
+ }
+ // Send the context dir tarball over varlink.
+ tempFile, err := r.SendFileOverVarlink(outputFile.Name())
+ if err != nil {
+ return err
+ }
+ buildinfo.ContextDir = tempFile
+
+ reply, err := iopodman.BuildImage().Send(r.Conn, varlink.More, buildinfo)
+ if err != nil {
+ return err
+ }
+
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ return err
+ }
+ for _, line := range responses.Logs {
+ fmt.Print(line)
+ }
+ if flags&varlink.Continues == 0 {
+ break
+ }
+ }
+ return err
+}
+
+// SendFileOverVarlink sends a file over varlink in an upgraded connection
+func (r *LocalRuntime) SendFileOverVarlink(source string) (string, error) {
+ fs, err := os.Open(source)
+ if err != nil {
+ return "", err
+ }
+
+ fileInfo, err := fs.Stat()
+ if err != nil {
+ return "", err
+ }
+ logrus.Debugf("sending %s over varlink connection", source)
+ reply, err := iopodman.SendFile().Send(r.Conn, varlink.Upgrade, "", int64(fileInfo.Size()))
+ if err != nil {
+ return "", err
+ }
+ _, _, err = reply()
+ if err != nil {
+ return "", err
+ }
+
+ reader := bufio.NewReader(fs)
+ _, err = reader.WriteTo(r.Conn.Writer)
+ if err != nil {
+ return "", err
+ }
+ logrus.Debugf("file transfer complete for %s", source)
+ r.Conn.Writer.Flush()
+
+ // All was sent, wait for the ACK from the server
+ tempFile, err := r.Conn.Reader.ReadString(':')
+ if err != nil {
+ return "", err
+ }
+
+ // r.Conn is kaput at this point due to the upgrade
+ if err := r.RemoteRuntime.RefreshConnection(); err != nil {
+ return "", err
+
+ }
+
+ return strings.Replace(tempFile, ":", "", -1), nil
+}
+
+// GetAllVolumes retrieves all the volumes
+func (r *LocalRuntime) GetAllVolumes() ([]*libpod.Volume, error) {
+ return nil, libpod.ErrNotImplemented
+}
+
+// RemoveVolume removes a volume
+func (r *LocalRuntime) RemoveVolume(ctx context.Context, v *libpod.Volume, force, prune bool) error {
+ return libpod.ErrNotImplemented
+}
+
+// GetContainers retrieves all containers from the state
+// Filters can be provided which will determine what containers are included in
+// the output. Multiple filters are handled by ANDing their output, so only
+// containers matching all filters are returned
+func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libpod.Container, error) {
+ return nil, libpod.ErrNotImplemented
+}
+
+// RemoveContainer removes the given container
+// If force is specified, the container will be stopped first
+// Otherwise, RemoveContainer will return an error if the container is running
+func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force, volumes bool) error {
+ return libpod.ErrNotImplemented
+}
+
+// CreateVolume creates a volume over a varlink connection for the remote client
+func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) {
+ cvOpts := iopodman.VolumeCreateOpts{
+ Options: opts,
+ Labels: labels,
+ }
+ if len(c.InputArgs) > 0 {
+ cvOpts.VolumeName = c.InputArgs[0]
+ }
+
+ if c.Flag("driver").Changed {
+ cvOpts.Driver = c.Driver
+ }
+
+ return iopodman.VolumeCreate().Call(r.Conn, cvOpts)
+}
+
+// RemoveVolumes removes volumes over a varlink connection for the remote client
+func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, error) {
+ rmOpts := iopodman.VolumeRemoveOpts{
+ All: c.All,
+ Force: c.Force,
+ Volumes: c.InputArgs,
+ }
+ return iopodman.VolumeRemove().Call(r.Conn, rmOpts)
+}
+
+// Push pushes an image to a registry over varlink for the remote client
+func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
+
+ var (
+ tls *bool
+ tlsVerify bool
+ )
+ if dockerRegistryOptions.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue {
+ tlsVerify = false
+ tls = &tlsVerify
+ }
+ if dockerRegistryOptions.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+ tlsVerify = true
+ tls = &tlsVerify
+ }
+
+ reply, err := iopodman.PushImage().Send(r.Conn, varlink.More, srcName, destination, tls, signaturePolicyPath, "", dockerRegistryOptions.DockerCertPath, forceCompress, manifestMIMEType, signingOptions.RemoveSignatures, signingOptions.SignBy)
+ if err != nil {
+ return err
+ }
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ return err
+ }
+ for _, line := range responses.Logs {
+ fmt.Print(line)
+ }
+ if flags&varlink.Continues == 0 {
+ break
+ }
+ }
+
+ return err
+}
+
+// InspectVolumes returns a slice of volumes based on an arg list or --all
+func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*Volume, error) {
+ reply, err := iopodman.GetVolumes().Call(r.Conn, c.InputArgs, c.All)
+ if err != nil {
+ return nil, err
+ }
+ return varlinkVolumeToVolume(r, reply), nil
+}
+
+// Volumes returns a slice of adapter volumes built from libpod volume data
+// retrieved over a varlink connection
+func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) {
+ reply, err := iopodman.GetVolumes().Call(r.Conn, []string{}, true)
+ if err != nil {
+ return nil, err
+ }
+ return varlinkVolumeToVolume(r, reply), nil
+}
+
+func varlinkVolumeToVolume(r *LocalRuntime, volumes []iopodman.Volume) []*Volume {
+ var vols []*Volume
+ for _, v := range volumes {
+ volumeConfig := libpod.VolumeConfig{
+ Name: v.Name,
+ Labels: v.Labels,
+ MountPoint: v.MountPoint,
+ Driver: v.Driver,
+ Options: v.Options,
+ Scope: v.Scope,
+ }
+ n := remoteVolume{
+ Runtime: r,
+ config: &volumeConfig,
+ }
+ newVol := Volume{
+ n,
+ }
+ vols = append(vols, &newVol)
+ }
+ return vols
+}
+
+// PruneVolumes removes all unused volumes from the remote system
+func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) {
+ var errs []error
+ prunedNames, prunedErrors, err := iopodman.VolumesPrune().Call(r.Conn)
+ if err != nil {
+ return []string{}, []error{err}
+ }
+ // We need to transform the string results of the error into actual error types
+ for _, e := range prunedErrors {
+ errs = append(errs, errors.New(e))
+ }
+ return prunedNames, errs
+}
+
+// SaveImage is a wrapper function for saving an image to the local filesystem
+func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error {
+ source := c.InputArgs[0]
+ additionalTags := c.InputArgs[1:]
+
+ options := iopodman.ImageSaveOptions{
+ Name: source,
+ Format: c.Format,
+ Output: c.Output,
+ MoreTags: additionalTags,
+ Quiet: c.Quiet,
+ Compress: c.Compress,
+ }
+ reply, err := iopodman.ImageSave().Send(r.Conn, varlink.More, options)
+ if err != nil {
+ return err
+ }
+
+ var fetchfile string
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ return err
+ }
+ if len(responses.Id) > 0 {
+ fetchfile = responses.Id
+ }
+ for _, line := range responses.Logs {
+ fmt.Print(line)
+ }
+ if flags&varlink.Continues == 0 {
+ break
+ }
+
+ }
+ if err != nil {
+ return err
+ }
+
+ outputToDir := false
+ outfile := c.Output
+ var outputFile *os.File
+ // If the result is supposed to be a dir, then we need to put the tarfile
+ // from the host in a temporary file
+ if options.Format != "oci-archive" && options.Format != "docker-archive" {
+ outputToDir = true
+ outputFile, err = ioutil.TempFile("", "saveimage_tempfile")
+ if err != nil {
+ return err
+ }
+ outfile = outputFile.Name()
+ defer outputFile.Close()
+ defer os.Remove(outputFile.Name())
+ }
+ // We now need to fetch the tarball result back to the local system
+ if err := r.GetFileFromRemoteHost(fetchfile, outfile, true); err != nil {
+ return err
+ }
+
+ // If the result is a tarball, we're done
+ // If it is a dir, we need to untar the temporary file into the dir
+ if outputToDir {
+ if err := utils.UntarToFileSystem(c.Output, outputFile, &archive.TarOptions{}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// LoadImage loads a container image from a remote client's filesystem
+func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) {
+ var names string
+ remoteTempFile, err := r.SendFileOverVarlink(cli.Input)
+ if err != nil {
+ return "", nil
+ }
+ more := varlink.More
+ if cli.Quiet {
+ more = 0
+ }
+ reply, err := iopodman.LoadImage().Send(r.Conn, uint64(more), name, remoteTempFile, cli.Quiet, true)
+ if err != nil {
+ return "", err
+ }
+
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ logrus.Error(err)
+ return "", err
+ }
+ for _, line := range responses.Logs {
+ fmt.Print(line)
+ }
+ names = responses.Id
+ if flags&varlink.Continues == 0 {
+ break
+ }
+ }
+ return names, nil
+}
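
PullImage, BuildImage, PushImage, ImageSave, and LoadImage above all drive the same varlink.More reply loop; the loop in isolation looks like this sketch (the receive closure is a stand-in for the function returned by .Send(), whose concrete reply type differs per call):

package example

import (
	"fmt"

	"github.com/varlink/go/varlink"
)

// drainLogs keeps calling the receive closure, prints any streamed log lines,
// and stops once the server clears the Continues flag.
func drainLogs(receive func() (logs []string, flags uint64, err error)) error {
	for {
		logs, flags, err := receive()
		if err != nil {
			return err
		}
		for _, line := range logs {
			fmt.Print(line)
		}
		if flags&varlink.Continues == 0 {
			return nil
		}
	}
}
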
diff --git a/pkg/adapter/shortcuts/shortcuts.go b/pkg/adapter/shortcuts/shortcuts.go
new file mode 100644
index 000000000..0633399ae
--- /dev/null
+++ b/pkg/adapter/shortcuts/shortcuts.go
@@ -0,0 +1,27 @@
+package shortcuts
+
+import "github.com/containers/libpod/libpod"
+
+// GetPodsByContext gets pods whether all, latest, or a slice of names/ids
+func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
+ var outpods []*libpod.Pod
+ if all {
+ return runtime.GetAllPods()
+ }
+ if latest {
+ p, err := runtime.GetLatestPod()
+ if err != nil {
+ return nil, err
+ }
+ outpods = append(outpods, p)
+ return outpods, nil
+ }
+ for _, p := range pods {
+ pod, err := runtime.LookupPod(p)
+ if err != nil {
+ return nil, err
+ }
+ outpods = append(outpods, pod)
+ }
+ return outpods, nil
+}
diff --git a/pkg/adapter/volumes_remote.go b/pkg/adapter/volumes_remote.go
new file mode 100644
index 000000000..beacd943a
--- /dev/null
+++ b/pkg/adapter/volumes_remote.go
@@ -0,0 +1,33 @@
+// +build remoteclient
+
+package adapter
+
+// Name returns the name of the volume
+func (v *Volume) Name() string {
+ return v.config.Name
+}
+
+// Labels returns the labels for a volume
+func (v *Volume) Labels() map[string]string {
+ return v.config.Labels
+}
+
+// Driver returns the driver for the volume
+func (v *Volume) Driver() string {
+ return v.config.Driver
+}
+
+// Options returns the options a volume was created with
+func (v *Volume) Options() map[string]string {
+ return v.config.Options
+}
+
+// MountPoint returns the path the volume is mounted to
+func (v *Volume) MountPoint() string {
+ return v.config.MountPoint
+}
+
+// Scope returns the scope of the volume
+func (v *Volume) Scope() string {
+ return v.config.Scope
+}
diff --git a/pkg/registries/registries.go b/pkg/registries/registries.go
index cbb8b730c..9f4c94533 100644
--- a/pkg/registries/registries.go
+++ b/pkg/registries/registries.go
@@ -3,10 +3,12 @@ package registries
import (
"os"
"path/filepath"
+ "strings"
"github.com/containers/image/pkg/sysregistries"
"github.com/containers/image/types"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/docker/distribution/reference"
"github.com/pkg/errors"
)
@@ -49,3 +51,17 @@ func GetInsecureRegistries() ([]string, error) {
}
return registries, nil
}
+
+// GetRegistry returns the registry name from a string if specified
+func GetRegistry(image string) (string, error) {
+ // It is possible to only have the registry name in the format "myregistry/"
+ // if so, just trim the "/" from the end and return the registry name
+ if strings.HasSuffix(image, "/") {
+ return strings.TrimSuffix(image, "/"), nil
+ }
+ imgRef, err := reference.Parse(image)
+ if err != nil {
+ return "", err
+ }
+ return reference.Domain(imgRef.(reference.Named)), nil
+}
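
A small sketch of the values GetRegistry is expected to produce; the expected outputs in the comments are my reading of reference.Parse/reference.Domain rather than captured output:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/registries"
)

func main() {
	images := []string{
		"quay.io/libpod/alpine:latest",        // expected: quay.io
		"myregistry.example.com:5000/busybox", // expected: myregistry.example.com:5000
		"myregistry/",                         // trailing slash: expected myregistry, no parsing
	}
	for _, img := range images {
		reg, err := registries.GetRegistry(img)
		if err != nil {
			fmt.Println(img, "->", err)
			continue
		}
		fmt.Println(img, "->", reg)
	}
}
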
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index 279a03d3f..dfbc7fe33 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -12,6 +12,7 @@
#include <fcntl.h>
#include <sys/wait.h>
#include <string.h>
+#include <stdbool.h>
static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";
@@ -108,6 +109,13 @@ reexec_userns_join (int userns, int mountns)
char uid[16];
char **argv;
int pid;
+ char *cwd = getcwd (NULL, 0);
+
+ if (cwd == NULL)
+ {
+ fprintf (stderr, "error getting current working directory: %s\n", strerror (errno));
+ _exit (EXIT_FAILURE);
+ }
sprintf (uid, "%d", geteuid ());
@@ -153,6 +161,13 @@ reexec_userns_join (int userns, int mountns)
_exit (EXIT_FAILURE);
}
+ if (chdir (cwd) < 0)
+ {
+ fprintf (stderr, "cannot chdir: %s\n", strerror (errno));
+ _exit (EXIT_FAILURE);
+ }
+ free (cwd);
+
execvp (argv[0], argv);
_exit (EXIT_FAILURE);
@@ -186,6 +201,25 @@ reexec_in_user_namespace (int ready)
pid_t ppid = getpid ();
char **argv;
char uid[16];
+ char *listen_fds = NULL;
+ char *listen_pid = NULL;
+ bool do_socket_activation = false;
+ char *cwd = getcwd (NULL, 0);
+
+ if (cwd == NULL)
+ {
+ fprintf (stderr, "error getting current working directory: %s\n", strerror (errno));
+ _exit (EXIT_FAILURE);
+ }
+
+ listen_pid = getenv("LISTEN_PID");
+ listen_fds = getenv("LISTEN_FDS");
+
+ if (listen_pid != NULL && listen_fds != NULL) {
+ if (strtol(listen_pid, NULL, 10) == getpid()) {
+ do_socket_activation = true;
+ }
+ }
sprintf (uid, "%d", geteuid ());
@@ -197,8 +231,22 @@ reexec_in_user_namespace (int ready)
check_proc_sys_userns_file (_max_user_namespaces);
check_proc_sys_userns_file (_unprivileged_user_namespaces);
}
- if (pid)
+ if (pid) {
+ if (do_socket_activation) {
+ long num_fds;
+ num_fds = strtol(listen_fds, NULL, 10);
+ if (num_fds != LONG_MIN && num_fds != LONG_MAX) {
+ long i;
+ for (i = 0; i < num_fds; i++) {
+ close(3+i);
+ }
+ }
+ unsetenv("LISTEN_PID");
+ unsetenv("LISTEN_FDS");
+ unsetenv("LISTEN_FDNAMES");
+ }
return pid;
+ }
argv = get_cmd_line_args (ppid);
if (argv == NULL)
@@ -207,6 +255,12 @@ reexec_in_user_namespace (int ready)
_exit (EXIT_FAILURE);
}
+ if (do_socket_activation) {
+ char s[32];
+ sprintf(s, "%d", getpid());
+ setenv("LISTEN_PID", s, true);
+ }
+
setenv ("_LIBPOD_USERNS_CONFIGURED", "init", 1);
setenv ("_LIBPOD_ROOTLESS_UID", uid, 1);
@@ -232,6 +286,13 @@ reexec_in_user_namespace (int ready)
_exit (EXIT_FAILURE);
}
+ if (chdir (cwd) < 0)
+ {
+ fprintf (stderr, "cannot chdir: %s\n", strerror (errno));
+ _exit (EXIT_FAILURE);
+ }
+ free (cwd);
+
execvp (argv[0], argv);
_exit (EXIT_FAILURE);
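
For reference, the environment contract the socket-activation handling above relies on, restated as a small Go sketch (this mirrors the systemd sd_listen_fds convention and is not part of the patch):

package example

import (
	"os"
	"strconv"
)

// socketActivationFDs returns how many descriptors systemd passed to this
// process, or 0 if LISTEN_PID does not name us. The inherited fds start at 3,
// which is why the parent above closes 3..3+n and clears the variables before
// returning.
func socketActivationFDs() int {
	pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
	if err != nil || pid != os.Getpid() {
		return 0
	}
	n, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
	if err != nil || n < 0 {
		return 0
	}
	return n
}
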
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 344f4afb9..50e07ee74 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
+ "github.com/containers/storage/pkg/stringid"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-connections/nat"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -134,8 +135,8 @@ type CreateConfig struct {
SeccompProfilePath string //SecurityOpts
SecurityOpts []string
Rootfs string
- LocalVolumes []string //Keeps track of the built-in volumes of container used in the --volumes-from flag
- Syslog bool // Whether to enable syslog on exit commands
+ LocalVolumes []spec.Mount // Keeps track of the built-in volumes of the container used in the --volumes-from flag
+ Syslog bool // Whether to enable syslog on exit commands
}
func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
@@ -216,7 +217,7 @@ func (c *CreateConfig) initFSMounts() []spec.Mount {
//GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, error) {
- var m []spec.Mount
+ m := c.LocalVolumes
for _, i := range c.Volumes {
var options []string
spliti := strings.Split(i, ":")
@@ -234,22 +235,31 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
logrus.Debugf("User mount %s:%s options %v", spliti[0], spliti[1], options)
}
- // volumes from image config
- if c.ImageVolumeType != "tmpfs" {
+ if c.ImageVolumeType == "ignore" {
return m, nil
}
+
for vol := range c.BuiltinImgVolumes {
if libpod.MountExists(specMounts, vol) {
continue
}
+
mount := spec.Mount{
Destination: vol,
- Type: string(TypeTmpfs),
- Source: string(TypeTmpfs),
- Options: []string{"rprivate", "rw", "noexec", "nosuid", "nodev", "tmpcopyup"},
+ Type: c.ImageVolumeType,
+ Options: []string{"rprivate", "rw", "nodev"},
+ }
+ if c.ImageVolumeType == "tmpfs" {
+ mount.Source = "tmpfs"
+ mount.Options = append(mount.Options, "tmpcopyup")
+ } else {
+ // This will cause a new local Volume to be created on your system
+ mount.Source = stringid.GenerateNonCryptoID()
+ mount.Options = append(mount.Options, "bind")
}
m = append(m, mount)
}
+
return m, nil
}
@@ -257,6 +267,11 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
// and adds it to c.Volumes of the current container.
func (c *CreateConfig) GetVolumesFrom() error {
var options string
+
+ if rootless.SkipStorageSetup() {
+ return nil
+ }
+
for _, vol := range c.VolumesFrom {
splitVol := strings.SplitN(vol, ":", 2)
if len(splitVol) == 2 {
@@ -266,6 +281,10 @@ func (c *CreateConfig) GetVolumesFrom() error {
if err != nil {
return errors.Wrapf(err, "error looking up container %q", splitVol[0])
}
+ inspect, err := ctr.Inspect(false)
+ if err != nil {
+ return errors.Wrapf(err, "error inspecting %q", splitVol[0])
+ }
var createArtifact CreateConfig
artifact, err := ctr.GetArtifact("create-config")
if err != nil {
@@ -274,9 +293,13 @@ func (c *CreateConfig) GetVolumesFrom() error {
if err := json.Unmarshal(artifact, &createArtifact); err != nil {
return err
}
-
for key := range createArtifact.BuiltinImgVolumes {
- c.LocalVolumes = append(c.LocalVolumes, key)
+ for _, m := range inspect.Mounts {
+ if m.Destination == key {
+ c.LocalVolumes = append(c.LocalVolumes, m)
+ break
+ }
+ }
}
for _, i := range createArtifact.Volumes {
@@ -331,13 +354,22 @@ func (c *CreateConfig) createExitCommand() []string {
"--cgroup-manager", config.CgroupManager,
"--tmpdir", config.TmpDir,
}
+ if config.OCIRuntime != "" {
+ command = append(command, []string{"--runtime", config.OCIRuntime}...)
+ }
if config.StorageConfig.GraphDriverName != "" {
command = append(command, []string{"--storage-driver", config.StorageConfig.GraphDriverName}...)
}
if c.Syslog {
command = append(command, "--syslog")
}
- return append(command, []string{"container", "cleanup"}...)
+ command = append(command, []string{"container", "cleanup"}...)
+
+ if c.Rm {
+ command = append(command, "--rm")
+ }
+
+ return command
}
// GetContainerCreateOptions takes a CreateConfig and returns a slice of CtrCreateOptions
@@ -414,7 +446,15 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l
}
if IsNS(string(c.NetMode)) {
- // pass
+ split := strings.SplitN(string(c.NetMode), ":", 2)
+ if len(split) != 2 {
+ return nil, errors.Errorf("invalid user defined network namespace %q", c.NetMode.UserDefined())
+ }
+ _, err := os.Stat(split[1])
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, libpod.WithNetNS(portBindings, false, string(c.NetMode), networks))
} else if c.NetMode.IsContainer() {
connectedCtr, err := c.Runtime.LookupContainer(c.NetMode.Container())
if err != nil {
@@ -515,11 +555,9 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l
if c.CgroupParent != "" {
options = append(options, libpod.WithCgroupParent(c.CgroupParent))
}
- // For a rootless container always cleanup the storage/network as they
- // run in a different namespace thus not reusable when we restart.
- if c.Detach || rootless.IsRootless() {
- options = append(options, libpod.WithExitCommand(c.createExitCommand()))
- }
+
+ // Always use a cleanup process to clean up Podman after termination
+ options = append(options, libpod.WithExitCommand(c.createExitCommand()))
return options, nil
}
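
The exit-command change above boils down to unconditionally appending the cleanup subcommand and adding --rm when requested; a tiny sketch of that tail (the leading global flags are elided, and buildCleanupCommand is illustrative only):

package main

import "fmt"

// buildCleanupCommand mirrors the end of createExitCommand: "container cleanup"
// is always appended, and --rm only when the container was created with --rm.
func buildCleanupCommand(base []string, rm bool) []string {
	cmd := append(base, "container", "cleanup")
	if rm {
		cmd = append(cmd, "--rm")
	}
	return cmd
}

func main() {
	fmt.Println(buildCleanupCommand([]string{"podman", "--syslog"}, true))
	// Prints: [podman --syslog container cleanup --rm]
}
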
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index 76b8963ff..28a636fa6 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -3,10 +3,12 @@ package createconfig
import (
"os"
"path"
+ "path/filepath"
"strings"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/mount"
+ pmount "github.com/containers/storage/pkg/mount"
"github.com/docker/docker/daemon/caps"
"github.com/docker/go-units"
"github.com/opencontainers/runc/libcontainer/user"
@@ -392,9 +394,65 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
configSpec.Linux.Resources = &spec.LinuxResources{}
}
+ // Make sure that the bind mounts keep options like nosuid, noexec, nodev.
+ mounts, err := pmount.GetMounts()
+ if err != nil {
+ return nil, err
+ }
+ for i := range configSpec.Mounts {
+ m := &configSpec.Mounts[i]
+ isBind := false
+ for _, o := range m.Options {
+ if o == "bind" || o == "rbind" {
+ isBind = true
+ break
+ }
+ }
+ if !isBind {
+ continue
+ }
+ mount, err := findMount(m.Source, mounts)
+ if err != nil {
+ return nil, err
+ }
+ if mount == nil {
+ continue
+ }
+ next_option:
+ for _, o := range strings.Split(mount.Opts, ",") {
+ if o == "nosuid" || o == "noexec" || o == "nodev" {
+ for _, e := range m.Options {
+ if e == o {
+ continue next_option
+ }
+ }
+ m.Options = append(m.Options, o)
+ }
+ }
+ }
+
return configSpec, nil
}
+func findMount(target string, mounts []*pmount.Info) (*pmount.Info, error) {
+ var err error
+ target, err = filepath.Abs(target)
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot resolve %s", target)
+ }
+ var bestSoFar *pmount.Info
+ for _, i := range mounts {
+ if bestSoFar != nil && len(bestSoFar.Mountpoint) > len(i.Mountpoint) {
+ // Won't be better than what we have already found
+ continue
+ }
+ if strings.HasPrefix(target, i.Mountpoint) {
+ bestSoFar = i
+ }
+ }
+ return bestSoFar, nil
+}
+
func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) {
if config.PidMode.IsHost() && rootless.IsRootless() {
return
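
The new bind-mount handling above copies nosuid/noexec/nodev from the host mount backing each bind source, using findMount to pick the most specific (longest) matching mountpoint. A small sketch of that longest-prefix selection, assuming it sits in the same createconfig package with fmt imported:

```
// Sketch only: illustrates findMount's longest-prefix match. For a bind
// source under /sys/fs/cgroup, the /sys/fs/cgroup entry wins over /sys, so
// its nosuid, nodev and noexec flags are the ones appended to the container
// mount's options by the loop above.
func exampleFindMount() {
	mounts := []*pmount.Info{
		{Mountpoint: "/", Opts: "rw,relatime"},
		{Mountpoint: "/sys", Opts: "rw,nosuid,nodev,noexec"},
		{Mountpoint: "/sys/fs/cgroup", Opts: "ro,nosuid,nodev,noexec"},
	}
	info, err := findMount("/sys/fs/cgroup/systemd", mounts)
	if err == nil && info != nil {
		fmt.Println(info.Mountpoint, info.Opts) // /sys/fs/cgroup ro,nosuid,nodev,noexec
	}
}
```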
diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go
new file mode 100644
index 000000000..cae76dee8
--- /dev/null
+++ b/pkg/tracing/tracing.go
@@ -0,0 +1,28 @@
+package tracing
+
+import (
+ "fmt"
+ "io"
+
+ opentracing "github.com/opentracing/opentracing-go"
+ jaeger "github.com/uber/jaeger-client-go"
+ config "github.com/uber/jaeger-client-go/config"
+)
+
+// Init returns an instance of Jaeger Tracer that samples 100% of traces and logs all spans to stdout.
+func Init(service string) (opentracing.Tracer, io.Closer) {
+ cfg := &config.Configuration{
+ Sampler: &config.SamplerConfig{
+ Type: "const",
+ Param: 1,
+ },
+ Reporter: &config.ReporterConfig{
+ LogSpans: true,
+ },
+ }
+ tracer, closer, err := cfg.New(service, config.Logger(jaeger.StdLogger))
+ if err != nil {
+ panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+ }
+ return tracer, closer
+}
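
The new pkg/tracing package wraps Jaeger setup behind a single Init call. A sketch of how a caller might wire it up; the service and span names here are illustrative, not taken from this patch:

```
package main

import (
	opentracing "github.com/opentracing/opentracing-go"

	"github.com/containers/libpod/pkg/tracing"
)

func main() {
	// Install the Jaeger tracer globally and flush it on exit.
	tracer, closer := tracing.Init("libpod")
	defer closer.Close()
	opentracing.SetGlobalTracer(tracer)

	// Span name is illustrative only.
	span := tracer.StartSpan("create-container")
	defer span.Finish()
}
```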
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 52f431881..db8a3d5bb 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -259,8 +259,8 @@ func GetRootlessStorageOpts() (storage.StoreOptions, error) {
return opts, nil
}
-// GetRootlessVolumeInfo returns where all the name volumes will be created in rootless mode
-func GetRootlessVolumeInfo() (string, error) {
+// GetRootlessVolumePath returns where all the named volumes will be created in rootless mode
+func GetRootlessVolumePath() (string, error) {
dataDir, _, err := GetRootlessDirInfo()
if err != nil {
return "", err
@@ -307,15 +307,13 @@ func GetDefaultStoreOptions() (storage.StoreOptions, string, error) {
err error
)
storageOpts := storage.DefaultStoreOptions
- volumePath := "/var/lib/containers/storage"
-
+ volumePath := filepath.Join(storageOpts.GraphRoot, "volumes")
if rootless.IsRootless() {
storageOpts, err = GetRootlessStorageOpts()
if err != nil {
return storageOpts, volumePath, err
}
-
- volumePath, err = GetRootlessVolumeInfo()
+ volumePath, err = GetRootlessVolumePath()
if err != nil {
return storageOpts, volumePath, err
}
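
With this change the default volume path is derived from the configured graph root instead of the previously hard-coded /var/lib/containers/storage. A tiny sketch of what that works out to with the stock root-owned graph root:

```
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	graphRoot := "/var/lib/containers/storage" // default GraphRoot for root
	fmt.Println(filepath.Join(graphRoot, "volumes"))
	// /var/lib/containers/storage/volumes
}
```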
diff --git a/pkg/varlinkapi/config.go b/pkg/varlinkapi/config.go
index 8dd217b77..f557d04e5 100644
--- a/pkg/varlinkapi/config.go
+++ b/pkg/varlinkapi/config.go
@@ -1,20 +1,21 @@
package varlinkapi
import (
+ "github.com/containers/libpod/cmd/podman/cliconfig"
iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
- "github.com/urfave/cli"
+ "github.com/spf13/cobra"
)
// LibpodAPI is the basic varlink struct for libpod
type LibpodAPI struct {
- Cli *cli.Context
+ Cli *cobra.Command
iopodman.VarlinkInterface
Runtime *libpod.Runtime
}
// New creates a new varlink client
-func New(cli *cli.Context, runtime *libpod.Runtime) *iopodman.VarlinkInterface {
- lp := LibpodAPI{Cli: cli, Runtime: runtime}
+func New(cli *cliconfig.PodmanCommand, runtime *libpod.Runtime) *iopodman.VarlinkInterface {
+ lp := LibpodAPI{Cli: cli.Command, Runtime: runtime}
return iopodman.VarlinkNew(&lp)
}
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index 737e2dd96..ad9f107a7 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -21,7 +21,7 @@ import (
// ListContainers ...
func (i *LibpodAPI) ListContainers(call iopodman.VarlinkCall) error {
var (
- listContainers []iopodman.ListContainerData
+ listContainers []iopodman.Container
)
containers, err := i.Runtime.GetAllContainers()
@@ -44,10 +44,10 @@ func (i *LibpodAPI) ListContainers(call iopodman.VarlinkCall) error {
}
// GetContainer ...
-func (i *LibpodAPI) GetContainer(call iopodman.VarlinkCall, name string) error {
- ctr, err := i.Runtime.LookupContainer(name)
+func (i *LibpodAPI) GetContainer(call iopodman.VarlinkCall, id string) error {
+ ctr, err := i.Runtime.LookupContainer(id)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(id, err.Error())
}
opts := shared.PsOptions{
Namespace: true,
@@ -64,7 +64,7 @@ func (i *LibpodAPI) GetContainer(call iopodman.VarlinkCall, name string) error {
func (i *LibpodAPI) InspectContainer(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
inspectInfo, err := ctr.Inspect(true)
if err != nil {
@@ -90,7 +90,7 @@ func (i *LibpodAPI) InspectContainer(call iopodman.VarlinkCall, name string) err
func (i *LibpodAPI) ListContainerProcesses(call iopodman.VarlinkCall, name string, opts []string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
containerState, err := ctr.State()
if err != nil {
@@ -118,7 +118,7 @@ func (i *LibpodAPI) GetContainerLogs(call iopodman.VarlinkCall, name string) err
var logs []string
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
logPath := ctr.LogPath()
@@ -198,7 +198,7 @@ func (i *LibpodAPI) ListContainerChanges(call iopodman.VarlinkCall, name string)
func (i *LibpodAPI) ExportContainer(call iopodman.VarlinkCall, name, outPath string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
outputFile, err := ioutil.TempFile("", "varlink_recv")
if err != nil {
@@ -220,7 +220,7 @@ func (i *LibpodAPI) ExportContainer(call iopodman.VarlinkCall, name, outPath str
func (i *LibpodAPI) GetContainerStats(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
containerStats, err := ctr.GetContainerStats(&libpod.ContainerStats{})
if err != nil {
@@ -247,16 +247,11 @@ func (i *LibpodAPI) GetContainerStats(call iopodman.VarlinkCall, name string) er
return call.ReplyGetContainerStats(cs)
}
-// ResizeContainerTty ...
-func (i *LibpodAPI) ResizeContainerTty(call iopodman.VarlinkCall) error {
- return call.ReplyMethodNotImplemented("ResizeContainerTty")
-}
-
// StartContainer ...
func (i *LibpodAPI) StartContainer(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
state, err := ctr.State()
if err != nil {
@@ -265,7 +260,7 @@ func (i *LibpodAPI) StartContainer(call iopodman.VarlinkCall, name string) error
if state == libpod.ContainerStateRunning || state == libpod.ContainerStatePaused {
return call.ReplyErrorOccurred("container is already running or paused")
}
- if err := ctr.Start(getContext()); err != nil {
+ if err := ctr.Start(getContext(), false); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
return call.ReplyStartContainer(ctr.ID())
@@ -275,7 +270,7 @@ func (i *LibpodAPI) StartContainer(call iopodman.VarlinkCall, name string) error
func (i *LibpodAPI) StopContainer(call iopodman.VarlinkCall, name string, timeout int64) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
if err := ctr.StopWithTimeout(uint(timeout)); err != nil && err != libpod.ErrCtrStopped {
return call.ReplyErrorOccurred(err.Error())
@@ -287,7 +282,7 @@ func (i *LibpodAPI) StopContainer(call iopodman.VarlinkCall, name string, timeou
func (i *LibpodAPI) RestartContainer(call iopodman.VarlinkCall, name string, timeout int64) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
if err := ctr.RestartWithTimeout(getContext(), uint(timeout)); err != nil {
return call.ReplyErrorOccurred(err.Error())
@@ -316,7 +311,7 @@ func (i *LibpodAPI) KillContainer(call iopodman.VarlinkCall, name string, signal
}
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
if err := ctr.Kill(killSignal); err != nil {
return call.ReplyErrorOccurred(err.Error())
@@ -324,21 +319,11 @@ func (i *LibpodAPI) KillContainer(call iopodman.VarlinkCall, name string, signal
return call.ReplyKillContainer(ctr.ID())
}
-// UpdateContainer ...
-func (i *LibpodAPI) UpdateContainer(call iopodman.VarlinkCall) error {
- return call.ReplyMethodNotImplemented("UpdateContainer")
-}
-
-// RenameContainer ...
-func (i *LibpodAPI) RenameContainer(call iopodman.VarlinkCall) error {
- return call.ReplyMethodNotImplemented("RenameContainer")
-}
-
// PauseContainer ...
func (i *LibpodAPI) PauseContainer(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
if err := ctr.Pause(); err != nil {
return call.ReplyErrorOccurred(err.Error())
@@ -350,7 +335,7 @@ func (i *LibpodAPI) PauseContainer(call iopodman.VarlinkCall, name string) error
func (i *LibpodAPI) UnpauseContainer(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
if err := ctr.Unpause(); err != nil {
return call.ReplyErrorOccurred(err.Error())
@@ -358,17 +343,11 @@ func (i *LibpodAPI) UnpauseContainer(call iopodman.VarlinkCall, name string) err
return call.ReplyUnpauseContainer(ctr.ID())
}
-// AttachToContainer ...
-// TODO: DO we also want a different one for websocket?
-func (i *LibpodAPI) AttachToContainer(call iopodman.VarlinkCall) error {
- return call.ReplyMethodNotImplemented("AttachToContainer")
-}
-
// WaitContainer ...
func (i *LibpodAPI) WaitContainer(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
exitCode, err := ctr.Wait()
if err != nil {
@@ -379,13 +358,13 @@ func (i *LibpodAPI) WaitContainer(call iopodman.VarlinkCall, name string) error
}
// RemoveContainer ...
-func (i *LibpodAPI) RemoveContainer(call iopodman.VarlinkCall, name string, force bool) error {
+func (i *LibpodAPI) RemoveContainer(call iopodman.VarlinkCall, name string, force bool, removeVolumes bool) error {
ctx := getContext()
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
- if err := i.Runtime.RemoveContainer(ctx, ctr, force); err != nil {
+ if err := i.Runtime.RemoveContainer(ctx, ctr, force, removeVolumes); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
return call.ReplyRemoveContainer(ctr.ID())
@@ -406,7 +385,7 @@ func (i *LibpodAPI) DeleteStoppedContainers(call iopodman.VarlinkCall) error {
return call.ReplyErrorOccurred(err.Error())
}
if state != libpod.ContainerStateRunning {
- if err := i.Runtime.RemoveContainer(ctx, ctr, false); err != nil {
+ if err := i.Runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
deletedContainers = append(deletedContainers, ctr.ID())
@@ -419,7 +398,7 @@ func (i *LibpodAPI) DeleteStoppedContainers(call iopodman.VarlinkCall) error {
func (i *LibpodAPI) GetAttachSockets(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
status, err := ctr.State()
@@ -448,7 +427,7 @@ func (i *LibpodAPI) ContainerCheckpoint(call iopodman.VarlinkCall, name string,
ctx := getContext()
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
options := libpod.ContainerCheckpointOptions{
@@ -467,7 +446,7 @@ func (i *LibpodAPI) ContainerRestore(call iopodman.VarlinkCall, name string, kee
ctx := getContext()
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
options := libpod.ContainerCheckpointOptions{
@@ -496,7 +475,7 @@ func getArtifact(ctr *libpod.Container) (*cc.CreateConfig, error) {
func (i *LibpodAPI) ContainerConfig(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyErrorOccurred(err.Error())
+ return call.ReplyContainerNotFound(name, err.Error())
}
config := ctr.Config()
b, err := json.Marshal(config)
@@ -510,7 +489,7 @@ func (i *LibpodAPI) ContainerConfig(call iopodman.VarlinkCall, name string) erro
func (i *LibpodAPI) ContainerArtifacts(call iopodman.VarlinkCall, name, artifactName string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyErrorOccurred(err.Error())
+ return call.ReplyContainerNotFound(name, err.Error())
}
artifacts, err := ctr.GetArtifact(artifactName)
if err != nil {
@@ -527,7 +506,7 @@ func (i *LibpodAPI) ContainerArtifacts(call iopodman.VarlinkCall, name, artifact
func (i *LibpodAPI) ContainerInspectData(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyErrorOccurred(err.Error())
+ return call.ReplyContainerNotFound(name, err.Error())
}
data, err := ctr.Inspect(true)
if err != nil {
@@ -545,7 +524,7 @@ func (i *LibpodAPI) ContainerInspectData(call iopodman.VarlinkCall, name string)
func (i *LibpodAPI) ContainerStateData(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyErrorOccurred(err.Error())
+ return call.ReplyContainerNotFound(name, err.Error())
}
data, err := ctr.ContainerState()
if err != nil {
diff --git a/pkg/varlinkapi/containers_create.go b/pkg/varlinkapi/containers_create.go
index f1835a189..6b53b22c6 100644
--- a/pkg/varlinkapi/containers_create.go
+++ b/pkg/varlinkapi/containers_create.go
@@ -131,9 +131,14 @@ func varlinkCreateToCreateConfig(ctx context.Context, create iopodman.Create, ru
}
imageID := data.ID
+ var ImageVolumes map[string]struct{}
+ if data != nil && create.Image_volume_type != "ignore" {
+ ImageVolumes = data.Config.Volumes
+ }
+
config := &cc.CreateConfig{
Runtime: runtime,
- BuiltinImgVolumes: data.Config.Volumes,
+ BuiltinImgVolumes: ImageVolumes,
ConmonPidFile: create.Conmon_pidfile,
ImageVolumeType: create.Image_volume_type,
CapAdd: create.Cap_add,
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 5e0889645..210f139ce 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/imagebuildah"
- "github.com/containers/image/docker"
dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/manifest"
"github.com/containers/image/transports/alltransports"
@@ -21,13 +21,13 @@ import (
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
- sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
- "github.com/docker/go-units"
+ "github.com/containers/storage/pkg/archive"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// ListImages lists all the images in the store
@@ -37,7 +37,7 @@ func (i *LibpodAPI) ListImages(call iopodman.VarlinkCall) error {
if err != nil {
return call.ReplyErrorOccurred(fmt.Sprintf("unable to get list of images %q", err))
}
- var imageList []iopodman.ImageInList
+ var imageList []iopodman.Image
for _, image := range images {
labels, _ := image.Labels(getContext())
containers, _ := image.Containers()
@@ -52,12 +52,12 @@ func (i *LibpodAPI) ListImages(call iopodman.VarlinkCall) error {
return call.ReplyErrorOccurred(err.Error())
}
- i := iopodman.ImageInList{
+ i := iopodman.Image{
Id: image.ID(),
ParentId: image.Parent,
RepoTags: image.Names(),
RepoDigests: repoDigests,
- Created: image.Created().String(),
+ Created: image.Created().Format(time.RFC3339),
Size: int64(*size),
VirtualSize: image.VirtualSize,
Containers: int64(len(containers)),
@@ -69,11 +69,11 @@ func (i *LibpodAPI) ListImages(call iopodman.VarlinkCall) error {
return call.ReplyListImages(imageList)
}
-// GetImage returns a single image in the form of a ImageInList
-func (i *LibpodAPI) GetImage(call iopodman.VarlinkCall, name string) error {
- newImage, err := i.Runtime.ImageRuntime().NewFromLocal(name)
+// GetImage returns a single image in the form of an Image
+func (i *LibpodAPI) GetImage(call iopodman.VarlinkCall, id string) error {
+ newImage, err := i.Runtime.ImageRuntime().NewFromLocal(id)
if err != nil {
- return call.ReplyImageNotFound(err.Error())
+ return call.ReplyImageNotFound(id, err.Error())
}
labels, err := newImage.Labels(getContext())
if err != nil {
@@ -92,12 +92,12 @@ func (i *LibpodAPI) GetImage(call iopodman.VarlinkCall, name string) error {
return err
}
- il := iopodman.ImageInList{
+ il := iopodman.Image{
Id: newImage.ID(),
ParentId: newImage.Parent,
RepoTags: newImage.Names(),
RepoDigests: repoDigests,
- Created: newImage.Created().String(),
+ Created: newImage.Created().Format(time.RFC3339),
Size: int64(*size),
VirtualSize: newImage.VirtualSize,
Containers: int64(len(containers)),
@@ -109,83 +109,46 @@ func (i *LibpodAPI) GetImage(call iopodman.VarlinkCall, name string) error {
// BuildImage ...
func (i *LibpodAPI) BuildImage(call iopodman.VarlinkCall, config iopodman.BuildInfo) error {
var (
- memoryLimit int64
- memorySwap int64
- namespace []buildah.NamespaceOption
- err error
+ namespace []buildah.NamespaceOption
+ err error
)
systemContext := types.SystemContext{}
- dockerfiles := config.Dockerfile
- contextDir := ""
-
- for i := range dockerfiles {
- if strings.HasPrefix(dockerfiles[i], "http://") ||
- strings.HasPrefix(dockerfiles[i], "https://") ||
- strings.HasPrefix(dockerfiles[i], "git://") ||
- strings.HasPrefix(dockerfiles[i], "github.com/") {
- continue
- }
- absFile, err := filepath.Abs(dockerfiles[i])
- if err != nil {
- return errors.Wrapf(err, "error determining path to file %q", dockerfiles[i])
- }
- contextDir = filepath.Dir(absFile)
- dockerfiles[i], err = filepath.Rel(contextDir, absFile)
- if err != nil {
- return errors.Wrapf(err, "error determining path to file %q", dockerfiles[i])
- }
- break
- }
-
- pullPolicy := imagebuildah.PullNever
- if config.Pull {
- pullPolicy = imagebuildah.PullIfMissing
- }
+ contextDir := config.ContextDir
- if config.Pull_always {
- pullPolicy = imagebuildah.PullAlways
- }
- manifestType := "oci" //nolint
- if config.Image_format != "" {
- manifestType = config.Image_format
+ newContextDir, err := ioutil.TempDir("", "buildTarball")
+ if err != nil {
+ return call.ReplyErrorOccurred("unable to create tempdir")
}
+ logrus.Debugf("created new context dir at %s", newContextDir)
- if strings.HasPrefix(manifestType, "oci") {
- manifestType = buildah.OCIv1ImageManifest
- } else if strings.HasPrefix(manifestType, "docker") {
- manifestType = buildah.Dockerv2ImageManifest
- } else {
- return call.ReplyErrorOccurred(fmt.Sprintf("unrecognized image type %q", manifestType))
+ reader, err := os.Open(contextDir)
+ if err != nil {
+ logrus.Errorf("failed to open the context dir tar file %s", contextDir)
+ return call.ReplyErrorOccurred(fmt.Sprintf("unable to open context dir tar file %s", contextDir))
}
-
- if config.Memory != "" {
- memoryLimit, err = units.RAMInBytes(config.Memory)
- if err != nil {
- return call.ReplyErrorOccurred(err.Error())
- }
+ defer reader.Close()
+ if err := archive.Untar(reader, newContextDir, &archive.TarOptions{}); err != nil {
+ logrus.Errorf("fail to untar the context dir tarball (%s) to the context dir (%s)", contextDir, newContextDir)
+ return call.ReplyErrorOccurred(fmt.Sprintf("unable to untar context dir %s", contextDir))
}
+ logrus.Debugf("untar of %s successful", contextDir)
- if config.Memory_swap != "" {
- memorySwap, err = units.RAMInBytes(config.Memory_swap)
- if err != nil {
- return call.ReplyErrorOccurred(err.Error())
- }
- }
+ // All output (stdout, stderr) is captured in output as well
+ var output bytes.Buffer
- output := bytes.NewBuffer([]byte{})
commonOpts := &buildah.CommonBuildOptions{
- AddHost: config.Add_hosts,
- CgroupParent: config.Cgroup_parent,
- CPUPeriod: uint64(config.Cpu_period),
- CPUQuota: config.Cpu_quota,
- CPUSetCPUs: config.Cpuset_cpus,
- CPUSetMems: config.Cpuset_mems,
- Memory: memoryLimit,
- MemorySwap: memorySwap,
- ShmSize: config.Shm_size,
- Ulimit: config.Ulimit,
- Volumes: config.Volume,
+ AddHost: config.BuildOptions.AddHosts,
+ CgroupParent: config.BuildOptions.CgroupParent,
+ CPUPeriod: uint64(config.BuildOptions.CpuPeriod),
+ CPUQuota: config.BuildOptions.CpuQuota,
+ CPUSetCPUs: config.BuildOptions.CpusetCpus,
+ CPUSetMems: config.BuildOptions.CpusetMems,
+ Memory: config.BuildOptions.Memory,
+ MemorySwap: config.BuildOptions.MemorySwap,
+ ShmSize: config.BuildOptions.ShmSize,
+ Ulimit: config.BuildOptions.Ulimit,
+ Volumes: config.BuildOptions.Volume,
}
hostNetwork := buildah.NamespaceOption{
@@ -196,37 +159,74 @@ func (i *LibpodAPI) BuildImage(call iopodman.VarlinkCall, config iopodman.BuildI
namespace = append(namespace, hostNetwork)
options := imagebuildah.BuildOptions{
- ContextDirectory: contextDir,
- PullPolicy: pullPolicy,
- Compression: imagebuildah.Gzip,
- Quiet: false,
- //SignaturePolicyPath:
- Args: config.Build_args,
- //Output:
- AdditionalTags: config.Tags,
- //Runtime: runtime.
- //RuntimeArgs: ,
- OutputFormat: manifestType,
- SystemContext: &systemContext,
- CommonBuildOpts: commonOpts,
- Squash: config.Squash,
- Labels: config.Label,
- Annotations: config.Annotations,
- ReportWriter: output,
- NamespaceOptions: namespace,
+ CommonBuildOpts: commonOpts,
+ AdditionalTags: config.AdditionalTags,
+ Annotations: config.Annotations,
+ Args: config.BuildArgs,
+ CNIConfigDir: config.CniConfigDir,
+ CNIPluginPath: config.CniPluginDir,
+ Compression: stringCompressionToArchiveType(config.Compression),
+ ContextDirectory: newContextDir,
+ DefaultMountsFilePath: config.DefaultsMountFilePath,
+ Err: &output,
+ ForceRmIntermediateCtrs: config.ForceRmIntermediateCtrs,
+ IIDFile: config.Iidfile,
+ Labels: config.Label,
+ Layers: config.Layers,
+ NoCache: config.Nocache,
+ Out: &output,
+ Output: config.Output,
+ NamespaceOptions: namespace,
+ OutputFormat: config.OutputFormat,
+ PullPolicy: stringPullPolicyToType(config.PullPolicy),
+ Quiet: config.Quiet,
+ RemoveIntermediateCtrs: config.RemoteIntermediateCtrs,
+ ReportWriter: &output,
+ RuntimeArgs: config.RuntimeArgs,
+ SignaturePolicyPath: config.SignaturePolicyPath,
+ Squash: config.Squash,
+ SystemContext: &systemContext,
}
if call.WantsMore() {
call.Continues = true
}
- c := build(i.Runtime, options, config.Dockerfile)
+ var newPathDockerFiles []string
+
+ for _, d := range config.Dockerfiles {
+ if strings.HasPrefix(d, "http://") ||
+ strings.HasPrefix(d, "https://") ||
+ strings.HasPrefix(d, "git://") ||
+ strings.HasPrefix(d, "github.com/") {
+ newPathDockerFiles = append(newPathDockerFiles, d)
+ continue
+ }
+ base := filepath.Base(d)
+ newPathDockerFiles = append(newPathDockerFiles, filepath.Join(newContextDir, base))
+ }
+
+ c := make(chan error)
+ go func() {
+ err := i.Runtime.Build(getContext(), options, newPathDockerFiles...)
+ c <- err
+ close(c)
+ }()
+
var log []string
done := false
for {
- line, err := output.ReadString('\n')
+ outputLine, err := output.ReadString('\n')
if err == nil {
- log = append(log, line)
+ log = append(log, outputLine)
+ if call.WantsMore() {
+ // we want to reply with what we have
+ br := iopodman.MoreResponse{
+ Logs: log,
+ }
+ call.ReplyBuildImage(br)
+ log = []string{}
+ }
continue
} else if err == io.EOF {
select {
@@ -236,15 +236,10 @@ func (i *LibpodAPI) BuildImage(call iopodman.VarlinkCall, config iopodman.BuildI
}
done = true
default:
- if !call.WantsMore() {
+ if call.WantsMore() {
time.Sleep(1 * time.Second)
break
}
- br := iopodman.BuildResponse{
- Logs: log,
- }
- call.ReplyBuildImage(br)
- log = []string{}
}
} else {
return call.ReplyErrorOccurred(err.Error())
@@ -254,40 +249,24 @@ func (i *LibpodAPI) BuildImage(call iopodman.VarlinkCall, config iopodman.BuildI
}
}
call.Continues = false
- newImage, err := i.Runtime.ImageRuntime().NewFromLocal(config.Tags[0])
+
+ newImage, err := i.Runtime.ImageRuntime().NewFromLocal(config.Output)
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
- br := iopodman.BuildResponse{
+ br := iopodman.MoreResponse{
Logs: log,
Id: newImage.ID(),
}
return call.ReplyBuildImage(br)
}
-func build(runtime *libpod.Runtime, options imagebuildah.BuildOptions, dockerfiles []string) chan error {
- c := make(chan error)
- go func() {
- err := runtime.Build(getContext(), options, dockerfiles...)
- c <- err
- close(c)
- }()
-
- return c
-}
-
-// CreateImage ...
-// TODO With Pull being added, should we skip Create?
-func (i *LibpodAPI) CreateImage(call iopodman.VarlinkCall) error {
- return call.ReplyMethodNotImplemented("CreateImage")
-}
-
// InspectImage returns an image's inspect information as a string that can be serialized.
// Requires an image ID or name
func (i *LibpodAPI) InspectImage(call iopodman.VarlinkCall, name string) error {
newImage, err := i.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
- return call.ReplyImageNotFound(name)
+ return call.ReplyImageNotFound(name, err.Error())
}
inspectInfo, err := newImage.Inspect(getContext())
if err != nil {
@@ -305,7 +284,7 @@ func (i *LibpodAPI) InspectImage(call iopodman.VarlinkCall, name string) error {
func (i *LibpodAPI) HistoryImage(call iopodman.VarlinkCall, name string) error {
newImage, err := i.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
- return call.ReplyImageNotFound(name)
+ return call.ReplyImageNotFound(name, err.Error())
}
history, err := newImage.History(getContext())
if err != nil {
@@ -315,7 +294,7 @@ func (i *LibpodAPI) HistoryImage(call iopodman.VarlinkCall, name string) error {
for _, hist := range history {
imageHistory := iopodman.ImageHistory{
Id: hist.ID,
- Created: hist.Created.String(),
+ Created: hist.Created.Format(time.RFC3339),
CreatedBy: hist.CreatedBy,
Tags: newImage.Names(),
Size: hist.Size,
@@ -327,15 +306,14 @@ func (i *LibpodAPI) HistoryImage(call iopodman.VarlinkCall, name string) error {
}
 // PushImage pushes a local image to a registry
-func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, tlsVerify bool, signaturePolicy, creds, certDir string, compress bool, format string, removeSignatures bool, signBy string) error {
+func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, tlsVerify *bool, signaturePolicy, creds, certDir string, compress bool, format string, removeSignatures bool, signBy string) error {
var (
registryCreds *types.DockerAuthConfig
manifestType string
)
-
newImage, err := i.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
- return call.ReplyImageNotFound(err.Error())
+ return call.ReplyImageNotFound(name, err.Error())
}
destname := name
if tag != "" {
@@ -352,8 +330,8 @@ func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, tlsVe
DockerRegistryCreds: registryCreds,
DockerCertPath: certDir,
}
- if !tlsVerify {
- dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+ if tlsVerify != nil {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!*tlsVerify)
}
if format != "" {
switch format {
@@ -372,17 +350,66 @@ func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, tlsVe
SignBy: signBy,
}
- if err := newImage.PushImageToHeuristicDestination(getContext(), destname, manifestType, "", signaturePolicy, nil, compress, so, &dockerRegistryOptions, nil); err != nil {
- return call.ReplyErrorOccurred(err.Error())
+ if call.WantsMore() {
+ call.Continues = true
}
- return call.ReplyPushImage(newImage.ID())
+
+ output := bytes.NewBuffer([]byte{})
+ c := make(chan error)
+ go func() {
+ err := newImage.PushImageToHeuristicDestination(getContext(), destname, manifestType, "", signaturePolicy, output, compress, so, &dockerRegistryOptions, nil)
+ c <- err
+ close(c)
+ }()
+
+ // TODO: when pull output gets fixed for the remote client, we should look into turning the loop
+ // below into something reusable; the same pattern is used in build as well
+ var log []string
+ done := false
+ for {
+ line, err := output.ReadString('\n')
+ if err == nil {
+ log = append(log, line)
+ continue
+ } else if err == io.EOF {
+ select {
+ case err := <-c:
+ if err != nil {
+ logrus.Errorf("reading of output during push failed for %s", newImage.ID())
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ done = true
+ default:
+ if !call.WantsMore() {
+ time.Sleep(1 * time.Second)
+ break
+ }
+ br := iopodman.MoreResponse{
+ Logs: log,
+ }
+ call.ReplyPushImage(br)
+ log = []string{}
+ }
+ } else {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if done {
+ break
+ }
+ }
+ call.Continues = false
+
+ br := iopodman.MoreResponse{
+ Logs: log,
+ }
+ return call.ReplyPushImage(br)
}
// TagImage accepts an image name and tag as strings and tags an image in the local store.
func (i *LibpodAPI) TagImage(call iopodman.VarlinkCall, name, tag string) error {
newImage, err := i.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
- return call.ReplyImageNotFound(name)
+ return call.ReplyImageNotFound(name, err.Error())
}
if err := newImage.TagImage(tag); err != nil {
return call.ReplyErrorOccurred(err.Error())
@@ -396,7 +423,7 @@ func (i *LibpodAPI) RemoveImage(call iopodman.VarlinkCall, name string, force bo
ctx := getContext()
newImage, err := i.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
- return call.ReplyImageNotFound(name)
+ return call.ReplyImageNotFound(name, err.Error())
}
_, err = i.Runtime.RemoveImage(ctx, newImage, force)
if err != nil {
@@ -405,38 +432,57 @@ func (i *LibpodAPI) RemoveImage(call iopodman.VarlinkCall, name string, force bo
return call.ReplyRemoveImage(newImage.ID())
}
-// SearchImage searches all registries configured in /etc/containers/registries.conf for an image
+// SearchImages searches all registries configured in /etc/containers/registries.conf for an image
// Requires an image name and a search limit as int
-func (i *LibpodAPI) SearchImage(call iopodman.VarlinkCall, name string, limit int64) error {
- sc := image.GetSystemContext("", "", false)
- registries, err := sysreg.GetRegistries()
+func (i *LibpodAPI) SearchImages(call iopodman.VarlinkCall, query string, limit *int64, tlsVerify *bool, filter iopodman.ImageSearchFilter) error {
+ // Transform all arguments to proper types first
+ argLimit := 0
+ argTLSVerify := types.OptionalBoolUndefined
+ argIsOfficial := types.OptionalBoolUndefined
+ argIsAutomated := types.OptionalBoolUndefined
+ if limit != nil {
+ argLimit = int(*limit)
+ }
+ if tlsVerify != nil {
+ argTLSVerify = types.NewOptionalBool(!*tlsVerify)
+ }
+ if filter.Is_official != nil {
+ argIsOfficial = types.NewOptionalBool(*filter.Is_official)
+ }
+ if filter.Is_automated != nil {
+ argIsAutomated = types.NewOptionalBool(*filter.Is_automated)
+ }
+
+ // Transform a SearchFilter the backend can deal with
+ sFilter := image.SearchFilter{
+ IsOfficial: argIsOfficial,
+ IsAutomated: argIsAutomated,
+ Stars: int(filter.Star_count),
+ }
+
+ searchOptions := image.SearchOptions{
+ Limit: argLimit,
+ Filter: sFilter,
+ InsecureSkipTLSVerify: argTLSVerify,
+ }
+ results, err := image.SearchImages(query, searchOptions)
if err != nil {
- return call.ReplyErrorOccurred(fmt.Sprintf("unable to get system registries: %q", err))
+ return call.ReplyErrorOccurred(err.Error())
}
- var imageResults []iopodman.ImageSearch
- for _, reg := range registries {
- results, err := docker.SearchRegistry(getContext(), sc, reg, name, int(limit))
- if err != nil {
- // If we are searching multiple registries, don't make something like an
- // auth error fatal. Unfortunately we cannot differentiate between auth
- // errors and other possibles errors
- if len(registries) > 1 {
- continue
- }
- return call.ReplyErrorOccurred(err.Error())
- }
- for _, result := range results {
- i := iopodman.ImageSearch{
- Description: result.Description,
- Is_official: result.IsOfficial,
- Is_automated: result.IsAutomated,
- Name: result.Name,
- Star_count: int64(result.StarCount),
- }
- imageResults = append(imageResults, i)
+
+ var imageResults []iopodman.ImageSearchResult
+ for _, result := range results {
+ i := iopodman.ImageSearchResult{
+ Registry: result.Index,
+ Description: result.Description,
+ Is_official: result.Official == "[OK]",
+ Is_automated: result.Automated == "[OK]",
+ Name: result.Name,
+ Star_count: int64(result.Stars),
}
+ imageResults = append(imageResults, i)
}
- return call.ReplySearchImage(imageResults)
+ return call.ReplySearchImages(imageResults)
}
 // DeleteUnusedImages deletes any images that do not have containers associated with them.
@@ -466,7 +512,7 @@ func (i *LibpodAPI) DeleteUnusedImages(call iopodman.VarlinkCall) error {
func (i *LibpodAPI) Commit(call iopodman.VarlinkCall, name, imageName string, changes []string, author, message string, pause bool, manifestType string) error {
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
- return call.ReplyContainerNotFound(name)
+ return call.ReplyContainerNotFound(name, err.Error())
}
sc := image.GetSystemContext(i.Runtime.GetConfig().SignaturePolicyPath, "", false)
var mimeType string
@@ -530,7 +576,7 @@ func (i *LibpodAPI) ImportImage(call iopodman.VarlinkCall, source, reference, me
func (i *LibpodAPI) ExportImage(call iopodman.VarlinkCall, name, destination string, compress bool, tags []string) error {
newImage, err := i.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
- return call.ReplyImageNotFound(name)
+ return call.ReplyImageNotFound(name, err.Error())
}
additionalTags, err := image.GetAdditionalTags(tags)
@@ -545,7 +591,7 @@ func (i *LibpodAPI) ExportImage(call iopodman.VarlinkCall, name, destination str
}
// PullImage pulls an image from a registry to the image store.
-func (i *LibpodAPI) PullImage(call iopodman.VarlinkCall, name string, certDir, creds, signaturePolicy string, tlsVerify bool) error {
+func (i *LibpodAPI) PullImage(call iopodman.VarlinkCall, name string, certDir, creds, signaturePolicy string, tlsVerify *bool) error {
var (
registryCreds *types.DockerAuthConfig
imageID string
@@ -562,30 +608,80 @@ func (i *LibpodAPI) PullImage(call iopodman.VarlinkCall, name string, certDir, c
DockerRegistryCreds: registryCreds,
DockerCertPath: certDir,
}
- if tlsVerify {
- dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
+ if tlsVerify != nil {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!*tlsVerify)
}
so := image.SigningOptions{}
- if strings.HasPrefix(name, dockerarchive.Transport.Name()+":") {
- srcRef, err := alltransports.ParseImageName(name)
- if err != nil {
- return errors.Wrapf(err, "error parsing %q", name)
+ if call.WantsMore() {
+ call.Continues = true
+ }
+ output := bytes.NewBuffer([]byte{})
+ c := make(chan error)
+ go func() {
+ //err := newImage.PushImageToHeuristicDestination(getContext(), destname, manifestType, "", signaturePolicy, output, compress, so, &dockerRegistryOptions, nil)
+ if strings.HasPrefix(name, dockerarchive.Transport.Name()+":") {
+ srcRef, err := alltransports.ParseImageName(name)
+ if err != nil {
+ c <- errors.Wrapf(err, "error parsing %q", name)
+ }
+ newImage, err := i.Runtime.ImageRuntime().LoadFromArchiveReference(getContext(), srcRef, signaturePolicy, output)
+ if err != nil {
+ c <- errors.Wrapf(err, "error pulling image from %q", name)
+ }
+ imageID = newImage[0].ID()
+ } else {
+ newImage, err := i.Runtime.ImageRuntime().New(getContext(), name, signaturePolicy, "", output, &dockerRegistryOptions, so, false, nil)
+ if err != nil {
+ c <- errors.Wrapf(err, "unable to pull %s", name)
+ }
+ imageID = newImage.ID()
}
- newImage, err := i.Runtime.ImageRuntime().LoadFromArchiveReference(getContext(), srcRef, signaturePolicy, nil)
- if err != nil {
- return errors.Wrapf(err, "error pulling image from %q", name)
+ c <- nil
+ close(c)
+ }()
+
+ var log []string
+ done := false
+ for {
+ line, err := output.ReadString('\n')
+ if err == nil {
+ log = append(log, line)
+ continue
+ } else if err == io.EOF {
+ select {
+ case err := <-c:
+ if err != nil {
+ logrus.Errorf("reading of output during pull failed for %s", name)
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ done = true
+ default:
+ if !call.WantsMore() {
+ time.Sleep(1 * time.Second)
+ break
+ }
+ br := iopodman.MoreResponse{
+ Logs: log,
+ }
+ call.ReplyPullImage(br)
+ log = []string{}
+ }
+ } else {
+ return call.ReplyErrorOccurred(err.Error())
}
- imageID = newImage[0].ID()
- } else {
- newImage, err := i.Runtime.ImageRuntime().New(getContext(), name, signaturePolicy, "", nil, &dockerRegistryOptions, so, false, nil)
- if err != nil {
- return call.ReplyErrorOccurred(fmt.Sprintf("unable to pull %s: %s", name, err.Error()))
+ if done {
+ break
}
- imageID = newImage.ID()
}
- return call.ReplyPullImage(imageID)
+ call.Continues = false
+
+ br := iopodman.MoreResponse{
+ Logs: log,
+ Id: imageID,
+ }
+ return call.ReplyPullImage(br)
}
// ImageExists returns bool as to whether the input image exists in local storage
@@ -606,8 +702,8 @@ func (i *LibpodAPI) ContainerRunlabel(call iopodman.VarlinkCall, input iopodman.
dockerRegistryOptions := image.DockerRegistryOptions{
DockerCertPath: input.CertDir,
}
- if !input.TlsVerify {
- dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+ if input.TlsVerify != nil {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!*input.TlsVerify)
}
stdErr := os.Stderr
@@ -640,3 +736,172 @@ func (i *LibpodAPI) ImagesPrune(call iopodman.VarlinkCall, all bool) error {
}
return call.ReplyImagesPrune(prunedImages)
}
+
+// ImageSave ....
+func (i *LibpodAPI) ImageSave(call iopodman.VarlinkCall, options iopodman.ImageSaveOptions) error {
+ newImage, err := i.Runtime.ImageRuntime().NewFromLocal(options.Name)
+ if err != nil {
+ if errors.Cause(err) == libpod.ErrNoSuchImage {
+ return call.ReplyImageNotFound(options.Name, err.Error())
+ }
+ return call.ReplyErrorOccurred(err.Error())
+ }
+
+ // Determine if we are dealing with a tarball or dir
+ var output string
+ outputToDir := false
+ if options.Format == "oci-archive" || options.Format == "docker-archive" {
+ tempfile, err := ioutil.TempFile("", "varlink_send")
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ output = tempfile.Name()
+ tempfile.Close()
+ } else {
+ var err error
+ outputToDir = true
+ output, err = ioutil.TempDir("", "varlink_send")
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ }
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if call.WantsMore() {
+ call.Continues = true
+ }
+
+ saveOutput := bytes.NewBuffer([]byte{})
+ c := make(chan error)
+ go func() {
+ err := newImage.Save(getContext(), options.Name, options.Format, output, options.MoreTags, options.Quiet, options.Compress)
+ c <- err
+ close(c)
+ }()
+ var log []string
+ done := false
+ for {
+ line, err := saveOutput.ReadString('\n')
+ if err == nil {
+ log = append(log, line)
+ continue
+ } else if err == io.EOF {
+ select {
+ case err := <-c:
+ if err != nil {
+ logrus.Errorf("reading of output during save failed for %s", newImage.ID())
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ done = true
+ default:
+ if !call.WantsMore() {
+ time.Sleep(1 * time.Second)
+ break
+ }
+ br := iopodman.MoreResponse{
+ Logs: log,
+ }
+ call.ReplyImageSave(br)
+ log = []string{}
+ }
+ } else {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if done {
+ break
+ }
+ }
+ call.Continues = false
+
+ sendfile := output
+ // Image has been saved to `output`
+ if outputToDir {
+ // If the output is a directory, we need to tar up the directory to send it back
+ //Create a tempfile for the directory tarball
+ outputFile, err := ioutil.TempFile("", "varlink_save_dir")
+ if err != nil {
+ return err
+ }
+ defer outputFile.Close()
+ if err := utils.TarToFilesystem(output, outputFile); err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ sendfile = outputFile.Name()
+ }
+ br := iopodman.MoreResponse{
+ Logs: log,
+ Id: sendfile,
+ }
+ return call.ReplyImageSave(br)
+}
+
+// LoadImage ...
+func (i *LibpodAPI) LoadImage(call iopodman.VarlinkCall, name, inputFile string, deleteInputFile, quiet bool) error {
+ var (
+ names string
+ writer io.Writer
+ err error
+ )
+ if !quiet {
+ writer = os.Stderr
+ }
+
+ if call.WantsMore() {
+ call.Continues = true
+ }
+ output := bytes.NewBuffer([]byte{})
+
+ c := make(chan error)
+ go func() {
+ names, err = i.Runtime.LoadImage(getContext(), name, inputFile, writer, "")
+ c <- err
+ close(c)
+ }()
+
+ var log []string
+ done := false
+ for {
+ line, err := output.ReadString('\n')
+ if err == nil {
+ log = append(log, line)
+ continue
+ } else if err == io.EOF {
+ select {
+ case err := <-c:
+ if err != nil {
+ logrus.Error(err)
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ done = true
+ default:
+ if !call.WantsMore() {
+ time.Sleep(1 * time.Second)
+ break
+ }
+ br := iopodman.MoreResponse{
+ Logs: log,
+ }
+ call.ReplyLoadImage(br)
+ log = []string{}
+ }
+ } else {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if done {
+ break
+ }
+ }
+ call.Continues = false
+
+ br := iopodman.MoreResponse{
+ Logs: log,
+ Id: names,
+ }
+ if deleteInputFile {
+ if err := os.Remove(inputFile); err != nil {
+ logrus.Errorf("unable to delete input file %s", inputFile)
+ }
+ }
+ return call.ReplyLoadImage(br)
+}
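
As the TODO in PushImage notes, the same goroutine-plus-buffer log-forwarding loop now appears in BuildImage, PushImage, PullImage, ImageSave and LoadImage. One possible way to factor it out, purely as a sketch; the helper name and the reply-callback signature are assumptions, not part of this patch:

```
package varlinkapi

import (
	"bytes"
	"io"
	"time"

	"github.com/containers/libpod/cmd/podman/varlink"
)

// forwardOutput drains output line by line while the worker goroutine runs,
// streaming partial MoreResponse replies when the client asked for more, and
// returns the remaining log once the worker sends its result on done.
func forwardOutput(output *bytes.Buffer, done <-chan error, wantsMore bool,
	reply func(iopodman.MoreResponse) error) ([]string, error) {
	var log []string
	for {
		line, err := output.ReadString('\n')
		if err == nil {
			log = append(log, line)
			continue
		} else if err != io.EOF {
			return log, err
		}
		// Buffer is drained: either the worker has finished, or we wait and poll again.
		select {
		case werr := <-done:
			return log, werr
		default:
			if !wantsMore {
				time.Sleep(1 * time.Second)
				break
			}
			// Stream what we have so far and reset the accumulated log.
			if err := reply(iopodman.MoreResponse{Logs: log}); err != nil {
				return log, err
			}
			log = []string{}
		}
	}
}
```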
diff --git a/pkg/varlinkapi/pods.go b/pkg/varlinkapi/pods.go
index 6e758786a..4ca4c4270 100644
--- a/pkg/varlinkapi/pods.go
+++ b/pkg/varlinkapi/pods.go
@@ -2,6 +2,7 @@ package varlinkapi
import (
"encoding/json"
+ "github.com/containers/libpod/pkg/adapter/shortcuts"
"github.com/containers/libpod/pkg/rootless"
"syscall"
@@ -13,10 +14,6 @@ import (
// CreatePod ...
func (i *LibpodAPI) CreatePod(call iopodman.VarlinkCall, create iopodman.PodCreate) error {
var options []libpod.PodCreateOption
-
- if create.InfraCommand != "" || create.InfraImage != "" {
- return call.ReplyErrorOccurred("the infra-command and infra-image options are not supported yet")
- }
if create.CgroupParent != "" {
options = append(options, libpod.WithPodCgroupParent(create.CgroupParent))
}
@@ -89,7 +86,7 @@ func (i *LibpodAPI) ListPods(call iopodman.VarlinkCall) error {
func (i *LibpodAPI) GetPod(call iopodman.VarlinkCall, name string) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
opts := shared.PsOptions{}
@@ -105,7 +102,7 @@ func (i *LibpodAPI) GetPod(call iopodman.VarlinkCall, name string) error {
func (i *LibpodAPI) InspectPod(call iopodman.VarlinkCall, name string) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
inspectData, err := pod.Inspect()
if err != nil {
@@ -122,7 +119,7 @@ func (i *LibpodAPI) InspectPod(call iopodman.VarlinkCall, name string) error {
func (i *LibpodAPI) StartPod(call iopodman.VarlinkCall, name string) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
ctnrs, err := pod.AllContainers()
if err != nil {
@@ -143,7 +140,7 @@ func (i *LibpodAPI) StartPod(call iopodman.VarlinkCall, name string) error {
func (i *LibpodAPI) StopPod(call iopodman.VarlinkCall, name string, timeout int64) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
ctrErrs, err := pod.StopWithTimeout(getContext(), true, int(timeout))
callErr := handlePodCall(call, pod, ctrErrs, err)
@@ -157,7 +154,7 @@ func (i *LibpodAPI) StopPod(call iopodman.VarlinkCall, name string, timeout int6
func (i *LibpodAPI) RestartPod(call iopodman.VarlinkCall, name string) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
ctnrs, err := pod.AllContainers()
if err != nil {
@@ -184,7 +181,7 @@ func (i *LibpodAPI) KillPod(call iopodman.VarlinkCall, name string, signal int64
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
ctrErrs, err := pod.Kill(killSignal)
callErr := handlePodCall(call, pod, ctrErrs, err)
@@ -198,7 +195,7 @@ func (i *LibpodAPI) KillPod(call iopodman.VarlinkCall, name string, signal int64
func (i *LibpodAPI) PausePod(call iopodman.VarlinkCall, name string) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
ctrErrs, err := pod.Pause()
callErr := handlePodCall(call, pod, ctrErrs, err)
@@ -212,7 +209,7 @@ func (i *LibpodAPI) PausePod(call iopodman.VarlinkCall, name string) error {
func (i *LibpodAPI) UnpausePod(call iopodman.VarlinkCall, name string) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
ctrErrs, err := pod.Unpause()
callErr := handlePodCall(call, pod, ctrErrs, err)
@@ -227,7 +224,7 @@ func (i *LibpodAPI) RemovePod(call iopodman.VarlinkCall, name string, force bool
ctx := getContext()
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
if err = i.Runtime.RemovePod(ctx, pod, force, force); err != nil {
return call.ReplyErrorOccurred(err.Error())
@@ -240,7 +237,7 @@ func (i *LibpodAPI) RemovePod(call iopodman.VarlinkCall, name string, force bool
func (i *LibpodAPI) GetPodStats(call iopodman.VarlinkCall, name string) error {
pod, err := i.Runtime.LookupPod(name)
if err != nil {
- return call.ReplyPodNotFound(name)
+ return call.ReplyPodNotFound(name, err.Error())
}
prevStats := make(map[string]*libpod.ContainerStats)
podStats, err := pod.GetPodStats(prevStats)
@@ -271,3 +268,34 @@ func (i *LibpodAPI) GetPodStats(call iopodman.VarlinkCall, name string) error {
}
return call.ReplyGetPodStats(pod.ID(), containersStats)
}
+
+// GetPodsByContext returns a slice of pod IDs based on the all/latest flags or a list of names
+func (i *LibpodAPI) GetPodsByContext(call iopodman.VarlinkCall, all, latest bool, input []string) error {
+ var podids []string
+
+ pods, err := shortcuts.GetPodsByContext(all, latest, input, i.Runtime)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ for _, p := range pods {
+ podids = append(podids, p.ID())
+ }
+ return call.ReplyGetPodsByContext(podids)
+}
+
+// PodStateData returns a pod's state data in string format
+func (i *LibpodAPI) PodStateData(call iopodman.VarlinkCall, name string) error {
+ pod, err := i.Runtime.LookupPod(name)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ data, err := pod.Inspect()
+ if err != nil {
+ return call.ReplyErrorOccurred("unable to obtain pod state")
+ }
+ b, err := json.Marshal(data)
+ if err != nil {
+ return call.ReplyErrorOccurred("unable to serialize pod inspect data")
+ }
+ return call.ReplyPodStateData(string(b))
+}
diff --git a/pkg/varlinkapi/system.go b/pkg/varlinkapi/system.go
index 376502f21..3f32615ec 100644
--- a/pkg/varlinkapi/system.go
+++ b/pkg/varlinkapi/system.go
@@ -3,6 +3,7 @@ package varlinkapi
import (
goruntime "runtime"
"strings"
+ "time"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
@@ -15,22 +16,14 @@ func (i *LibpodAPI) GetVersion(call iopodman.VarlinkCall) error {
return err
}
- return call.ReplyGetVersion(iopodman.Version{
- Remote_api_version: versionInfo.RemoteAPIVersion,
- Version: versionInfo.Version,
- Go_version: versionInfo.GoVersion,
- Git_commit: versionInfo.GitCommit,
- Built: versionInfo.Built,
- Os_arch: versionInfo.OsArch,
- })
-}
-
-// Ping returns a simple string "OK" response for clients to make sure
-// the service is working.
-func (i *LibpodAPI) Ping(call iopodman.VarlinkCall) error {
- return call.ReplyPing(iopodman.StringResponse{
- Message: "OK",
- })
+ return call.ReplyGetVersion(
+ versionInfo.Version,
+ versionInfo.GoVersion,
+ versionInfo.GitCommit,
+ time.Unix(versionInfo.Built, 0).Format(time.RFC3339),
+ versionInfo.OsArch,
+ versionInfo.RemoteAPIVersion,
+ )
}
// GetInfo returns details about the podman host and its stores
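
GetVersion now returns the version fields positionally and renders the build timestamp as RFC 3339 instead of a raw Unix value. A small sketch with a made-up timestamp:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	var built int64 = 1550000000 // stand-in for versionInfo.Built
	fmt.Println(time.Unix(built, 0).Format(time.RFC3339))
	// e.g. 2019-02-12T19:33:20Z (the offset depends on the local time zone)
}
```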
diff --git a/pkg/varlinkapi/transfers.go b/pkg/varlinkapi/transfers.go
index 0cb7e5e2e..9a97bc810 100644
--- a/pkg/varlinkapi/transfers.go
+++ b/pkg/varlinkapi/transfers.go
@@ -8,6 +8,7 @@ import (
"os"
"github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/sirupsen/logrus"
)
// SendFile allows a client to send a file to the varlink server
@@ -34,6 +35,7 @@ func (i *LibpodAPI) SendFile(call iopodman.VarlinkCall, ftype string, length int
return err
}
+ logrus.Debugf("successfully received %s", outputFile.Name())
// Send an ACK to the client
call.Call.Writer.WriteString(fmt.Sprintf("%s:", outputFile.Name()))
call.Call.Writer.Flush()
diff --git a/pkg/varlinkapi/util.go b/pkg/varlinkapi/util.go
index a80c8db41..7e487c03a 100644
--- a/pkg/varlinkapi/util.go
+++ b/pkg/varlinkapi/util.go
@@ -3,11 +3,14 @@ package varlinkapi
import (
"context"
"strconv"
+ "strings"
"time"
+ "github.com/containers/buildah"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
+ "github.com/containers/storage/pkg/archive"
)
// getContext returns a non-nil, empty context
@@ -15,7 +18,7 @@ func getContext() context.Context {
return context.TODO()
}
-func makeListContainer(containerID string, batchInfo shared.BatchContainerStruct) iopodman.ListContainerData {
+func makeListContainer(containerID string, batchInfo shared.BatchContainerStruct) iopodman.Container {
var (
mounts []iopodman.ContainerMount
ports []iopodman.ContainerPortMappings
@@ -56,12 +59,12 @@ func makeListContainer(containerID string, batchInfo shared.BatchContainerStruct
Ipc: ns.IPC,
}
- lc := iopodman.ListContainerData{
+ lc := iopodman.Container{
Id: containerID,
Image: batchInfo.ConConfig.RootfsImageName,
Imageid: batchInfo.ConConfig.RootfsImageID,
Command: batchInfo.ConConfig.Spec.Process.Args,
- Createdat: batchInfo.ConConfig.CreatedTime.String(),
+ Createdat: batchInfo.ConConfig.CreatedTime.Format(time.RFC3339),
Runningfor: time.Since(batchInfo.ConConfig.CreatedTime).String(),
Status: batchInfo.ConState.String(),
Ports: ports,
@@ -107,7 +110,7 @@ func makeListPod(pod *libpod.Pod, batchInfo shared.PsOptions) (iopodman.ListPodD
listPodsContainers = append(listPodsContainers, makeListPodContainers(ctr.ID(), batchInfo))
}
listPod := iopodman.ListPodData{
- Createdat: pod.CreatedTime().String(),
+ Createdat: pod.CreatedTime().Format(time.RFC3339),
Id: pod.ID(),
Name: pod.Name(),
Status: status,
@@ -133,3 +136,27 @@ func handlePodCall(call iopodman.VarlinkCall, pod *libpod.Pod, ctrErrs map[strin
return nil
}
+
+func stringCompressionToArchiveType(s string) archive.Compression {
+ switch strings.ToUpper(s) {
+ case "BZIP2":
+ return archive.Bzip2
+ case "GZIP":
+ return archive.Gzip
+ case "XZ":
+ return archive.Xz
+ }
+ return archive.Uncompressed
+}
+
+func stringPullPolicyToType(s string) buildah.PullPolicy {
+ switch strings.ToUpper(s) {
+ case "PULLIFMISSING":
+ return buildah.PullIfMissing
+ case "PULLALWAYS":
+ return buildah.PullAlways
+ case "PULLNEVER":
+ return buildah.PullNever
+ }
+ return buildah.PullIfMissing
+}
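
The two helpers above normalize the free-form strings arriving over varlink, falling back to safe defaults for anything unrecognized. A short sketch, assuming it lives alongside them in pkg/varlinkapi:

```
// Sketch only: case-insensitive mapping plus the fall-through defaults.
func exampleMappings() {
	_ = stringCompressionToArchiveType("gzip")    // archive.Gzip
	_ = stringCompressionToArchiveType("unknown") // archive.Uncompressed (default)
	_ = stringPullPolicyToType("PullAlways")      // buildah.PullAlways
	_ = stringPullPolicyToType("")                // buildah.PullIfMissing (default)
}
```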
diff --git a/pkg/varlinkapi/volumes.go b/pkg/varlinkapi/volumes.go
new file mode 100644
index 000000000..02874d2b1
--- /dev/null
+++ b/pkg/varlinkapi/volumes.go
@@ -0,0 +1,90 @@
+package varlinkapi
+
+import (
+ "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod"
+)
+
+// VolumeCreate creates a libpod volume based on input from a varlink connection
+func (i *LibpodAPI) VolumeCreate(call iopodman.VarlinkCall, options iopodman.VolumeCreateOpts) error {
+ var volumeOptions []libpod.VolumeCreateOption
+
+ if len(options.VolumeName) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeName(options.VolumeName))
+ }
+ if len(options.Driver) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeDriver(options.Driver))
+ }
+ if len(options.Labels) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeLabels(options.Labels))
+ }
+ if len(options.Options) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeOptions(options.Options))
+ }
+ newVolume, err := i.Runtime.NewVolume(getContext(), volumeOptions...)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyVolumeCreate(newVolume.Name())
+}
+
+// VolumeRemove removes volumes by options.All or options.Volumes
+func (i *LibpodAPI) VolumeRemove(call iopodman.VarlinkCall, options iopodman.VolumeRemoveOpts) error {
+ deletedVolumes, err := i.Runtime.RemoveVolumes(getContext(), options.Volumes, options.All, options.Force)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyVolumeRemove(deletedVolumes)
+}
+
+// GetVolumes returns all the volumes known to the remote system
+func (i *LibpodAPI) GetVolumes(call iopodman.VarlinkCall, args []string, all bool) error {
+ var (
+ err error
+ reply []*libpod.Volume
+ volumes []iopodman.Volume
+ )
+ if all {
+ reply, err = i.Runtime.GetAllVolumes()
+ } else {
+ for _, v := range args {
+ vol, err := i.Runtime.GetVolume(v)
+ if err != nil {
+ return err
+ }
+ reply = append(reply, vol)
+ }
+ }
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ // Build the iopodman.Volume struct for the return
+ for _, v := range reply {
+ newVol := iopodman.Volume{
+ Driver: v.Driver(),
+ Labels: v.Labels(),
+ MountPoint: v.MountPoint(),
+ Name: v.Name(),
+ Options: v.Options(),
+ Scope: v.Scope(),
+ }
+ volumes = append(volumes, newVol)
+ }
+ return call.ReplyGetVolumes(volumes)
+}
+
+// VolumesPrune removes unused volumes via a varlink call
+func (i *LibpodAPI) VolumesPrune(call iopodman.VarlinkCall) error {
+ var errs []string
+ prunedNames, prunedErrors := i.Runtime.PruneVolumes(getContext())
+ if len(prunedErrors) == 0 {
+ return call.ReplyVolumesPrune(prunedNames, []string{})
+ }
+
+ // We need to take the errors and capture their strings to go back over
+ // varlink
+ for _, e := range prunedErrors {
+ errs = append(errs, e.Error())
+ }
+ return call.ReplyVolumesPrune(prunedNames, errs)
+}
diff --git a/test/README.md b/test/README.md
index 90dcdfe3d..ef3bfbcf9 100644
--- a/test/README.md
+++ b/test/README.md
@@ -66,7 +66,7 @@ switch is optional.
You can run a single file of integration tests using the go test command:
```
-GOPATH=~/go go test -v test/e2e/libpod_suite_test.go test/e2e/config.go test/e2e/config_amd64.go test/e2e/your_test.go
+GOPATH=~/go go test -v test/e2e/libpod_suite_test.go test/e2e/common_test.go test/e2e/config.go test/e2e/config_amd64.go test/e2e/your_test.go
```
#### Run all tests like PAPR
diff --git a/test/bin2img/bin2img.go b/test/bin2img/bin2img.go
deleted file mode 100644
index ed493dcc1..000000000
--- a/test/bin2img/bin2img.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package main
-
-import (
- "archive/tar"
- "bytes"
- "context"
- "encoding/json"
- "io"
- "os"
- "runtime"
-
- "github.com/containers/image/pkg/blobinfocache"
- "github.com/containers/image/storage"
- "github.com/containers/image/types"
- sstorage "github.com/containers/storage"
- "github.com/containers/storage/pkg/reexec"
- digest "github.com/opencontainers/go-digest"
- specs "github.com/opencontainers/image-spec/specs-go"
- "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
-)
-
-func main() {
- if reexec.Init() {
- return
- }
-
- app := cli.NewApp()
- app.Name = "bin2img"
- app.Usage = "barebones image builder"
- app.Version = "0.0.1"
-
- app.Flags = []cli.Flag{
- cli.BoolFlag{
- Name: "debug",
- Usage: "turn on debug logging",
- },
- cli.StringFlag{
- Name: "root",
- Usage: "graph root directory",
- },
- cli.StringFlag{
- Name: "runroot",
- Usage: "run root directory",
- },
- cli.StringFlag{
- Name: "storage-driver",
- Usage: "storage driver",
- },
- cli.StringSliceFlag{
- Name: "storage-opt",
- Usage: "storage option",
- },
- cli.StringFlag{
- Name: "image-name",
- Usage: "set image name",
- Value: "kubernetes/pause",
- },
- cli.StringFlag{
- Name: "source-binary",
- Usage: "source binary",
- Value: "../../pause/pause",
- },
- cli.StringFlag{
- Name: "image-binary",
- Usage: "image binary",
- Value: "/pause",
- },
- }
-
- app.Action = func(c *cli.Context) error {
- debug := c.GlobalBool("debug")
- rootDir := c.GlobalString("root")
- runrootDir := c.GlobalString("runroot")
- storageDriver := c.GlobalString("storage-driver")
- storageOptions := c.GlobalStringSlice("storage-opt")
- imageName := c.GlobalString("image-name")
- sourceBinary := c.GlobalString("source-binary")
- imageBinary := c.GlobalString("image-binary")
-
- if debug {
- logrus.SetLevel(logrus.DebugLevel)
- } else {
- logrus.SetLevel(logrus.ErrorLevel)
- }
- if rootDir == "" && runrootDir != "" {
- logrus.Errorf("must set --root and --runroot, or neither")
- os.Exit(1)
- }
- if rootDir != "" && runrootDir == "" {
- logrus.Errorf("must set --root and --runroot, or neither")
- os.Exit(1)
- }
- storeOptions := sstorage.DefaultStoreOptions
- if rootDir != "" && runrootDir != "" {
- storeOptions.GraphDriverName = storageDriver
- storeOptions.GraphDriverOptions = storageOptions
- storeOptions.GraphRoot = rootDir
- storeOptions.RunRoot = runrootDir
- }
- store, err := sstorage.GetStore(storeOptions)
- if err != nil {
- logrus.Errorf("error opening storage: %v", err)
- os.Exit(1)
- }
- defer func() {
- _, _ = store.Shutdown(false)
- }()
-
- layerBuffer := &bytes.Buffer{}
- binary, err := os.Open(sourceBinary)
- if err != nil {
- logrus.Errorf("error opening image binary: %v", err)
- os.Exit(1)
- }
- binInfo, err := binary.Stat()
- if err != nil {
- logrus.Errorf("error statting image binary: %v", err)
- os.Exit(1)
- }
- archive := tar.NewWriter(layerBuffer)
- err = archive.WriteHeader(&tar.Header{
- Name: imageBinary,
- Size: binInfo.Size(),
- Mode: 0555,
- ModTime: binInfo.ModTime(),
- Typeflag: tar.TypeReg,
- Uname: "root",
- Gname: "root",
- })
- if err != nil {
- logrus.Errorf("error writing archive header: %v", err)
- os.Exit(1)
- }
- _, err = io.Copy(archive, binary)
- if err != nil {
- logrus.Errorf("error archiving image binary: %v", err)
- os.Exit(1)
- }
- archive.Close()
- binary.Close()
- layerInfo := types.BlobInfo{
- Digest: digest.Canonical.FromBytes(layerBuffer.Bytes()),
- Size: int64(layerBuffer.Len()),
- }
-
- ref, err := storage.Transport.ParseStoreReference(store, imageName)
- if err != nil {
- logrus.Errorf("error parsing image name: %v", err)
- os.Exit(1)
- }
- ctx := context.TODO()
- img, err := ref.NewImageDestination(ctx, nil)
- if err != nil {
- logrus.Errorf("error preparing to write image: %v", err)
- os.Exit(1)
- }
- defer img.Close()
- layer, err := img.PutBlob(ctx, layerBuffer, layerInfo, blobinfocache.NewMemoryCache(), false)
- if err != nil {
- logrus.Errorf("error preparing to write image: %v", err)
- os.Exit(1)
- }
- config := &v1.Image{
- Architecture: runtime.GOARCH,
- OS: runtime.GOOS,
- Config: v1.ImageConfig{
- User: "root",
- Entrypoint: []string{imageBinary},
- },
- RootFS: v1.RootFS{
- Type: "layers",
- DiffIDs: []digest.Digest{
- layer.Digest,
- },
- },
- }
- cbytes, err := json.Marshal(config)
- if err != nil {
- logrus.Errorf("error encoding configuration: %v", err)
- os.Exit(1)
- }
- configInfo := types.BlobInfo{
- Digest: digest.Canonical.FromBytes(cbytes),
- Size: int64(len(cbytes)),
- }
- configInfo, err = img.PutBlob(ctx, bytes.NewBuffer(cbytes), configInfo, blobinfocache.NewMemoryCache(), false)
- if err != nil {
- logrus.Errorf("error saving configuration: %v", err)
- os.Exit(1)
- }
- manifest := &v1.Manifest{
- Versioned: specs.Versioned{
- SchemaVersion: 2,
- },
- Config: v1.Descriptor{
- MediaType: v1.MediaTypeImageConfig,
- Digest: configInfo.Digest,
- Size: int64(len(cbytes)),
- },
- Layers: []v1.Descriptor{{
- MediaType: v1.MediaTypeImageLayer,
- Digest: layer.Digest,
- Size: layer.Size,
- }},
- }
- mbytes, err := json.Marshal(manifest)
- if err != nil {
- logrus.Errorf("error encoding manifest: %v", err)
- os.Exit(1)
- }
- err = img.PutManifest(ctx, mbytes)
- if err != nil {
- logrus.Errorf("error saving manifest: %v", err)
- os.Exit(1)
- }
- err = img.Commit(ctx)
- if err != nil {
- logrus.Errorf("error committing image: %v", err)
- os.Exit(1)
- }
- return nil
- }
-
- if err := app.Run(os.Args); err != nil {
- logrus.Fatal(err)
- }
-}
diff --git a/test/copyimg/copyimg.go b/test/copyimg/copyimg.go
deleted file mode 100644
index da00964fa..000000000
--- a/test/copyimg/copyimg.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package main
-
-import (
- "context"
- "os"
-
- "github.com/containers/image/copy"
- "github.com/containers/image/signature"
- "github.com/containers/image/storage"
- "github.com/containers/image/transports/alltransports"
- "github.com/containers/image/types"
- sstorage "github.com/containers/storage"
- "github.com/containers/storage/pkg/reexec"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
-)
-
-func main() {
- if reexec.Init() {
- return
- }
-
- app := cli.NewApp()
- app.Name = "copyimg"
- app.Usage = "barebones image copier"
- app.Version = "0.0.1"
-
- app.Flags = []cli.Flag{
- cli.BoolFlag{
- Name: "debug",
- Usage: "turn on debug logging",
- },
- cli.StringFlag{
- Name: "root",
- Usage: "graph root directory",
- },
- cli.StringFlag{
- Name: "runroot",
- Usage: "run root directory",
- },
- cli.StringFlag{
- Name: "storage-driver",
- Usage: "storage driver",
- },
- cli.StringSliceFlag{
- Name: "storage-opt",
- Usage: "storage option",
- },
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "signature policy",
- },
- cli.StringFlag{
- Name: "image-name",
- Usage: "set image name",
- },
- cli.StringFlag{
- Name: "add-name",
- Usage: "name to add to image",
- },
- cli.StringFlag{
- Name: "import-from",
- Usage: "import source",
- },
- cli.StringFlag{
- Name: "export-to",
- Usage: "export target",
- },
- }
-
- app.Action = func(c *cli.Context) error {
- var store sstorage.Store
- var ref, importRef, exportRef types.ImageReference
- var err error
-
- debug := c.GlobalBool("debug")
- rootDir := c.GlobalString("root")
- runrootDir := c.GlobalString("runroot")
- storageDriver := c.GlobalString("storage-driver")
- storageOptions := c.GlobalStringSlice("storage-opt")
- signaturePolicy := c.GlobalString("signature-policy")
- imageName := c.GlobalString("image-name")
- addName := c.GlobalString("add-name")
- importFrom := c.GlobalString("import-from")
- exportTo := c.GlobalString("export-to")
-
- if debug {
- logrus.SetLevel(logrus.DebugLevel)
- } else {
- logrus.SetLevel(logrus.ErrorLevel)
- }
-
- if imageName != "" {
- if rootDir == "" && runrootDir != "" {
- logrus.Errorf("must set --root and --runroot, or neither")
- os.Exit(1)
- }
- if rootDir != "" && runrootDir == "" {
- logrus.Errorf("must set --root and --runroot, or neither")
- os.Exit(1)
- }
- storeOptions := sstorage.DefaultStoreOptions
- if rootDir != "" && runrootDir != "" {
- storeOptions.GraphDriverName = storageDriver
- storeOptions.GraphDriverOptions = storageOptions
- storeOptions.GraphRoot = rootDir
- storeOptions.RunRoot = runrootDir
- }
- store, err = sstorage.GetStore(storeOptions)
- if err != nil {
- logrus.Errorf("error opening storage: %v", err)
- os.Exit(1)
- }
- defer func() {
- _, _ = store.Shutdown(false)
- }()
-
- storage.Transport.SetStore(store)
- ref, err = storage.Transport.ParseStoreReference(store, imageName)
- if err != nil {
- logrus.Errorf("error parsing image name: %v", err)
- os.Exit(1)
- }
- }
-
- systemContext := types.SystemContext{
- SignaturePolicyPath: signaturePolicy,
- }
- policy, err := signature.DefaultPolicy(&systemContext)
- if err != nil {
- logrus.Errorf("error loading signature policy: %v", err)
- os.Exit(1)
- }
- policyContext, err := signature.NewPolicyContext(policy)
- if err != nil {
- logrus.Errorf("error loading signature policy: %v", err)
- os.Exit(1)
- }
- defer func() {
- _ = policyContext.Destroy()
- }()
- options := &copy.Options{}
-
- if importFrom != "" {
- importRef, err = alltransports.ParseImageName(importFrom)
- if err != nil {
- logrus.Errorf("error parsing image name %v: %v", importFrom, err)
- os.Exit(1)
- }
- }
-
- if exportTo != "" {
- exportRef, err = alltransports.ParseImageName(exportTo)
- if err != nil {
- logrus.Errorf("error parsing image name %v: %v", exportTo, err)
- os.Exit(1)
- }
- }
-
- ctx := context.TODO()
- if imageName != "" {
- if importFrom != "" {
- _, err = copy.Image(ctx, policyContext, ref, importRef, options)
- if err != nil {
- logrus.Errorf("error importing %s: %v", importFrom, err)
- os.Exit(1)
- }
- }
- if addName != "" {
- destImage, err1 := storage.Transport.GetStoreImage(store, ref)
- if err1 != nil {
- logrus.Errorf("error finding image: %v", err1)
- os.Exit(1)
- }
- names := append(destImage.Names, imageName, addName)
- err = store.SetNames(destImage.ID, names)
- if err != nil {
- logrus.Errorf("error adding name to %s: %v", imageName, err)
- os.Exit(1)
- }
- }
- if exportTo != "" {
- _, err = copy.Image(ctx, policyContext, exportRef, ref, options)
- if err != nil {
- logrus.Errorf("error exporting %s: %v", exportTo, err)
- os.Exit(1)
- }
- }
- } else {
- if importFrom != "" && exportTo != "" {
- _, err = copy.Image(ctx, policyContext, exportRef, importRef, options)
- if err != nil {
- logrus.Errorf("error copying %s to %s: %v", importFrom, exportTo, err)
- os.Exit(1)
- }
- }
- }
- return nil
- }
-
- if err := app.Run(os.Args); err != nil {
- logrus.Fatal(err)
- }
-}
diff --git a/test/e2e/cp_test.go b/test/e2e/cp_test.go
new file mode 100644
index 000000000..e1e760ee0
--- /dev/null
+++ b/test/e2e/cp_test.go
@@ -0,0 +1,115 @@
+// +build !remoteclient
+
+package integration
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman cp", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman cp file", func() {
+ path, err := os.Getwd()
+ if err != nil {
+ os.Exit(1)
+ }
+ filePath := filepath.Join(path, "cp_test.txt")
+ fromHostToContainer := []byte("copy from host to container")
+ err = ioutil.WriteFile(filePath, fromHostToContainer, 0644)
+ if err != nil {
+ os.Exit(1)
+ }
+
+ session := podmanTest.Podman([]string{"create", ALPINE, "cat", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ name := session.OutputToString()
+
+ session = podmanTest.Podman([]string{"cp", filepath.Join(path, "cp_test.txt"), name + ":foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"start", "-a", name})
+ session.WaitWithDefaultTimeout()
+
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Equal("copy from host to container"))
+
+ session = podmanTest.Podman([]string{"cp", name + ":foo", filepath.Join(path, "cp_from_container")})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ c := exec.Command("cat", filepath.Join(path, "cp_from_container"))
+ output, err := c.Output()
+ if err != nil {
+ os.Exit(1)
+ }
+ Expect(string(output)).To(Equal("copy from host to container"))
+ })
+
+ It("podman cp file to dir", func() {
+ path, err := os.Getwd()
+ if err != nil {
+ os.Exit(1)
+ }
+ filePath := filepath.Join(path, "cp_test.txt")
+ fromHostToContainer := []byte("copy from host to container directory")
+ err = ioutil.WriteFile(filePath, fromHostToContainer, 0644)
+ if err != nil {
+ os.Exit(1)
+ }
+ session := podmanTest.Podman([]string{"create", ALPINE, "ls", "foodir/"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"ps", "-a", "-q"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ name := session.OutputToString()
+
+ session = podmanTest.Podman([]string{"cp", filepath.Join(path, "cp_test.txt"), name + ":foodir/"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ session = podmanTest.Podman([]string{"start", "-a", name})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Equal("cp_test.txt"))
+
+ session = podmanTest.Podman([]string{"cp", name + ":foodir/cp_test.txt", path + "/receive/"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ c := exec.Command("cat", filepath.Join(path, "receive", "cp_test.txt"))
+ output, err := c.Output()
+ if err != nil {
+ os.Exit(1)
+ }
+ Expect(string(output)).To(Equal("copy from host to container directory"))
+ })
+})
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index b28a0c428..9a526b778 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -142,7 +142,7 @@ var _ = Describe("Podman create", func() {
}
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
os.Mkdir(mountPath, 0755)
- session := podmanTest.Podman([]string{"create", "--name", "test", "--rm", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session := podmanTest.Podman([]string{"create", "--name", "test", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"start", "test"})
@@ -153,7 +153,7 @@ var _ = Describe("Podman create", func() {
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("/create/test rw"))
- session = podmanTest.Podman([]string{"create", "--name", "test_ro", "--rm", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,ro", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session = podmanTest.Podman([]string{"create", "--name", "test_ro", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,ro", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"start", "test_ro"})
@@ -164,7 +164,7 @@ var _ = Describe("Podman create", func() {
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("/create/test ro"))
- session = podmanTest.Podman([]string{"create", "--name", "test_shared", "--rm", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,shared", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session = podmanTest.Podman([]string{"create", "--name", "test_shared", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,shared", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"start", "test_shared"})
@@ -180,7 +180,7 @@ var _ = Describe("Podman create", func() {
mountPath = filepath.Join(podmanTest.TempDir, "scratchpad")
os.Mkdir(mountPath, 0755)
- session = podmanTest.Podman([]string{"create", "--name", "test_tmpfs", "--rm", "--mount", "type=tmpfs,target=/create/test", ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session = podmanTest.Podman([]string{"create", "--name", "test_tmpfs", "--mount", "type=tmpfs,target=/create/test", ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"start", "test_tmpfs"})
diff --git a/test/e2e/e2e.coverprofile b/test/e2e/e2e.coverprofile
new file mode 100644
index 000000000..b5382604f
--- /dev/null
+++ b/test/e2e/e2e.coverprofile
@@ -0,0 +1,11 @@
+mode: atomic
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:14.46,21.20 2 1
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:31.2,31.19 1 1
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:38.2,38.53 1 1
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:65.2,65.52 1 1
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:21.20,23.17 2 2
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:26.3,28.36 3 2
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:23.17,25.4 1 0
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:31.19,36.3 4 2
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:38.53,63.3 20 1
+github.com/containers/libpod/test/e2e/pod_pod_namespaces.go:65.52,90.3 20 1
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index 4b4baa93c..33e05b872 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -28,8 +28,8 @@ func (p *PodmanTestIntegration) Podman(args []string) *PodmanSessionIntegration
}
// PodmanAsUser is the exec call to podman on the filesystem with the specified uid/gid and environment
-func (p *PodmanTestIntegration) PodmanAsUser(args []string, uid, gid uint32, env []string) *PodmanSessionIntegration {
- podmanSession := p.PodmanAsUserBase(args, uid, gid, env)
+func (p *PodmanTestIntegration) PodmanAsUser(args []string, uid, gid uint32, cwd string, env []string) *PodmanSessionIntegration {
+ podmanSession := p.PodmanAsUserBase(args, uid, gid, cwd, env)
return &PodmanSessionIntegration{podmanSession}
}
diff --git a/test/e2e/load_test.go b/test/e2e/load_test.go
index 423f99ac8..c85810454 100644
--- a/test/e2e/load_test.go
+++ b/test/e2e/load_test.go
@@ -199,11 +199,9 @@ var _ = Describe("Podman load", func() {
It("podman load localhost registry from scratch and :latest", func() {
outfile := filepath.Join(podmanTest.TempDir, "load_test.tar.gz")
- setup := podmanTest.Podman([]string{"pull", fedoraMinimal})
- setup.WaitWithDefaultTimeout()
- Expect(setup.ExitCode()).To(Equal(0))
+ podmanTest.RestoreArtifact("fedora-minimal:latest")
- setup = podmanTest.Podman([]string{"tag", "fedora-minimal", "hello"})
+ setup := podmanTest.Podman([]string{"tag", "fedora-minimal", "hello"})
setup.WaitWithDefaultTimeout()
Expect(setup.ExitCode()).To(Equal(0))
diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go
index 161bf7f9c..ed5002ca7 100644
--- a/test/e2e/pod_infra_container_test.go
+++ b/test/e2e/pod_infra_container_test.go
@@ -309,4 +309,55 @@ var _ = Describe("Podman pod create", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
+
+ It("podman run in pod starts infra", func() {
+ session := podmanTest.Podman([]string{"pod", "create"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ podID := session.OutputToString()
+
+ result := podmanTest.Podman([]string{"ps", "-aq"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ infraID := result.OutputToString()
+
+ result = podmanTest.Podman([]string{"run", "--pod", podID, "-d", ALPINE, "top"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+
+ result = podmanTest.Podman([]string{"ps", "-aq"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(len(result.OutputToStringArray())).Should(BeNumerically(">", 0))
+
+ Expect(result.OutputToString()).To(ContainSubstring(infraID))
+ })
+
+ It("podman start in pod starts infra", func() {
+ session := podmanTest.Podman([]string{"pod", "create"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ podID := session.OutputToString()
+
+ result := podmanTest.Podman([]string{"ps", "-aq"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ infraID := result.OutputToString()
+
+ result = podmanTest.Podman([]string{"create", "--pod", podID, ALPINE, "ls"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ ctrID := result.OutputToString()
+
+ result = podmanTest.Podman([]string{"start", ctrID})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+
+ result = podmanTest.Podman([]string{"ps", "-aq"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(len(result.OutputToStringArray())).Should(BeNumerically(">", 0))
+
+ Expect(result.OutputToString()).To(ContainSubstring(infraID))
+ })
})
diff --git a/test/e2e/pod_start_test.go b/test/e2e/pod_start_test.go
index 77e8b586d..ce693012d 100644
--- a/test/e2e/pod_start_test.go
+++ b/test/e2e/pod_start_test.go
@@ -136,6 +136,5 @@ var _ = Describe("Podman pod start", func() {
session = podmanTest.Podman([]string{"pod", "start", podid, "doesnotexist"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(125))
- Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
})
})
diff --git a/test/e2e/pod_stats_test.go b/test/e2e/pod_stats_test.go
index 43d089a24..e330c3a39 100644
--- a/test/e2e/pod_stats_test.go
+++ b/test/e2e/pod_stats_test.go
@@ -147,5 +147,28 @@ var _ = Describe("Podman pod stats", func() {
Expect(stats.ExitCode()).To(Equal(0))
Expect(stats.IsJSONOutputValid()).To(BeTrue())
})
+ It("podman stats with GO template", func() {
+ _, ec, podid := podmanTest.CreatePod("")
+ Expect(ec).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("", podid)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ stats := podmanTest.Podman([]string{"pod", "stats", "-a", "--no-reset", "--no-stream", "--format", "\"table {{.CID}} {{.Pod}} {{.Mem}} {{.MemUsage}} {{.CPU}} {{.NetIO}} {{.BlockIO}} {{.PIDS}} {{.Pod}}\""})
+ stats.WaitWithDefaultTimeout()
+ Expect(stats.ExitCode()).To(Equal(0))
+ })
+
+ It("podman stats with invalid GO template", func() {
+ _, ec, podid := podmanTest.CreatePod("")
+ Expect(ec).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("", podid)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ stats := podmanTest.Podman([]string{"pod", "stats", "-a", "--no-reset", "--no-stream", "--format", "\"table {{.ID}} \""})
+ stats.WaitWithDefaultTimeout()
+ Expect(stats.ExitCode()).ToNot(Equal(0))
+ })
})
diff --git a/test/e2e/pod_stop_test.go b/test/e2e/pod_stop_test.go
index b3d7df252..38f118964 100644
--- a/test/e2e/pod_stop_test.go
+++ b/test/e2e/pod_stop_test.go
@@ -137,6 +137,5 @@ var _ = Describe("Podman pod stop", func() {
session = podmanTest.Podman([]string{"pod", "stop", podid1, "doesnotexist"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(125))
- Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
})
})
diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go
index bfae15152..faad8202e 100644
--- a/test/e2e/pull_test.go
+++ b/test/e2e/pull_test.go
@@ -163,4 +163,20 @@ var _ = Describe("Podman pull", func() {
Expect(pull.OutputToString()).To(ContainSubstring(shortImageId))
})
+
+ It("podman pull check all tags", func() {
+ session := podmanTest.Podman([]string{"pull", "--all-tags", "alpine"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOuputStartsWith("Pulled Images:")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"images"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 4))
+
+ rmi := podmanTest.Podman([]string{"rmi", "-a", "-f"})
+ rmi.WaitWithDefaultTimeout()
+ Expect(rmi.ExitCode()).To(Equal(0))
+ })
})
diff --git a/test/e2e/restart_test.go b/test/e2e/restart_test.go
index 3c77444d8..5c914a367 100644
--- a/test/e2e/restart_test.go
+++ b/test/e2e/restart_test.go
@@ -151,7 +151,7 @@ var _ = Describe("Podman restart", func() {
startTime := podmanTest.Podman([]string{"inspect", "--format='{{.State.StartedAt}}'", "test1", "test2"})
startTime.WaitWithDefaultTimeout()
- session := podmanTest.Podman([]string{"restart", "-all"})
+ session := podmanTest.Podman([]string{"restart", "--all"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
restartTime := podmanTest.Podman([]string{"inspect", "--format='{{.State.StartedAt}}'", "test1", "test2"})
diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go
index bc1431bce..71dacfa80 100644
--- a/test/e2e/rm_test.go
+++ b/test/e2e/rm_test.go
@@ -128,4 +128,9 @@ var _ = Describe("Podman rm", func() {
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
})
+ It("podman rm bogus container", func() {
+ session := podmanTest.Podman([]string{"rm", "bogus"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(1))
+ })
})
diff --git a/test/e2e/rmi_test.go b/test/e2e/rmi_test.go
index c160e1bc5..dcbda2df4 100644
--- a/test/e2e/rmi_test.go
+++ b/test/e2e/rmi_test.go
@@ -36,7 +36,7 @@ var _ = Describe("Podman rmi", func() {
It("podman rmi bogus image", func() {
session := podmanTest.Podman([]string{"rmi", "debian:6.0.10"})
session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(125))
+ Expect(session.ExitCode()).To(Equal(1))
})
diff --git a/test/e2e/rootless_test.go b/test/e2e/rootless_test.go
index 2b84d34c9..aa8ed6faa 100644
--- a/test/e2e/rootless_test.go
+++ b/test/e2e/rootless_test.go
@@ -60,7 +60,7 @@ var _ = Describe("Podman rootless", func() {
for _, v := range commands {
env := os.Environ()
env = append(env, "USER=foo")
- cmd := podmanTest.PodmanAsUser([]string{v}, 1000, 1000, env)
+ cmd := podmanTest.PodmanAsUser([]string{v}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
}
@@ -128,13 +128,13 @@ var _ = Describe("Podman rootless", func() {
env = append(env, "PODMAN_ALLOW_SINGLE_ID_MAPPING_IN_USERNS=1")
env = append(env, "USER=foo")
- cmd := rootlessTest.PodmanAsUser([]string{"pod", "create", "--infra=false"}, 1000, 1000, env)
+ cmd := rootlessTest.PodmanAsUser([]string{"pod", "create", "--infra=false"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
podId := cmd.OutputToString()
args := []string{"run", "--pod", podId, "--rootfs", mountPath, "echo", "hello"}
- cmd = rootlessTest.PodmanAsUser(args, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser(args, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
Expect(cmd.LineInOutputContains("hello")).To(BeTrue())
@@ -158,7 +158,7 @@ var _ = Describe("Podman rootless", func() {
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", xdgRuntimeDir))
env = append(env, fmt.Sprintf("HOME=%s", home))
env = append(env, "USER=foo")
- cmd := podmanTest.PodmanAsUser([]string{"search", "docker.io/busybox"}, 1000, 1000, env)
+ cmd := podmanTest.PodmanAsUser([]string{"search", "docker.io/busybox"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
})
@@ -175,65 +175,65 @@ var _ = Describe("Podman rootless", func() {
allArgs := append([]string{"run"}, args...)
allArgs = append(allArgs, "--rootfs", mountPath, "echo", "hello")
- cmd := rootlessTest.PodmanAsUser(allArgs, 1000, 1000, env)
+ cmd := rootlessTest.PodmanAsUser(allArgs, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
Expect(cmd.LineInOutputContains("hello")).To(BeTrue())
- cmd = rootlessTest.PodmanAsUser([]string{"rm", "-l", "-f"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"rm", "-l", "-f"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
allArgs = append([]string{"run", "-d"}, args...)
allArgs = append(allArgs, "--security-opt", "seccomp=unconfined", "--rootfs", mountPath, "top")
- cmd = rootlessTest.PodmanAsUser(allArgs, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser(allArgs, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
- cmd = rootlessTest.PodmanAsUser([]string{"restart", "-l", "-t", "0"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"restart", "-l", "-t", "0"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
canUseExec := canExec()
if canUseExec {
- cmd = rootlessTest.PodmanAsUser([]string{"top", "-l"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"top", "-l"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
}
- cmd = rootlessTest.PodmanAsUser([]string{"rm", "-l", "-f"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"rm", "-l", "-f"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
allArgs = append([]string{"run", "-d"}, args...)
allArgs = append(allArgs, "--security-opt", "seccomp=unconfined", "--rootfs", mountPath, "unshare", "-r", "unshare", "-r", "top")
- cmd = rootlessTest.PodmanAsUser(allArgs, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser(allArgs, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
- cmd = rootlessTest.PodmanAsUser([]string{"stop", "-l", "-t", "0"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"stop", "-l", "-t", "0"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
- cmd = rootlessTest.PodmanAsUser([]string{"inspect", "-l", "--type", "container", "--format", "{{ .State.Status }}"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"inspect", "-l", "--type", "container", "--format", "{{ .State.Status }}"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.LineInOutputContains("exited")).To(BeTrue())
- cmd = rootlessTest.PodmanAsUser([]string{"start", "-l"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"start", "-l"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
- cmd = rootlessTest.PodmanAsUser([]string{"stop", "-l", "-t", "0"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"stop", "-l", "-t", "0"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
- cmd = rootlessTest.PodmanAsUser([]string{"start", "-l"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"start", "-l"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
if len(args) == 0 {
- cmd = rootlessTest.PodmanAsUser([]string{"inspect", "-l"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"inspect", "-l"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
data := cmd.InspectContainerToJSON()
@@ -244,24 +244,23 @@ var _ = Describe("Podman rootless", func() {
Skip("ioctl(NS_GET_PARENT) not supported.")
}
- cmd = rootlessTest.PodmanAsUser([]string{"exec", "-l", "echo", "hello"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"exec", "-l", "echo", "hello"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
Expect(cmd.LineInOutputContains("hello")).To(BeTrue())
- cmd = rootlessTest.PodmanAsUser([]string{"ps", "-l", "-q"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"ps", "-l", "-q"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
cid := cmd.OutputToString()
- cmd = rootlessTest.PodmanAsUser([]string{"exec", "-l", "sh", "-c", "echo SeCreTMessage > /file"}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"exec", "-l", "sh", "-c", "echo SeCreTMessage > /file"}, 1000, 1000, "", env)
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
- path := filepath.Join(home, "export.tar")
- cmd = rootlessTest.PodmanAsUser([]string{"export", "-o", path, cid}, 1000, 1000, env)
+ cmd = rootlessTest.PodmanAsUser([]string{"export", "-o", "export.tar", cid}, 1000, 1000, home, env)
cmd.WaitWithDefaultTimeout()
- content, err := ioutil.ReadFile(path)
+ content, err := ioutil.ReadFile(filepath.Join(home, "export.tar"))
Expect(err).To(BeNil())
Expect(strings.Contains(string(content), "SeCreTMessage")).To(BeTrue())
}
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 1c09a4d0b..a07e4d047 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -36,19 +36,19 @@ var _ = Describe("Podman run networking", func() {
})
It("podman run network connection with default bridge", func() {
- session := podmanTest.Podman([]string{"run", "-dt", ALPINE, "wget", "www.projectatomic.io"})
+ session := podmanTest.Podman([]string{"run", "-dt", ALPINE, "wget", "www.podman.io"})
session.Wait(90)
Expect(session.ExitCode()).To(Equal(0))
})
It("podman run network connection with host", func() {
- session := podmanTest.Podman([]string{"run", "-dt", "--network", "host", ALPINE, "wget", "www.projectatomic.io"})
+ session := podmanTest.Podman([]string{"run", "-dt", "--network", "host", ALPINE, "wget", "www.podman.io"})
session.Wait(90)
Expect(session.ExitCode()).To(Equal(0))
})
It("podman run network connection with loopback", func() {
- session := podmanTest.Podman([]string{"run", "-dt", "--network", "host", ALPINE, "wget", "www.projectatomic.io"})
+ session := podmanTest.Podman([]string{"run", "-dt", "--network", "host", ALPINE, "wget", "www.podman.io"})
session.Wait(90)
Expect(session.ExitCode()).To(Equal(0))
})
@@ -178,4 +178,37 @@ var _ = Describe("Podman run networking", func() {
Expect(exec4.ExitCode()).To(Equal(0))
Expect(exec4.OutputToString()).To(ContainSubstring("192.0.2.2 test1"))
})
+
+ It("podman run network in user created network namespace", func() {
+ if Containerized() {
+ Skip("Can not be run within a container.")
+ }
+ SystemExec("ip", []string{"netns", "add", "xxx"})
+ session := podmanTest.Podman([]string{"run", "-dt", "--net", "ns:/run/netns/xxx", ALPINE, "wget", "www.podman.io"})
+ session.Wait(90)
+ Expect(session.ExitCode()).To(Equal(0))
+ SystemExec("ip", []string{"netns", "delete", "xxx"})
+ })
+
+ It("podman run n user created network namespace with resolv.conf", func() {
+ if Containerized() {
+ Skip("Can not be run within a container.")
+ }
+ SystemExec("ip", []string{"netns", "add", "xxx"})
+ SystemExec("mkdir", []string{"-p", "/etc/netns/xxx"})
+ SystemExec("bash", []string{"-c", "echo nameserver 11.11.11.11 > /etc/netns/xxx/resolv.conf"})
+ session := podmanTest.Podman([]string{"run", "--net", "ns:/run/netns/xxx", ALPINE, "cat", "/etc/resolv.conf"})
+ session.Wait(90)
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("11.11.11.11"))
+ SystemExec("ip", []string{"netns", "delete", "xxx"})
+ SystemExec("rm", []string{"-rf", "/etc/netns/xxx"})
+ })
+
+ It("podman run network in bogus user created network namespace", func() {
+ session := podmanTest.Podman([]string{"run", "-dt", "--net", "ns:/run/netns/xxy", ALPINE, "wget", "www.podman.io"})
+ session.Wait(90)
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ Expect(session.ErrorToString()).To(ContainSubstring("stat /run/netns/xxy: no such file or directory"))
+ })
})
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 22a36bb6c..6bd49de33 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -322,7 +322,7 @@ var _ = Describe("Podman run", func() {
os.Setenv("NOTIFY_SOCKET", sock)
defer os.Unsetenv("NOTIFY_SOCKET")
- session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "printenv", "NOTIFY_SOCKET"})
+ session := podmanTest.Podman([]string{"run", ALPINE, "printenv", "NOTIFY_SOCKET"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 0))
diff --git a/test/e2e/save_test.go b/test/e2e/save_test.go
index b354492b8..9f64e49a7 100644
--- a/test/e2e/save_test.go
+++ b/test/e2e/save_test.go
@@ -1,5 +1,3 @@
-// +build !remoteclient
-
package integration
import (
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 1438fd97b..167f8fa25 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -63,10 +63,10 @@ var _ = Describe("Podman search", func() {
})
It("podman search single registry flag", func() {
- search := podmanTest.Podman([]string{"search", "registry.access.redhat.com/rhel7"})
+ search := podmanTest.Podman([]string{"search", "quay.io/libpod/gate:latest"})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
- Expect(search.LineInOutputContains("registry.access.redhat.com/rhel7")).To(BeTrue())
+ Expect(search.LineInOutputContains("quay.io/libpod/gate")).To(BeTrue())
})
It("podman search format flag", func() {
diff --git a/test/e2e/volume_create_test.go b/test/e2e/volume_create_test.go
index 9e525786e..50ee63f2a 100644
--- a/test/e2e/volume_create_test.go
+++ b/test/e2e/volume_create_test.go
@@ -1,5 +1,3 @@
-// +build !remoteclient
-
package integration
import (
diff --git a/test/e2e/volume_inspect_test.go b/test/e2e/volume_inspect_test.go
index aacdbe8be..d0d5a601e 100644
--- a/test/e2e/volume_inspect_test.go
+++ b/test/e2e/volume_inspect_test.go
@@ -1,5 +1,3 @@
-// +build !remoteclient
-
package integration
import (
diff --git a/test/e2e/volume_ls_test.go b/test/e2e/volume_ls_test.go
index d2ee558c1..119d29d9b 100644
--- a/test/e2e/volume_ls_test.go
+++ b/test/e2e/volume_ls_test.go
@@ -1,5 +1,3 @@
-// +build !remoteclient
-
package integration
import (
diff --git a/test/e2e/volume_rm_test.go b/test/e2e/volume_rm_test.go
index 295b290e4..6a1e7d0e8 100644
--- a/test/e2e/volume_rm_test.go
+++ b/test/e2e/volume_rm_test.go
@@ -1,5 +1,3 @@
-// +build !remoteclient
-
package integration
import (
@@ -50,6 +48,7 @@ var _ = Describe("Podman volume rm", func() {
})
It("podman rm with --force flag", func() {
+ SkipIfRemote()
session := podmanTest.Podman([]string{"create", "-v", "myvol:/myvol", ALPINE, "ls"})
cid := session.OutputToString()
session.WaitWithDefaultTimeout()
diff --git a/test/trust_set_test.json b/test/trust_set_test.json
new file mode 100644
index 000000000..661e65922
--- /dev/null
+++ b/test/trust_set_test.json
@@ -0,0 +1,8 @@
+{
+ "default": [
+ {
+ "type": "insecureAcceptAnything"
+ }
+ ],
+ "transports": null
+} \ No newline at end of file
diff --git a/test/utils/podmantest_test.go b/test/utils/podmantest_test.go
index 60e3e2a97..28f294a94 100644
--- a/test/utils/podmantest_test.go
+++ b/test/utils/podmantest_test.go
@@ -23,7 +23,7 @@ var _ = Describe("PodmanTest test", func() {
FakeOutputs["check"] = []string{"check"}
os.Setenv("HOOK_OPTION", "hook_option")
env := os.Environ()
- session := podmanTest.PodmanAsUserBase([]string{"check"}, 1000, 1000, env)
+ session := podmanTest.PodmanAsUserBase([]string{"check"}, 1000, 1000, "", env)
os.Unsetenv("HOOK_OPTION")
session.WaitWithDefaultTimeout()
Expect(session.Command.Process).ShouldNot(BeNil())
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 23dcb95e3..098779321 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -61,7 +61,7 @@ func (p *PodmanTest) MakeOptions(args []string) []string {
// PodmanAsUserBase execs podman as the given user. uid and gid are set for credentials usage. env is used
// to record the env for debugging
-func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, env []string) *PodmanSession {
+func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, cwd string, env []string) *PodmanSession {
var command *exec.Cmd
podmanOptions := p.MakeOptions(args)
podmanBinary := p.PodmanBinary
@@ -74,14 +74,18 @@ func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, env []stri
fmt.Printf("Running: (env: %v) %s %s\n", env, podmanBinary, strings.Join(podmanOptions, " "))
}
if uid != 0 || gid != 0 {
- nsEnterOpts := append([]string{"--userspec", fmt.Sprintf("%d:%d", uid, gid), "/", podmanBinary}, podmanOptions...)
- command = exec.Command("chroot", nsEnterOpts...)
+ pythonCmd := fmt.Sprintf("import os; import sys; uid = %d; gid = %d; cwd = '%s'; os.setgid(gid); os.setuid(uid); os.chdir(cwd) if len(cwd)>0 else True; os.execv(sys.argv[1], sys.argv[1:])", uid, gid, cwd)
+ nsEnterOpts := append([]string{"-c", pythonCmd, podmanBinary}, podmanOptions...)
+ command = exec.Command("python", nsEnterOpts...)
} else {
command = exec.Command(podmanBinary, podmanOptions...)
}
if env != nil {
command.Env = env
}
+ if cwd != "" {
+ command.Dir = cwd
+ }
session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
if err != nil {
@@ -92,7 +96,7 @@ func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, env []stri
// PodmanBase exec podman with default env.
func (p *PodmanTest) PodmanBase(args []string) *PodmanSession {
- return p.PodmanAsUserBase(args, 0, 0, nil)
+ return p.PodmanAsUserBase(args, 0, 0, "", nil)
}
// WaitForContainer waits on a started container
@@ -270,7 +274,7 @@ func (s *PodmanSession) LineInOuputStartsWith(term string) bool {
}
//LineInOutputContains returns true if a line in a
-// session output starts with the supplied string
+// session output contains the supplied string
func (s *PodmanSession) LineInOutputContains(term string) bool {
for _, i := range s.OutputToStringArray() {
if strings.Contains(i, term) {
diff --git a/transfer.md b/transfer.md
index af7904e5f..c2d472f08 100644
--- a/transfer.md
+++ b/transfer.md
@@ -37,11 +37,11 @@ There are other equivalents for these tools
| Existing Step | `Podman` (and friends) |
| :--- | :--- |
-| `docker attach` | [`podman exec`](./docs/podman-attach.1.md) |
+| `docker attach` | [`podman attach`](./docs/podman-attach.1.md) |
+| `docker cp` | [`podman cp`](./docs/podman-cp.1.md) |
| `docker build` | [`podman build`](./docs/podman-build.1.md) |
| `docker commit` | [`podman commit`](./docs/podman-commit.1.md) |
| `docker container`|[`podman container`](./docs/podman-container.1.md) |
-| `docker cp` | [`podman mount`](./docs/podman-cp.1.md) **** |
| `docker create` | [`podman create`](./docs/podman-create.1.md) |
| `docker diff` | [`podman diff`](./docs/podman-diff.1.md) |
| `docker export` | [`podman export`](./docs/podman-export.1.md) |
diff --git a/troubleshooting.md b/troubleshooting.md
index 3f66b56ef..24a1dc6cb 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -191,3 +191,66 @@ SELinux provides a boolean `container_manage_cgroup`, which allows container
processes to write to the cgroup file system. Turn on this boolean, on SELinux separated systems, to allow systemd to run properly in the container.
`setsebool -P container_manage_cgroup true`
+
+### 9) Newuidmap missing when running rootless Podman commands
+
+Rootless podman requires the newuidmap and newgidmap programs to be installed.
+
+#### Symptom
+
+If you are running podman or buildah as a non-root user, you get an error complaining about
+a missing newuidmap executable.
+
+```
+podman run -ti fedora sh
+cannot find newuidmap: exec: "newuidmap": executable file not found in $PATH
+```
+
+#### Solution
+
+Install a version of shadow-utils that includes these executables. Note that RHEL 7 and CentOS 7 will not have support for this until RHEL 7.7 is released.
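+
+For example, on Fedora this can typically be done with `sudo dnf install shadow-utils`; on Debian or
+Ubuntu the equivalent package is usually `uidmap`. These package names are assumptions, so check your
+distribution's documentation.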
+
+### 10) Podman fails to run in a user namespace because /etc/subuid is not properly populated
+
+Rootless podman requires the user running it to have a range of UIDs listed in /etc/subuid and /etc/subgid.
+
+#### Symptom
+
+If you are running podman or buildah as a non-root user, you get an error complaining about
+missing subuid ranges in /etc/subuid.
+
+```
+podman run -ti fedora sh
+No subuid ranges found for user "johndoe" in /etc/subuid
+```
+
+#### Solution
+
+Update /etc/subuid and /etc/subgid with entries for the user that look like:
+
+```
+cat /etc/subuid
+johndoe:100000:65536
+test:165536:65536
+```
+
+The format of this file is USERNAME:UID:RANGE, where:
+
+* USERNAME is the username as listed in /etc/passwd or getpwent.
+* UID is the initial UID allocated for the user.
+* RANGE is the size of the range of UIDs allocated for the user.
+
+This means johndoe is allocated UIDs 100000-165535 in addition to the standard UID in the
+/etc/passwd file.
+
+You should ensure that each user has a unique range of UIDs, because overlapping ranges
+would potentially allow one user to attack another.
+
+You could also use the usermod program to assign UIDs to a user.
+
+```
+usermod --add-subuids 200000-201000 --add-subgids 200000-201000 johndoe
+grep johndoe /etc/subuid /etc/subgid
+/etc/subuid:johndoe:200000:1001
+/etc/subgid:johndoe:200000:1001
+```
diff --git a/utils/utils.go b/utils/utils.go
index c7c5ab5cf..33b0eb1c5 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -4,11 +4,15 @@ import (
"bytes"
"fmt"
"io"
+ "os"
"os/exec"
"strings"
+ "github.com/containers/storage/pkg/archive"
systemdDbus "github.com/coreos/go-systemd/dbus"
"github.com/godbus/dbus"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// ExecCmd executes a command with args and returns its output as a string along
@@ -82,12 +86,9 @@ func newProp(name string, units interface{}) systemdDbus.Property {
}
}
-// DetachError is special error which returned in case of container detach.
-type DetachError struct{}
-
-func (DetachError) Error() string {
- return "detached from container"
-}
+// ErrDetach is an error indicating that the user manually detached from the
+// container.
+var ErrDetach = errors.New("detached from container")
// CopyDetachable is similar to io.Copy but support a detach key sequence to break out.
func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) {
@@ -108,7 +109,7 @@ func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, e
}
if i == len(keys)-1 {
// src.Close()
- return 0, DetachError{}
+ return 0, ErrDetach
}
nr, er = src.Read(buf)
}
@@ -141,3 +142,30 @@ func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, e
}
return written, err
}
+
+// UntarToFileSystem untars a tarball (an os.File) to a destination in the filesystem
+func UntarToFileSystem(dest string, tarball *os.File, options *archive.TarOptions) error {
+ logrus.Debugf("untarring %s", tarball.Name())
+ return archive.Untar(tarball, dest, options)
+}
+
+// TarToFilesystem creates a tarball from source and writes it to the provided
+// os.File
+func TarToFilesystem(source string, tarball *os.File) error {
+ tb, err := Tar(source)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(tarball, tb)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("wrote tarball file %s", tarball.Name())
+ return nil
+}
+
+// Tar creates a tarball from source and returns a readcloser of it
+func Tar(source string) (io.ReadCloser, error) {
+ logrus.Debugf("creating tarball of %s", source)
+ return archive.Tar(source, archive.Uncompressed)
+}
diff --git a/vendor.conf b/vendor.conf
index cd018e3c2..445f0844a 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -15,8 +15,11 @@ github.com/containerd/cgroups 39b18af02c4120960f517a3a4c2588fabb61d02c
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-alpha1
github.com/containernetworking/plugins v0.7.4
-github.com/containers/image 67b1f789f2ce8a3654592a582fff26c396326236
-github.com/containers/storage v1.8
+github.com/containers/image v1.4
+github.com/vbauerster/mpb v3.3.4
+github.com/mattn/go-isatty v0.0.4
+github.com/VividCortex/ewma v1.1.1
+github.com/containers/storage v1.10
github.com/containers/psgo v1.1
github.com/coreos/go-systemd v14
github.com/cri-o/ocicni 2d2983e40c242322a56c22a903785e7f83eb378c
@@ -46,7 +49,6 @@ github.com/imdario/mergo v0.3.6
github.com/json-iterator/go 1.1.5
github.com/modern-go/concurrent 1.0.3
github.com/modern-go/reflect2 v1.0.1
-github.com/mattn/go-runewidth v0.0.4
github.com/mistifyio/go-zfs v2.1.1
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
@@ -55,6 +57,7 @@ github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec 1722abf79c2f8f2675f47367f827c6491472cf27
github.com/opencontainers/runtime-tools v0.8.0
github.com/opencontainers/selinux v1.1
+github.com/opentracing/opentracing-go 25a84ff92183e2f8ac018ba1db54f8a07b3c0e04
github.com/ostreedev/ostree-go d0388bd827cfac6fa8eec760246eb8375674b2a0
github.com/pkg/errors v0.8.1
github.com/pmezard/go-difflib v1.0.0
@@ -67,9 +70,9 @@ github.com/spf13/pflag v1.0.3
github.com/stretchr/testify v1.3.0
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/tchap/go-patricia v2.2.6
+github.com/uber/jaeger-client-go 64f57863bf63d3842dbe79cdc793d57baaff9ab5
+github.com/uber/jaeger-lib d036253de8f5b698150d81b922486f1e8e7628ec
github.com/ulule/deepcopier ca99b135e50f526fde9cd88705f0ff2f3f95b77c
-# TODO: urfave/cli is not active anymore. We should transition to another library.
-github.com/urfave/cli b67dcf995b6a7b7f14fad5fcb7cc5441b05e814b
github.com/vbatts/tar-split v0.11.1
github.com/vishvananda/netlink v1.0.0
github.com/vishvananda/netns 13995c7128ccc8e51e9a6bd2b551020a27180abd
@@ -82,7 +85,6 @@ golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba https://github.com/gol
golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d https://github.com/golang/text
golang.org/x/time 85acf8d2951cb2a3bde7632f9ff273ef0379bcbd https://github.com/golang/time
golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f https://github.com/golang/sync
-gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/inf.v0 v0.9.1
gopkg.in/mgo.v2 v2
gopkg.in/yaml.v2 v2.2.2
@@ -91,13 +93,13 @@ k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apim
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7
github.com/varlink/go 3ac79db6fd6aec70924193b090962f92985fe199
-github.com/containers/buildah e7ca330f923701dba8859f5c014d0a9a3f7f0a49
+github.com/containers/buildah v1.7
# TODO: Gotty has not been updated since 2012. Can we find replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
# do not go beyond the below commit as the next one requires a more recent
# docker which is in conflict with openshift/imagebuilder
github.com/fsouza/go-dockerclient 29c1814d12c072344bb91aac5d2ff719db39c523
-github.com/openshift/imagebuilder 474d0f9df2cbabf006bd2b1c263a7b0789e228e0
+github.com/openshift/imagebuilder 36823496a6868f72bc36282cc475eb8a070c0934
github.com/ulikunitz/xz v0.5.5
github.com/coreos/go-iptables v0.4.0
github.com/google/shlex c34317bd91bf98fab745d77b03933cf8769299fe
@@ -108,5 +110,7 @@ github.com/klauspost/cpuid v1.2.0
github.com/onsi/ginkgo v1.7.0
github.com/onsi/gomega v1.4.3
github.com/hpcloud/tail v1.0.0
-gopkg.in/fsnotify.v1 v1.4.2
gopkg.in/tomb.v1 v1
+github.com/spf13/cobra v0.0.3
+github.com/inconshreveable/mousetrap v1.0.0
+gopkg.in/fsnotify.v1 v1.4.7
diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/VividCortex/ewma/LICENSE
index 42a597e29..a78d643ed 100644
--- a/vendor/github.com/urfave/cli/LICENSE
+++ b/vendor/github.com/VividCortex/ewma/LICENSE
@@ -1,6 +1,6 @@
-MIT License
+The MIT License
-Copyright (c) 2016 Jeremy Saenz & Contributors
+Copyright (c) 2013 VividCortex
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/VividCortex/ewma/README.md b/vendor/github.com/VividCortex/ewma/README.md
new file mode 100644
index 000000000..7aab61b87
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/README.md
@@ -0,0 +1,140 @@
+# EWMA [![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) ![Build Status](https://circleci.com/gh/VividCortex/moving_average.png?circle-token=1459fa37f9ca0e50cef05d1963146d96d47ea523)
+
+This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
+Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).
+
+### Exponentially Weighted Moving Average
+
+An exponentially weighted moving average is a way to continuously compute a type of
+average for a series of numbers, as the numbers arrive. After a value in the series is
+added to the average, its weight in the average decreases exponentially over time. This
+biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
+their inexpensive computational and memory cost, as well as the fact that they represent
+the recent central tendency of the series of values.
+
+The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
+is biased towards recent history. The alpha must be between 0 and 1, and is typically
+a fairly small number, such as 0.04. We will discuss the choice of alpha later.
+
+The algorithm works thus, in pseudocode:
+
+1. Multiply the next number in the series by alpha.
+2. Multiply the current value of the average by 1 minus alpha.
+3. Add the result of steps 1 and 2, and store it as the new current value of the average.
+4. Repeat for each number in the series.
+
+There are special-case behaviors for how to initialize the current value, and these vary
+between implementations. One approach is to start with the first value in the series;
+another is to average the first 10 or so values in the series using an arithmetic average,
+and then begin the incremental updating of the average. Each method has pros and cons.
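+
+A minimal Go sketch of this update rule (illustrative only, not this package's API; the
+first-value initialization is just one of the options described above):
+
+```go
+// ewmaSketch folds a series of samples into an exponentially weighted moving average.
+// It assumes len(samples) > 0 and initializes the average with the first sample.
+func ewmaSketch(samples []float64, alpha float64) float64 {
+	avg := samples[0]
+	for _, s := range samples[1:] {
+		// Weight the new sample by alpha and decay the current average by (1 - alpha).
+		avg = alpha*s + (1-alpha)*avg
+	}
+	return avg
+}
+```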
+
+It may help to look at it pictorially. Suppose the series has five numbers, and we choose
+alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.
+
+![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png)
+
+Now let's take the moving average of those numbers. First we set the average to the value
+of the first number.
+
+![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png)
+
+Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
+them to generate a new value.
+
+![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png)
+
+This continues until we are done.
+
+![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png)
+
+Notice how each of the values in the series decays by half each time a new value
+is added, and the top of the bars in the lower portion of the image represents the
+size of the moving average. It is a smoothed, or low-pass, average of the original
+series.
+
+For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.
+
+### Choosing Alpha
+
+Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
+that averages over the previous N samples. What is the average age of each sample? It is N/2.
+
+Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula
+to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
+"Production and Operations Analysis" by Steven Nahmias.
+
+So, for example, if you have a time-series with samples once per second, and you want to get the
+moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
+is the constant alpha used for this repository's SimpleEWMA.
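
As a quick sketch of that conversion (the helper name is an assumption, not part of this package), a 60-sample window reproduces the constant quoted above:

```go
package example

// alphaForWindow converts a sliding-window length N into the decay factor of
// an EWMA whose samples have the same average age: alpha = 2/(N+1).
func alphaForWindow(n float64) float64 {
	return 2 / (n + 1)
}

// alphaForWindow(60) == 2.0/61 ≈ 0.032786885, the value quoted above.
```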
+
+### Implementations
+
+This repository contains two implementations of the EWMA algorithm, with different properties.
+
+The implementations all conform to the MovingAverage interface, and the constructor returns
+that type.
+
+Current implementations assume an implicit time interval of 1.0 between every sample added.
+That is, the passage of time is treated as though it's the same as the arrival of samples.
+If you need time-based decay when samples are not arriving precisely at set intervals, then
+this package will not support your needs at present.
+
+#### SimpleEWMA
+
+A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
+for multiple reasons. It has no warm-up period and it uses a constant
+decay. These properties let it use less memory. It will also behave
+differently when it's equal to zero, which is assumed to mean
+uninitialized, so if a value is likely to actually become zero over time,
+then any non-zero value will cause a sharp jump instead of a small change.
+
+#### VariableEWMA
+
+Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
+It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
+until you have added the required number of samples to it. It uses some memory to store the
+number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.
+
+## Usage
+
+### API Documentation
+
+View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).
+
+```go
+package main
+import "github.com/VividCortex/ewma"
+
+func main() {
+ samples := [100]float64{
+ 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
+ }
+
+ e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params
+ a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)
+
+ for _, f := range samples {
+ e.Add(f)
+ a.Add(f)
+ }
+
+ e.Value() //=> 13.577404704631077
+ a.Value() //=> 1.5806140565521463e-12
+}
+```
+
+## Contributing
+
+We only accept pull requests for minor fixes or improvements. This includes:
+
+* Small bug fixes
+* Typos
+* Documentation or comments
+
+Please open issues to discuss new features. Pull requests for new features will be rejected,
+so we recommend forking the repository and making changes in your fork for your use case.
+
+## License
+
+This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
+It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
diff --git a/vendor/github.com/VividCortex/ewma/ewma.go b/vendor/github.com/VividCortex/ewma/ewma.go
new file mode 100644
index 000000000..44d5d53e3
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/ewma.go
@@ -0,0 +1,126 @@
+// Package ewma implements exponentially weighted moving averages.
+package ewma
+
+// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
+// Please see the LICENSE file for applicable license terms.
+
+const (
+ // By default, we average over a one-minute period, which means the average
+ // age of the metrics in the period is 30 seconds.
+ AVG_METRIC_AGE float64 = 30.0
+
+ // The formula for computing the decay factor from the average age comes
+ // from "Production and Operations Analysis" by Steven Nahmias.
+ DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)
+
+ // For best results, the moving average should not be initialized to the
+ // samples it sees immediately. The book "Production and Operations
+ // Analysis" by Steven Nahmias suggests initializing the moving average to
+ // the mean of the first 10 samples. Until the VariableEwma has seen this
+ // many samples, it is not "ready" to be queried for the value of the
+ // moving average. This adds some memory cost.
+ WARMUP_SAMPLES uint8 = 10
+)
+
+// MovingAverage is the interface that computes a moving average over a time-
+// series stream of numbers. The average may be over a window or exponentially
+// decaying.
+type MovingAverage interface {
+ Add(float64)
+ Value() float64
+ Set(float64)
+}
+
+// NewMovingAverage constructs a MovingAverage that computes an average with the
+// desired characteristics in the moving window or exponential decay. If no
+// age is given, it constructs a default exponentially weighted implementation
+// that consumes minimal memory. The age is related to the decay factor alpha
+// by the formula given for the DECAY constant. It signifies the average age
+// of the samples as time goes to infinity.
+func NewMovingAverage(age ...float64) MovingAverage {
+ if len(age) == 0 || age[0] == AVG_METRIC_AGE {
+ return new(SimpleEWMA)
+ }
+ return &VariableEWMA{
+ decay: 2 / (age[0] + 1),
+ }
+}
+
+// A SimpleEWMA represents the exponentially weighted moving average of a
+// series of numbers. It WILL have different behavior than the VariableEWMA
+// for multiple reasons. It has no warm-up period and it uses a constant
+// decay. These properties let it use less memory. It will also behave
+// differently when it's equal to zero, which is assumed to mean
+// uninitialized, so if a value is likely to actually become zero over time,
+// then any non-zero value will cause a sharp jump instead of a small change.
+// However, note that this takes a long time, and the value may just
+// decay to a stable value that's close to zero, but which won't be mistaken
+// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
+type SimpleEWMA struct {
+ // The current value of the average. After adding with Add(), this is
+ // updated to reflect the average of all values seen thus far.
+ value float64
+}
+
+// Add adds a value to the series and updates the moving average.
+func (e *SimpleEWMA) Add(value float64) {
+ if e.value == 0 { // this is a proxy for "uninitialized"
+ e.value = value
+ } else {
+ e.value = (value * DECAY) + (e.value * (1 - DECAY))
+ }
+}
+
+// Value returns the current value of the moving average.
+func (e *SimpleEWMA) Value() float64 {
+ return e.value
+}
+
+// Set sets the EWMA's value.
+func (e *SimpleEWMA) Set(value float64) {
+ e.value = value
+}
+
+// VariableEWMA represents the exponentially weighted moving average of a series of
+// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
+type VariableEWMA struct {
+ // The multiplier factor by which the previous samples decay.
+ decay float64
+ // The current value of the average.
+ value float64
+ // The number of samples added to this instance.
+ count uint8
+}
+
+// Add adds a value to the series and updates the moving average.
+func (e *VariableEWMA) Add(value float64) {
+ switch {
+ case e.count < WARMUP_SAMPLES:
+ e.count++
+ e.value += value
+ case e.count == WARMUP_SAMPLES:
+ e.count++
+ e.value = e.value / float64(WARMUP_SAMPLES)
+ e.value = (value * e.decay) + (e.value * (1 - e.decay))
+ default:
+ e.value = (value * e.decay) + (e.value * (1 - e.decay))
+ }
+}
+
+// Value returns the current value of the average, or 0.0 if the series hasn't
+// warmed up yet.
+func (e *VariableEWMA) Value() float64 {
+ if e.count <= WARMUP_SAMPLES {
+ return 0.0
+ }
+
+ return e.value
+}
+
+// Set sets the EWMA's value.
+func (e *VariableEWMA) Set(value float64) {
+ e.value = value
+ if e.count <= WARMUP_SAMPLES {
+ e.count = WARMUP_SAMPLES + 1
+ }
+}
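
As a small, hypothetical usage sketch of the warm-up behavior described by the constants above: `Value()` reports 0.0 until more than `WARMUP_SAMPLES` values have been added.

```go
package main

import (
	"fmt"

	"github.com/VividCortex/ewma"
)

func main() {
	// Any age other than the default 30 yields a VariableEWMA with
	// decay 2/(age+1); here age 15 gives a decay of 0.125.
	v := ewma.NewMovingAverage(15)

	for i := 1; i <= 12; i++ {
		v.Add(float64(i))
		// Value() reports 0.0 for the first WARMUP_SAMPLES (10) additions,
		// then switches to the warmed-up average.
		fmt.Printf("after %2d samples: %g\n", i, v.Value())
	}
}
```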
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index cbdb5c9f9..755bc348e 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -8,6 +8,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "time"
"github.com/containers/buildah/docker"
"github.com/containers/buildah/util"
@@ -25,7 +26,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.6-dev"
+ Version = "1.7"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
@@ -175,6 +176,15 @@ type Builder struct {
// after processing the AddCapabilities set, when running commands in the container.
// If a capability appears in both lists, it will be dropped.
DropCapabilities []string
+ // PrependedEmptyLayers are history entries that we'll add to a
+ // committed image, after any history items that we inherit from a base
+ // image, but before the history item for the layer that we're
+ // committing.
+ PrependedEmptyLayers []v1.History
+ // AppendedEmptyLayers are history entries that we'll add to a
+ // committed image after the history item for the layer that we're
+ // committing.
+ AppendedEmptyLayers []v1.History
CommonBuildOpts *CommonBuildOptions
// TopLayer is the top layer of the image
@@ -209,11 +219,24 @@ type BuilderInfo struct {
DefaultCapabilities []string
AddCapabilities []string
DropCapabilities []string
+ History []v1.History
}
// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
// This is used in the inspect command to display Manifest and Config as string and not []byte.
func GetBuildInfo(b *Builder) BuilderInfo {
+ history := copyHistory(b.OCIv1.History)
+ history = append(history, copyHistory(b.PrependedEmptyLayers)...)
+ now := time.Now().UTC()
+ created := &now
+ history = append(history, v1.History{
+ Created: created,
+ CreatedBy: b.ImageCreatedBy,
+ Author: b.Maintainer(),
+ Comment: b.ImageHistoryComment,
+ EmptyLayer: false,
+ })
+ history = append(history, copyHistory(b.AppendedEmptyLayers)...)
return BuilderInfo{
Type: b.Type,
FromImage: b.FromImage,
@@ -239,6 +262,7 @@ func GetBuildInfo(b *Builder) BuilderInfo {
DefaultCapabilities: append([]string{}, util.DefaultCapabilities...),
AddCapabilities: append([]string{}, b.AddCapabilities...),
DropCapabilities: append([]string{}, b.DropCapabilities...),
+ History: history,
}
}
@@ -312,11 +336,6 @@ type BuilderOptions struct {
// needs to be pulled and the image name alone can not be resolved to a
// reference to a source image. No separator is implicitly added.
Registry string
- // Transport is a value which is prepended to the image's name, if it
- // needs to be pulled and the image name alone, or the image name and
- // the registry together, can not be resolved to a reference to a
- // source image. No separator is implicitly added.
- Transport string
// PullBlobDirectory is the name of a directory in which we'll attempt
// to store copies of layer blobs that we pull down, if any. It should
// already exist.
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 6a1400e61..9bcac1683 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -351,21 +351,6 @@ func runUsingChrootMain() {
defer stdoutRead.Close()
defer stderrRead.Close()
}
- // A helper that returns false if err is an error that would cause us
- // to give up.
- logIfNotRetryable := func(err error, what string) (retry bool) {
- if err == nil {
- return true
- }
- if errno, isErrno := err.(syscall.Errno); isErrno {
- switch errno {
- case syscall.EINTR, syscall.EAGAIN:
- return true
- }
- }
- logrus.Error(what)
- return false
- }
for readFd, writeFd := range relays {
if err := unix.SetNonblock(readFd, true); err != nil {
logrus.Errorf("error setting descriptor %d (%s) non-blocking: %v", readFd, fdDesc[readFd], err)
@@ -388,7 +373,7 @@ func runUsingChrootMain() {
fds = append(fds, unix.PollFd{Fd: int32(fd), Events: unix.POLLIN | unix.POLLHUP})
}
_, err := unix.Poll(fds, pollTimeout)
- if !logIfNotRetryable(err, fmt.Sprintf("poll: %v", err)) {
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("poll: %v", err)) {
return
}
removeFds := make(map[int]struct{})
@@ -405,7 +390,7 @@ func runUsingChrootMain() {
}
b := make([]byte, 8192)
nread, err := unix.Read(int(rfd.Fd), b)
- logIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
+ util.LogIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
if nread > 0 {
if wfd, ok := relays[int(rfd.Fd)]; ok {
nwritten, err := buffers[wfd].Write(b[:nread])
@@ -422,7 +407,7 @@ func runUsingChrootMain() {
// from this descriptor, read as much as there is to read.
for rfd.Revents&unix.POLLHUP == unix.POLLHUP {
nr, err := unix.Read(int(rfd.Fd), b)
- logIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
+ util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
if nr <= 0 {
break
}
@@ -447,7 +432,7 @@ func runUsingChrootMain() {
for wfd, buffer := range buffers {
if buffer.Len() > 0 {
nwritten, err := unix.Write(wfd, buffer.Bytes())
- logIfNotRetryable(err, fmt.Sprintf("write %s: %v", fdDesc[wfd], err))
+ util.LogIfNotRetryable(err, fmt.Sprintf("write %s: %v", fdDesc[wfd], err))
if nwritten >= 0 {
_ = buffer.Next(nwritten)
}
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 591ead4e6..da28bea61 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -67,6 +67,10 @@ type CommitOptions struct {
OnBuild []string
// Parent is the base image that this image was created by.
Parent string
+
+ // OmitTimestamp forces epoch 0 as created timestamp to allow for
+ // deterministic, content-addressable builds.
+ OmitTimestamp bool
}
// PushOptions can be used to alter how an image is copied somewhere.
@@ -97,6 +101,10 @@ type PushOptions struct {
// regenerate from on-disk layers, substituting them in the list of
// blobs to copy whenever possible.
BlobDirectory string
+ // Quiet is a boolean value that determines if minimal output to
+ // the user will be displayed; this is best used for logging.
+ // The default is false.
+ Quiet bool
}
// Commit writes the contents of the container, along with its updated
@@ -140,7 +148,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
}
}
- src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp)
+ src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp, options.OmitTimestamp)
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
}
@@ -224,6 +232,9 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) {
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+ if options.Quiet {
+ options.ReportWriter = nil // Turns off logging output
+ }
blocked, err := isReferenceBlocked(dest, systemContext)
if err != nil {
return nil, "", errors.Wrapf(err, "error checking if pushing to registry for %q is blocked", transports.ImageName(dest))
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 114b8ca37..05b0abb23 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -574,3 +574,50 @@ func (b *Builder) SetHealthcheck(config *docker.HealthConfig) {
}
}
}
+
+// AddPrependedEmptyLayer adds an item to the history that we'll create when
+// committing the image, after any history we inherit from the base image, but
+// before the history item that we'll use to describe the new layer that we're
+// adding.
+func (b *Builder) AddPrependedEmptyLayer(created *time.Time, createdBy, author, comment string) {
+ if created != nil {
+ copiedTimestamp := *created
+ created = &copiedTimestamp
+ }
+ b.PrependedEmptyLayers = append(b.PrependedEmptyLayers, ociv1.History{
+ Created: created,
+ CreatedBy: createdBy,
+ Author: author,
+ Comment: comment,
+ EmptyLayer: true,
+ })
+}
+
+// ClearPrependedEmptyLayers clears the list of history entries that we'll add
+// to the committed image before the entry for the layer that we're adding.
+func (b *Builder) ClearPrependedEmptyLayers() {
+ b.PrependedEmptyLayers = nil
+}
+
+// AddAppendedEmptyLayer adds an item to the history that we'll create when
+// committing the image, after the history item that we'll use to describe the
+// new layer that we're adding.
+func (b *Builder) AddAppendedEmptyLayer(created *time.Time, createdBy, author, comment string) {
+ if created != nil {
+ copiedTimestamp := *created
+ created = &copiedTimestamp
+ }
+ b.AppendedEmptyLayers = append(b.AppendedEmptyLayers, ociv1.History{
+ Created: created,
+ CreatedBy: createdBy,
+ Author: author,
+ Comment: comment,
+ EmptyLayer: true,
+ })
+}
+
+// ClearAppendedEmptyLayers clears the list of history entries that we'll add
+// to the committed image after the entry for the layer that we're adding.
+func (b *Builder) ClearAppendedEmptyLayers() {
+ b.AppendedEmptyLayers = nil
+}
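
A brief, hypothetical sketch of how the new empty-layer history helpers might be used; the builder value and `CreatedBy` strings are assumptions for illustration:

```go
package example

import (
	"time"

	"github.com/containers/buildah"
)

// annotateHistory sketches how the helpers above might be called before a
// commit; b is assumed to be an initialized *buildah.Builder.
func annotateHistory(b *buildah.Builder) {
	now := time.Now().UTC()
	// Recorded after any base-image history but before the committed layer's entry.
	b.AddPrependedEmptyLayer(&now, "/bin/sh -c #(nop) LABEL stage=build", "", "")
	// Recorded after the committed layer's own entry.
	b.AddAppendedEmptyLayer(nil, "/bin/sh -c #(nop) ENV DEBUG=1", "", "")
	// Both lists can be cleared again if the extra history is no longer wanted.
	b.ClearPrependedEmptyLayers()
	b.ClearAppendedEmptyLayers()
}
```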
diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go
index 6847d36fd..c59be0e60 100644
--- a/vendor/github.com/containers/buildah/docker/types.go
+++ b/vendor/github.com/containers/buildah/docker/types.go
@@ -18,7 +18,7 @@ const TypeLayers = "layers"
const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
// github.com/moby/moby/image/rootfs.go
-// RootFS describes images root filesystem
+// V2S2RootFS describes images root filesystem
// This is currently a placeholder that only supports layers. In the future
// this can be made into an interface that supports different implementations.
type V2S2RootFS struct {
@@ -27,7 +27,7 @@ type V2S2RootFS struct {
}
// github.com/moby/moby/image/image.go
-// History stores build commands that were used to create an image
+// V2S2History stores build commands that were used to create an image
type V2S2History struct {
// Created is the timestamp at which the image was created
Created time.Time `json:"created"`
@@ -158,7 +158,7 @@ type V1Image struct {
}
// github.com/moby/moby/image/image.go
-// Image stores the image configuration
+// V2Image stores the image configuration
type V2Image struct {
V1Image
Parent ID `json:"parent,omitempty"`
@@ -176,7 +176,7 @@ type V2Image struct {
}
// github.com/docker/distribution/manifest/versioned.go
-// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// V2Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version.
type V2Versioned struct {
@@ -188,21 +188,21 @@ type V2Versioned struct {
}
// github.com/docker/distribution/manifest/schema1/manifest.go
-// FSLayer is a container struct for BlobSums defined in an image manifest
+// V2S1FSLayer is a container struct for BlobSums defined in an image manifest
type V2S1FSLayer struct {
// BlobSum is the tarsum of the referenced filesystem image layer
BlobSum digest.Digest `json:"blobSum"`
}
// github.com/docker/distribution/manifest/schema1/manifest.go
-// History stores unstructured v1 compatibility information
+// V2S1History stores unstructured v1 compatibility information
type V2S1History struct {
// V1Compatibility is the raw v1 compatibility information
V1Compatibility string `json:"v1Compatibility"`
}
// github.com/docker/distribution/manifest/schema1/manifest.go
-// Manifest provides the base accessible fields for working with V2 image
+// V2S1Manifest provides the base accessible fields for working with V2 image
// format in the registry.
type V2S1Manifest struct {
V2Versioned
@@ -225,7 +225,7 @@ type V2S1Manifest struct {
}
// github.com/docker/distribution/blobs.go
-// Descriptor describes targeted content. Used in conjunction with a blob
+// V2S2Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
@@ -250,7 +250,7 @@ type V2S2Descriptor struct {
}
// github.com/docker/distribution/manifest/schema2/manifest.go
-// Manifest defines a schema2 manifest.
+// V2S2Manifest defines a schema2 manifest.
type V2S2Manifest struct {
V2Versioned
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 47842db7d..b0876fb6d 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -58,6 +58,8 @@ type containerImageRef struct {
tarPath func(path string) (io.ReadCloser, error)
parent string
blobDirectory string
+ preEmptyLayers []v1.History
+ postEmptyLayers []v1.History
}
type containerImageSource struct {
@@ -396,6 +398,35 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
// Build history notes in the image configurations.
+ appendHistory := func(history []v1.History) {
+ for i := range history {
+ var created *time.Time
+ if history[i].Created != nil {
+ copiedTimestamp := *history[i].Created
+ created = &copiedTimestamp
+ }
+ onews := v1.History{
+ Created: created,
+ CreatedBy: history[i].CreatedBy,
+ Author: history[i].Author,
+ Comment: history[i].Comment,
+ EmptyLayer: true,
+ }
+ oimage.History = append(oimage.History, onews)
+ if created == nil {
+ created = &time.Time{}
+ }
+ dnews := docker.V2S2History{
+ Created: *created,
+ CreatedBy: history[i].CreatedBy,
+ Author: history[i].Author,
+ Comment: history[i].Comment,
+ EmptyLayer: true,
+ }
+ dimage.History = append(dimage.History, dnews)
+ }
+ }
+ appendHistory(i.preEmptyLayers)
onews := v1.History{
Created: &i.created,
CreatedBy: i.createdBy,
@@ -412,6 +443,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
EmptyLayer: false,
}
dimage.History = append(dimage.History, dnews)
+ appendHistory(i.postEmptyLayers)
dimage.Parent = docker.ID(digest.FromString(i.parent))
// Sanity check that we didn't just create a mismatch between non-empty layers in the
@@ -603,7 +635,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
-func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
+func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time, omitTimestamp bool) (types.ImageReference, error) {
var name reference.Named
container, err := b.store.Container(b.ContainerID)
if err != nil {
@@ -630,6 +662,10 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
created = historyTimestamp.UTC()
}
+ if omitTimestamp {
+ created = time.Unix(0, 0)
+ }
+
ref := &containerImageRef{
store: b.store,
compression: compress,
@@ -650,6 +686,8 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
tarPath: b.tarPath(),
parent: parent,
blobDirectory: blobDirectory,
+ preEmptyLayers: b.PrependedEmptyLayers,
+ postEmptyLayers: b.AppendedEmptyLayers,
}
return ref, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 217bcfc79..d69eab52f 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -62,11 +62,6 @@ type BuildOptions struct {
// needs to be pulled and the image name alone can not be resolved to a
// reference to a source image. No separator is implicitly added.
Registry string
- // Transport is a value which is prepended to the image's name, if it
- // needs to be pulled and the image name alone, or the image name and
- // the registry together, can not be resolved to a reference to a
- // source image. No separator is implicitly added.
- Transport string
// IgnoreUnrecognizedInstructions tells us to just log instructions we
// don't recognize, and try to keep going.
IgnoreUnrecognizedInstructions bool
@@ -171,6 +166,8 @@ type BuildOptions struct {
ForceRmIntermediateCtrs bool
// BlobDirectory is a directory which we'll use for caching layer blobs.
BlobDirectory string
+ // Target is the targeted FROM stage in the Dockerfile to build
+ Target string
}
// Executor is a buildah-based implementation of the imagebuilder.Executor
@@ -184,7 +181,6 @@ type Executor struct {
builder *buildah.Builder
pullPolicy buildah.PullPolicy
registry string
- transport string
ignoreUnrecognizedInstructions bool
quiet bool
runtime string
@@ -580,7 +576,6 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
contextDir: options.ContextDirectory,
pullPolicy: options.PullPolicy,
registry: options.Registry,
- transport: options.Transport,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
quiet: options.Quiet,
runtime: options.Runtime,
@@ -670,7 +665,6 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s
FromImage: from,
PullPolicy: b.pullPolicy,
Registry: b.registry,
- Transport: b.transport,
PullBlobDirectory: b.blobDirectory,
SignaturePolicyPath: b.signaturePolicyPath,
ReportWriter: b.reportWriter,
@@ -783,7 +777,7 @@ func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
if b.output != "" {
imageRef, err = alltransports.ParseImageName(b.output)
if err != nil {
- candidates, _, err := util.ResolveName(b.output, "", b.systemContext, b.store)
+ candidates, _, _, err := util.ResolveName(b.output, "", b.systemContext, b.store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
}
@@ -1311,13 +1305,17 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin
// Check if we have a one line Dockerfile making layers irrelevant
// or the user told us to ignore layers.
- ignoreLayers := (len(stages) < 2 && len(stages[0].Node.Children) < 2) || (!b.layers && !b.noCache)
+ singleLineDockerfile := (len(stages) < 2 && len(stages[0].Node.Children) < 1)
+ ignoreLayers := singleLineDockerfile || !b.layers && !b.noCache
if ignoreLayers {
imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
if err != nil {
return "", nil, err
}
+ if singleLineDockerfile {
+ b.log("COMMIT %s", ref)
+ }
imageID = imgID
imageRef = ref
}
@@ -1437,6 +1435,13 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
if err != nil {
return "", nil, errors.Wrap(err, "error reading multiple stages")
}
+ if options.Target != "" {
+ stagesTargeted, ok := stages.ThroughTarget(options.Target)
+ if !ok {
+ return "", nil, errors.Errorf("The target %q was not found in the provided Dockerfile", options.Target)
+ }
+ stages = stagesTargeted
+ }
return exec.Build(ctx, stages)
}
@@ -1527,6 +1532,17 @@ func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
return lastErr
}
+func (b *Executor) EnsureContainerPath(path string) error {
+ _, err := os.Stat(filepath.Join(b.mountPoint, path))
+ if err != nil && os.IsNotExist(err) {
+ err = os.MkdirAll(filepath.Join(b.mountPoint, path), 0755)
+ }
+ if err != nil {
+ return errors.Wrapf(err, "error ensuring container path %q", path)
+ }
+ return nil
+}
+
// preprocessDockerfileContents runs CPP(1) in preprocess-only mode on the input
// dockerfile content and will use ctxDir as the base include path.
//
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 7e7f97e49..01c2e733f 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -28,15 +28,14 @@ const (
minimumTruncatedIDLength = 3
)
-func pullAndFindImage(ctx context.Context, store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
+func pullAndFindImage(ctx context.Context, store storage.Store, transport string, imageName string, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
pullOptions := PullOptions{
ReportWriter: options.ReportWriter,
Store: store,
SystemContext: options.SystemContext,
- Transport: options.Transport,
BlobDirectory: options.PullBlobDirectory,
}
- ref, err := pullImage(ctx, store, imageName, pullOptions, sc)
+ ref, err := pullImage(ctx, store, transport, imageName, pullOptions, sc)
if err != nil {
logrus.Debugf("error pulling image %q: %v", imageName, err)
return nil, nil, err
@@ -101,16 +100,16 @@ func newContainerIDMappingOptions(idmapOptions *IDMappingOptions) storage.IDMapp
return options
}
-func resolveImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, *storage.Image, error) {
+func resolveImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, string, *storage.Image, error) {
type failure struct {
resolvedImageName string
err error
}
-
- candidates, searchRegistriesWereUsedButEmpty, err := util.ResolveName(options.FromImage, options.Registry, systemContext, store)
+ candidates, transport, searchRegistriesWereUsedButEmpty, err := util.ResolveName(options.FromImage, options.Registry, systemContext, store)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", options.FromImage)
+ return nil, "", nil, errors.Wrapf(err, "error parsing reference to image %q", options.FromImage)
}
+
failures := []failure{}
for _, image := range candidates {
var err error
@@ -118,25 +117,25 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
if img, err := store.Image(image); err == nil && img != nil && strings.HasPrefix(img.ID, image) {
ref, err := is.Transport.ParseStoreReference(store, img.ID)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
+ return nil, "", nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
}
- return ref, img, nil
+ return ref, transport, img, nil
}
}
if options.PullPolicy == PullAlways {
- pulledImg, pulledReference, err := pullAndFindImage(ctx, store, image, options, systemContext)
+ pulledImg, pulledReference, err := pullAndFindImage(ctx, store, transport, image, options, systemContext)
if err != nil {
logrus.Debugf("unable to pull and read image %q: %v", image, err)
failures = append(failures, failure{resolvedImageName: image, err: err})
continue
}
- return pulledReference, pulledImg, nil
+ return pulledReference, transport, pulledImg, nil
}
srcRef, err := alltransports.ParseImageName(image)
if err != nil {
- if options.Transport == "" {
+ if transport == "" {
logrus.Debugf("error parsing image name %q: %v", image, err)
failures = append(failures, failure{
resolvedImageName: image,
@@ -144,12 +143,13 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
})
continue
}
- logrus.Debugf("error parsing image name %q as given, trying with transport %q: %v", image, options.Transport, err)
- transport := options.Transport
+ logrus.Debugf("error parsing image name %q as given, trying with transport %q: %v", image, transport, err)
+
+ trans := transport
if transport != util.DefaultTransport {
- transport = transport + ":"
+ trans = trans + ":"
}
- srcRef2, err := alltransports.ParseImageName(transport + image)
+ srcRef2, err := alltransports.ParseImageName(trans + image)
if err != nil {
logrus.Debugf("error parsing image name %q: %v", transport+image, err)
failures = append(failures, failure{
@@ -163,19 +163,19 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
destImage, err := localImageNameForReference(ctx, store, srcRef, options.FromImage)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef))
+ return nil, "", nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef))
}
if destImage == "" {
- return nil, nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
+ return nil, "", nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
}
ref, err := is.Transport.ParseStoreReference(store, destImage)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", destImage)
+ return nil, "", nil, errors.Wrapf(err, "error parsing reference to image %q", destImage)
}
img, err := is.Transport.GetStoreImage(store, ref)
if err == nil {
- return ref, img, nil
+ return ref, transport, img, nil
}
if errors.Cause(err) == storage.ErrImageUnknown && options.PullPolicy != PullIfMissing {
@@ -187,26 +187,26 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
continue
}
- pulledImg, pulledReference, err := pullAndFindImage(ctx, store, image, options, systemContext)
+ pulledImg, pulledReference, err := pullAndFindImage(ctx, store, transport, image, options, systemContext)
if err != nil {
logrus.Debugf("unable to pull and read image %q: %v", image, err)
failures = append(failures, failure{resolvedImageName: image, err: err})
continue
}
- return pulledReference, pulledImg, nil
+ return pulledReference, transport, pulledImg, nil
}
if len(failures) != len(candidates) {
- return nil, nil, fmt.Errorf("internal error: %d candidates (%#v) vs. %d failures (%#v)", len(candidates), candidates, len(failures), failures)
+ return nil, "", nil, fmt.Errorf("internal error: %d candidates (%#v) vs. %d failures (%#v)", len(candidates), candidates, len(failures), failures)
}
registriesConfPath := sysregistries.RegistriesConfPath(systemContext)
switch len(failures) {
case 0:
if searchRegistriesWereUsedButEmpty {
- return nil, nil, errors.Errorf("image name %q is a short name and no search registries are defined in %s.", options.FromImage, registriesConfPath)
+ return nil, "", nil, errors.Errorf("image name %q is a short name and no search registries are defined in %s.", options.FromImage, registriesConfPath)
}
- return nil, nil, fmt.Errorf("internal error: no pull candidates were available for %q for an unknown reason", options.FromImage)
+ return nil, "", nil, fmt.Errorf("internal error: no pull candidates were available for %q for an unknown reason", options.FromImage)
case 1:
err := failures[0].err
@@ -216,7 +216,7 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
if searchRegistriesWereUsedButEmpty {
err = errors.Wrapf(err, "(image name %q is a short name and no search registries are defined in %s)", options.FromImage, registriesConfPath)
}
- return nil, nil, err
+ return nil, "", nil, err
default:
// NOTE: a multi-line error string:
@@ -224,7 +224,7 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
for _, f := range failures {
e = e + fmt.Sprintf("\n* %q: %s", f.resolvedImageName, f.err.Error())
}
- return nil, nil, errors.New(e)
+ return nil, "", nil, errors.New(e)
}
}
@@ -250,21 +250,19 @@ func findUnusedContainer(name string, containers []storage.Container) string {
}
func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
- var ref types.ImageReference
- var img *storage.Image
- var err error
-
+ var (
+ ref types.ImageReference
+ img *storage.Image
+ err error
+ )
if options.FromImage == BaseImageFakeName {
options.FromImage = ""
}
- if options.Transport == "" {
- options.Transport = util.DefaultTransport
- }
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
if options.FromImage != "" && options.FromImage != "scratch" {
- ref, img, err = resolveImage(ctx, systemContext, store, options)
+ ref, _, img, err = resolveImage(ctx, systemContext, store, options)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index dc1b2bc69..09f951b35 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -13,263 +13,185 @@ import (
"github.com/containers/buildah/util"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/pflag"
)
-var (
- usernsFlags = []cli.Flag{
- cli.StringFlag{
- Name: "userns",
- Usage: "'container', `path` of user namespace to join, or 'host'",
- },
- cli.StringSliceFlag{
- Name: "userns-uid-map",
- Usage: "`containerID:hostID:length` UID mapping to use in user namespace",
- },
- cli.StringSliceFlag{
- Name: "userns-gid-map",
- Usage: "`containerID:hostID:length` GID mapping to use in user namespace",
- },
- cli.StringFlag{
- Name: "userns-uid-map-user",
- Usage: "`name` of entries from /etc/subuid to use to set user namespace UID mapping",
- },
- cli.StringFlag{
- Name: "userns-gid-map-group",
- Usage: "`name` of entries from /etc/subgid to use to set user namespace GID mapping",
- },
- }
+// LayerResults represents the results of the layer flags
+type LayerResults struct {
+ ForceRm bool
+ Layers bool
+}
- NamespaceFlags = []cli.Flag{
- cli.StringFlag{
- Name: string(specs.IPCNamespace),
- Usage: "'container', `path` of IPC namespace to join, or 'host'",
- },
- cli.StringFlag{
- Name: string(specs.NetworkNamespace) + ", net",
- Usage: "'container', `path` of network namespace to join, or 'host'",
- },
- cli.StringFlag{
- Name: "cni-config-dir",
- Usage: "`directory` of CNI configuration files",
- Value: util.DefaultCNIConfigDir,
- },
- cli.StringFlag{
- Name: "cni-plugin-path",
- Usage: "`path` of CNI network plugins",
- Value: util.DefaultCNIPluginPath,
- },
- cli.StringFlag{
- Name: string(specs.PIDNamespace),
- Usage: "'container', `path` of PID namespace to join, or 'host'",
- },
- cli.StringFlag{
- Name: string(specs.UTSNamespace),
- Usage: "'container', `path` of UTS namespace to join, or 'host'",
- },
- }
+// UserNSResults represents the results for the UserNS flags
+type UserNSResults struct {
+ UserNS string
+ UserNSUIDMap []string
+ UserNSGIDMap []string
+ UserNSUIDMapUser string
+ UserNSGIDMapGroup string
+}
- LayerFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "force-rm",
- Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
- },
- cli.BoolFlag{
- Name: "layers",
- Usage: fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()),
- },
- }
+// NameSpaceResults represents the results for Namespace flags
+type NameSpaceResults struct {
+ IPC string
+ Network string
+ CNIConfigDir string
+ CNIPlugInPath string
+ PID string
+ UTS string
+}
- BudFlags = []cli.Flag{
- cli.StringSliceFlag{
- Name: "annotation",
- Usage: "Set metadata for an image (default [])",
- },
- cli.StringFlag{
- Name: "authfile",
- Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
- },
- cli.StringSliceFlag{
- Name: "build-arg",
- Usage: "`argument=value` to supply to the builder",
- },
- cli.StringFlag{
- Name: "cache-from",
- Usage: "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.",
- },
- cli.StringFlag{
- Name: "cert-dir",
- Value: "",
- Usage: "use certificates at the specified path to access the registry",
- },
- cli.BoolFlag{
- Name: "compress",
- Usage: "This is legacy option, which has no effect on the image",
- },
- cli.StringFlag{
- Name: "creds",
- Value: "",
- Usage: "use `[username[:password]]` for accessing the registry",
- },
- cli.BoolFlag{
- Name: "disable-compression, D",
- Usage: "don't compress layers by default",
- },
- cli.BoolFlag{
- Name: "disable-content-trust",
- Usage: "This is a Docker specific option and is a NOOP",
- },
- cli.StringSliceFlag{
- Name: "file, f",
- Usage: "`pathname or URL` of a Dockerfile",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.",
- Value: DefaultFormat(),
- },
- cli.StringFlag{
- Name: "iidfile",
- Usage: "`file` to write the image ID to",
- },
- cli.StringSliceFlag{
- Name: "label",
- Usage: "Set metadata for an image (default [])",
- },
- cli.BoolFlag{
- Name: "no-cache",
- Usage: "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.",
- },
- cli.StringFlag{
- Name: "logfile",
- Usage: "log to `file` instead of stdout/stderr",
- },
- cli.IntFlag{
- Name: "loglevel",
- Usage: "adjust logging level (range from -2 to 3)",
- },
- cli.StringFlag{
- Name: "platform",
- Usage: "CLI compatibility: no action or effect",
- },
- cli.BoolTFlag{
- Name: "pull",
- Usage: "pull the image if not present",
- },
- cli.BoolFlag{
- Name: "pull-always",
- Usage: "pull the image, even if a version is present",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "refrain from announcing build instructions and image read/write progress",
- },
- cli.BoolTFlag{
- Name: "rm",
- Usage: "Remove intermediate containers after a successful build (default true)",
- },
- cli.StringFlag{
- Name: "runtime",
- Usage: "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.",
- Value: util.Runtime(),
- },
- cli.StringSliceFlag{
- Name: "runtime-flag",
- Usage: "add global flags for the container runtime",
- },
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "`pathname` of signature policy file (not usually used)",
- },
- cli.BoolFlag{
- Name: "squash",
- Usage: "Squash newly built layers into a single new layer. The build process does not currently support caching so this is a NOOP.",
- },
- cli.StringSliceFlag{
- Name: "tag, t",
- Usage: "tagged `name` to apply to the built image",
- },
- cli.BoolTFlag{
- Name: "tls-verify",
- Usage: "require HTTPS and verify certificates when accessing the registry",
- },
- }
+// BudResults represents the results for Bud flags
+type BudResults struct {
+ Annotation []string
+ Authfile string
+ BuildArg []string
+ CacheFrom string
+ CertDir string
+ Compress bool
+ Creds string
+ DisableCompression bool
+ DisableContentTrust bool
+ File []string
+ Format string
+ Iidfile string
+ Label []string
+ Logfile string
+ Loglevel int
+ NoCache bool
+ Platform string
+ Pull bool
+ PullAlways bool
+ Quiet bool
+ Rm bool
+ Runtime string
+ RuntimeFlags []string
+ SignaturePolicy string
+ Squash bool
+ Tag []string
+ Target string
+ TlsVerify bool
+}
- FromAndBudFlags = append(append([]cli.Flag{
- cli.StringSliceFlag{
- Name: "add-host",
- Usage: "add a custom host-to-IP mapping (`host:ip`) (default [])",
- },
- cli.StringFlag{
- Name: "blob-cache",
- Value: "",
- Usage: "assume image blobs in the specified directory will be available for pushing",
- Hidden: true, // this is here mainly so that we can test the API during integration tests
- },
- cli.StringSliceFlag{
- Name: "cap-add",
- Usage: "add the specified capability when running (default [])",
- },
- cli.StringSliceFlag{
- Name: "cap-drop",
- Usage: "drop the specified capability when running (default [])",
- },
- cli.StringFlag{
- Name: "cgroup-parent",
- Usage: "optional parent cgroup for the container",
- },
- cli.Uint64Flag{
- Name: "cpu-period",
- Usage: "limit the CPU CFS (Completely Fair Scheduler) period",
- },
- cli.Int64Flag{
- Name: "cpu-quota",
- Usage: "limit the CPU CFS (Completely Fair Scheduler) quota",
- },
- cli.Uint64Flag{
- Name: "cpu-shares, c",
- Usage: "CPU shares (relative weight)",
- },
- cli.StringFlag{
- Name: "cpuset-cpus",
- Usage: "CPUs in which to allow execution (0-3, 0,1)",
- },
- cli.StringFlag{
- Name: "cpuset-mems",
- Usage: "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
- },
- cli.StringFlag{
- Name: "isolation",
- Usage: "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.",
- Value: DefaultIsolation(),
- },
- cli.StringFlag{
- Name: "memory, m",
- Usage: "memory limit (format: <number>[<unit>], where unit = b, k, m or g)",
- },
- cli.StringFlag{
- Name: "memory-swap",
- Usage: "swap limit equal to memory plus swap: '-1' to enable unlimited swap",
- },
- cli.StringSliceFlag{
- Name: "security-opt",
- Usage: "security options (default [])",
- },
- cli.StringFlag{
- Name: "shm-size",
- Usage: "size of '/dev/shm'. The format is `<number><unit>`.",
- Value: "65536k",
- },
- cli.StringSliceFlag{
- Name: "ulimit",
- Usage: "ulimit options (default [])",
- },
- cli.StringSliceFlag{
- Name: "volume, v",
- Usage: "bind mount a volume into the container (default [])",
- },
- }, usernsFlags...), NamespaceFlags...)
-)
+// FromAndBudResults represents the results for common flags
+// in bud and from
+type FromAndBudResults struct {
+ AddHost []string
+ BlobCache string
+ CapAdd []string
+ CapDrop []string
+ CgroupParent string
+ CPUPeriod uint64
+ CPUQuota int64
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares uint64
+ Isolation string
+ Memory string
+ MemorySwap string
+ SecurityOpt []string
+ ShmSize string
+ Ulimit []string
+ Volume []string
+}
+
+// GetUserNSFlags returns the common flags for usernamespace
+func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
+ usernsFlags := pflag.FlagSet{}
+ usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'")
+ usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerID:hostID:length` UID mapping to use in user namespace")
+ usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerID:hostID:length` GID mapping to use in user namespace")
+ usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping")
+ usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping")
+ return usernsFlags
+}
+
+// GetNameSpaceFlags returns the common flags for a namespace menu
+func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'container', `path` of IPC namespace to join, or 'host'")
+ fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'container', `path` of network namespace to join, or 'host'")
+ // TODO How do we alias net and network?
+ fs.StringVar(&flags.Network, "net", "", "'container', `path` of network namespace to join, or 'host'")
+ fs.MarkHidden("net")
+ fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", util.DefaultCNIConfigDir, "`directory` of CNI configuration files")
+ fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", util.DefaultCNIPluginPath, "`path` of CNI network plugins")
+ fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "'container', `path` of PID namespace to join, or 'host'")
+ fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "'container', `path` of UTS namespace to join, or 'host'")
+ return fs
+}
+
+// GetLayerFlags returns the common flags for layers
+func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.")
+ fs.BoolVar(&flags.Layers, "layers", false, fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()))
+ return fs
+}
+
+// GetBudFlags returns common bud flags
+func GetBudFlags(flags *BudResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.StringSliceVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])")
+ fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
+ fs.StringSliceVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
+ fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
+ fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+ fs.BoolVar(&flags.Compress, "compress", false, "This is a legacy option, which has no effect on the image")
+ fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
+ fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default")
+ fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "This is a Docker specific option and is a NOOP")
+ fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
+ fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
+ fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
+ fs.StringSliceVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])")
+ fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
+ fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
+ fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
+ fs.StringVar(&flags.Platform, "platform", "", "CLI compatibility: no action or effect")
+ fs.BoolVar(&flags.Pull, "pull", true, "pull the image if not present")
+ fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image, even if a version is present")
+ fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
+ fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build (default true)")
+ fs.StringVar(&flags.Runtime, "runtime", util.Runtime(), "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.")
+ fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
+ fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
+ fs.BoolVar(&flags.Squash, "squash", false, "Squash newly built layers into a single new layer. The build process does not currently support caching so this is a NOOP.")
+ fs.StringSliceVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
+ fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
+ fs.BoolVar(&flags.TlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
+ return fs
+}
+
+func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
+ fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
+ fs.MarkHidden("blob-cache")
+ fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])")
+ fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])")
+ fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container")
+ fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period")
+ fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota")
+ fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
+ fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
+ fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
+ fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
+ fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
+ fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
+ fs.StringSliceVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
+ fs.StringVar(&flags.ShmSize, "shm-size", "65536k", "size of '/dev/shm'. The format is `<number><unit>`.")
+ fs.StringSliceVar(&flags.Ulimit, "ulimit", []string{}, "ulimit options (default [])")
+ fs.StringSliceVarP(&flags.Volume, "volume", "v", []string{}, "bind mount a volume into the container (default [])")
+
+ // Add in the user namespace and namespace flags
+ usernsFlags := GetUserNSFlags(usernsResults)
+ namespaceFlags := GetNameSpaceFlags(namespaceResults)
+ fs.AddFlagSet(&usernsFlags)
+ fs.AddFlagSet(&namespaceFlags)
+
+ return fs
+}
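
As a rough sketch, under the assumption of a hypothetical cobra command, these pflag-based helpers could be attached like this:

```go
package example

import (
	"github.com/containers/buildah/pkg/cli"
	"github.com/spf13/cobra"
)

// newBudCommand shows one way the flag-set helpers above might be wired into
// a cobra command; the Run function is omitted for brevity.
func newBudCommand() *cobra.Command {
	var (
		budResults    cli.BudResults
		layerResults  cli.LayerResults
		fromAndBud    cli.FromAndBudResults
		usernsResults cli.UserNSResults
		nsResults     cli.NameSpaceResults
	)
	cmd := &cobra.Command{Use: "bud"}
	budFlags := cli.GetBudFlags(&budResults)
	layerFlags := cli.GetLayerFlags(&layerResults)
	fromAndBudFlags := cli.GetFromAndBudFlags(&fromAndBud, &usernsResults, &nsResults)
	cmd.Flags().AddFlagSet(&budFlags)
	cmd.Flags().AddFlagSet(&layerFlags)
	cmd.Flags().AddFlagSet(&fromAndBudFlags)
	return cmd
}
```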
// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true"
// otherwise it returns false
@@ -299,6 +221,15 @@ func DefaultIsolation() string {
return buildah.OCI
}
+// DefaultHistory returns the default add-history setting
+func DefaultHistory() bool {
+ history := os.Getenv("BUILDAH_HISTORY")
+ if strings.ToLower(history) == "true" || history == "1" {
+ return true
+ }
+ return false
+}
+
func VerifyFlagsArgsOrder(args []string) error {
for _, arg := range args {
if strings.HasPrefix(arg, "-") {
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 41fdea8b1..a26d15631 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -6,11 +6,10 @@ package parse
import (
"fmt"
+ "github.com/spf13/cobra"
"net"
"os"
"path/filepath"
- "reflect"
- "regexp"
"strconv"
"strings"
"unicode"
@@ -22,7 +21,6 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/sys/unix"
)
@@ -35,7 +33,7 @@ const (
)
// CommonBuildOptions parses the build options from the bud cli
-func CommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) {
+func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
var (
memoryLimit int64
memorySwap int64
@@ -49,47 +47,57 @@ func CommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) {
if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil {
defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max))
}
- if c.String("memory") != "" {
- memoryLimit, err = units.RAMInBytes(c.String("memory"))
+ memVal, _ := c.Flags().GetString("memory")
+ if memVal != "" {
+ memoryLimit, err = units.RAMInBytes(memVal)
if err != nil {
return nil, errors.Wrapf(err, "invalid value for memory")
}
}
- if c.String("memory-swap") != "" {
- memorySwap, err = units.RAMInBytes(c.String("memory-swap"))
+
+ memSwapValue, _ := c.Flags().GetString("memory-swap")
+ if memSwapValue != "" {
+ memorySwap, err = units.RAMInBytes(memSwapValue)
if err != nil {
return nil, errors.Wrapf(err, "invalid value for memory-swap")
}
}
- if len(c.StringSlice("add-host")) > 0 {
- for _, host := range c.StringSlice("add-host") {
+
+ addHost, _ := c.Flags().GetStringSlice("add-host")
+ if len(addHost) > 0 {
+ for _, host := range addHost {
if err := validateExtraHost(host); err != nil {
return nil, errors.Wrapf(err, "invalid value for add-host")
}
}
}
- if _, err := units.FromHumanSize(c.String("shm-size")); err != nil {
+ if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil {
return nil, errors.Wrapf(err, "invalid --shm-size")
}
- if err := ParseVolumes(c.StringSlice("volume")); err != nil {
+ volumes, _ := c.Flags().GetStringSlice("volume")
+ if err := ParseVolumes(volumes); err != nil {
return nil, err
}
-
+ cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
+ cpuQuota, _ := c.Flags().GetInt64("cpu-quota")
+ cpuShares, _ := c.Flags().GetUint64("cpu-shares")
+ ulimit, _ := c.Flags().GetStringSlice("ulimit")
commonOpts := &buildah.CommonBuildOptions{
- AddHost: c.StringSlice("add-host"),
- CgroupParent: c.String("cgroup-parent"),
- CPUPeriod: c.Uint64("cpu-period"),
- CPUQuota: c.Int64("cpu-quota"),
- CPUSetCPUs: c.String("cpuset-cpus"),
- CPUSetMems: c.String("cpuset-mems"),
- CPUShares: c.Uint64("cpu-shares"),
+ AddHost: addHost,
+ CgroupParent: c.Flag("cgroup-parent").Value.String(),
+ CPUPeriod: cpuPeriod,
+ CPUQuota: cpuQuota,
+ CPUSetCPUs: c.Flag("cpuset-cpus").Value.String(),
+ CPUSetMems: c.Flag("cpuset-mems").Value.String(),
+ CPUShares: cpuShares,
Memory: memoryLimit,
MemorySwap: memorySwap,
- ShmSize: c.String("shm-size"),
- Ulimit: append(defaultLimits, c.StringSlice("ulimit")...),
- Volumes: c.StringSlice("volume"),
+ ShmSize: c.Flag("shm-size").Value.String(),
+ Ulimit: append(defaultLimits, ulimit...),
+ Volumes: volumes,
}
- if err := parseSecurityOpts(c.StringSlice("security-opt"), commonOpts); err != nil {
+ securityOpts, _ := c.Flags().GetStringSlice("security-opt")
+ if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
return nil, err
}
return commonOpts, nil
@@ -231,79 +239,45 @@ func validateIPAddress(val string) (string, error) {
return "", fmt.Errorf("%s is not an ip address", val)
}
-// ValidateFlags searches for StringFlags or StringSlice flags that never had
-// a value set. This commonly occurs when the CLI mistakenly takes the next
-// option and uses it as a value.
-func ValidateFlags(c *cli.Context, flags []cli.Flag) error {
- re, err := regexp.Compile("^-.+")
- if err != nil {
- return errors.Wrap(err, "compiling regex failed")
- }
-
- // The --cmd flag can have a following command i.e. --cmd="--help".
- // Let's skip this check just for the --cmd flag.
- for _, flag := range flags {
- switch reflect.TypeOf(flag).String() {
- case "cli.StringSliceFlag":
- {
- f := flag.(cli.StringSliceFlag)
- name := strings.Split(f.Name, ",")
- if f.Name == "cmd" {
- continue
- }
- val := c.StringSlice(name[0])
- for _, v := range val {
- if ok := re.MatchString(v); ok {
- return errors.Errorf("option --%s requires a value", name[0])
- }
- }
- }
- case "cli.StringFlag":
- {
- f := flag.(cli.StringFlag)
- name := strings.Split(f.Name, ",")
- if f.Name == "cmd" {
- continue
- }
- val := c.String(name[0])
- if ok := re.MatchString(val); ok {
- return errors.Errorf("option --%s requires a value", name[0])
- }
- }
- }
- }
- return nil
-}
-
// SystemContextFromOptions returns a SystemContext populated with values
// per the input parameters provided by the caller for the use in authentication.
-func SystemContextFromOptions(c *cli.Context) (*types.SystemContext, error) {
+func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
+ certDir, err := c.Flags().GetString("cert-dir")
+ if err != nil {
+ certDir = ""
+ }
ctx := &types.SystemContext{
- DockerCertPath: c.String("cert-dir"),
+ DockerCertPath: certDir,
}
- if c.IsSet("tls-verify") {
- ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
- ctx.OCIInsecureSkipTLSVerify = !c.BoolT("tls-verify")
- ctx.DockerDaemonInsecureSkipTLSVerify = !c.BoolT("tls-verify")
+ tlsVerify, err := c.Flags().GetBool("tls-verify")
+ if err == nil && c.Flag("tls-verify").Changed {
+ ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
+ ctx.OCIInsecureSkipTLSVerify = !tlsVerify
+ ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify
}
- if c.IsSet("creds") {
+ creds, err := c.Flags().GetString("creds")
+ if err == nil && c.Flag("creds").Changed {
var err error
- ctx.DockerAuthConfig, err = getDockerAuth(c.String("creds"))
+ ctx.DockerAuthConfig, err = getDockerAuth(creds)
if err != nil {
return nil, err
}
}
- if c.IsSet("signature-policy") {
- ctx.SignaturePolicyPath = c.String("signature-policy")
+ sigPolicy, err := c.Flags().GetString("signature-policy")
+ if err == nil && c.Flag("signature-policy").Changed {
+ ctx.SignaturePolicyPath = sigPolicy
}
- if c.IsSet("authfile") {
- ctx.AuthFilePath = c.String("authfile")
+ authfile, err := c.Flags().GetString("authfile")
+ if err == nil && c.Flag("authfile").Changed {
+ ctx.AuthFilePath = authfile
}
- if c.GlobalIsSet("registries-conf") {
- ctx.SystemRegistriesConfPath = c.GlobalString("registries-conf")
+ regConf, err := c.Flags().GetString("registries-conf")
+ if err == nil && c.Flag("registries-conf").Changed {
+ ctx.SystemRegistriesConfPath = regConf
}
- if c.GlobalIsSet("registries-conf-dir") {
- ctx.RegistriesDirPath = c.GlobalString("registries-conf-dir")
+ regConfDir, err := c.Flags().GetString("registries-conf-dir")
+ if err == nil && c.Flag("registries-conf-dir").Changed {
+ ctx.RegistriesDirPath = regConfDir
}
ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", buildah.Version)
return ctx, nil
@@ -345,9 +319,9 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
}
// IDMappingOptions parses the build options related to user namespaces and ID mapping.
-func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, idmapOptions *buildah.IDMappingOptions, err error) {
- user := c.String("userns-uid-map-user")
- group := c.String("userns-gid-map-group")
+func IDMappingOptions(c *cobra.Command) (usernsOptions buildah.NamespaceOptions, idmapOptions *buildah.IDMappingOptions, err error) {
+ user := c.Flag("userns-uid-map-user").Value.String()
+ group := c.Flag("userns-gid-map-group").Value.String()
// If only the user or group was specified, use the same value for the
// other, since we need both in order to initialize the maps using the
// names.
@@ -366,6 +340,7 @@ func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, i
}
mappings = submappings
}
+ globalOptions := c.PersistentFlags()
// We'll parse the UID and GID mapping options the same way.
buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) {
outmap := make([]specs.LinuxIDMapping, 0, len(basemap))
@@ -380,11 +355,11 @@ func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, i
// Parse the flag's value as one or more triples (if it's even
// been set), and append them.
var spec []string
- if c.GlobalIsSet(option) {
- spec = c.GlobalStringSlice(option)
+ if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed {
+ spec, _ = globalOptions.GetStringSlice(option)
}
- if c.IsSet(option) {
- spec = c.StringSlice(option)
+ if c.Flag(option).Changed {
+ spec, _ = c.Flags().GetStringSlice(option)
}
idmap, err := parseIDMap(spec)
if err != nil {
@@ -424,8 +399,8 @@ func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, i
}
// If the user specifically requested that we either use or don't use
// user namespaces, override that default.
- if c.IsSet("userns") {
- how := c.String("userns")
+ if c.Flag("userns").Changed {
+ how := c.Flag("userns").Value.String()
switch how {
case "", "container":
usernsOption.Host = false
@@ -440,7 +415,12 @@ func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, i
}
}
usernsOptions = buildah.NamespaceOptions{usernsOption}
- if !c.IsSet("net") {
+
+ // Because --net and --network are technically two different flags, we need
+ // to check each for nil and .Changed
+ usernet := c.Flags().Lookup("net")
+ usernetwork := c.Flags().Lookup("network")
+ if (usernet != nil && usernetwork != nil) && (!usernet.Changed && !usernetwork.Changed) {
usernsOptions = append(usernsOptions, buildah.NamespaceOption{
Name: string(specs.NetworkNamespace),
Host: usernsOption.Host,
@@ -486,12 +466,12 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) {
}
// NamespaceOptions parses the build options for all namespaces except for user namespace.
-func NamespaceOptions(c *cli.Context) (namespaceOptions buildah.NamespaceOptions, networkPolicy buildah.NetworkConfigurationPolicy, err error) {
+func NamespaceOptions(c *cobra.Command) (namespaceOptions buildah.NamespaceOptions, networkPolicy buildah.NetworkConfigurationPolicy, err error) {
options := make(buildah.NamespaceOptions, 0, 7)
policy := buildah.NetworkDefault
- for _, what := range []string{string(specs.IPCNamespace), "net", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
- if c.IsSet(what) {
- how := c.String(what)
+ for _, what := range []string{string(specs.IPCNamespace), "net", "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
+ if c.Flags().Lookup(what) != nil && c.Flag(what).Changed {
+ how := c.Flag(what).Value.String()
switch what {
case "net", "network":
what = string(specs.NetworkNamespace)
@@ -560,9 +540,10 @@ func defaultIsolation() (buildah.Isolation, error) {
}
// IsolationOption parses the --isolation flag.
-func IsolationOption(c *cli.Context) (buildah.Isolation, error) {
- if c.String("isolation") != "" {
- switch strings.ToLower(c.String("isolation")) {
+func IsolationOption(c *cobra.Command) (buildah.Isolation, error) {
+ isolation, _ := c.Flags().GetString("isolation")
+ if isolation != "" {
+ switch strings.ToLower(isolation) {
case "oci":
return buildah.IsolationOCI, nil
case "rootless":
@@ -570,7 +551,7 @@ func IsolationOption(c *cli.Context) (buildah.Isolation, error) {
case "chroot":
return buildah.IsolationChroot, nil
default:
- return 0, errors.Errorf("unrecognized isolation type %q", c.String("isolation"))
+ return 0, errors.Errorf("unrecognized isolation type %q", isolation)
}
}
return defaultIsolation()
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index e677c8925..d1f33fb01 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -2,21 +2,27 @@ package buildah
import (
"context"
+ "fmt"
"io"
"strings"
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
+ "github.com/containers/image/directory"
+ "github.com/containers/image/docker"
+ dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/docker/reference"
tarfile "github.com/containers/image/docker/tarfile"
ociarchive "github.com/containers/image/oci/archive"
+ oci "github.com/containers/image/oci/layout"
"github.com/containers/image/signature"
is "github.com/containers/image/storage"
"github.com/containers/image/transports"
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/storage"
+ "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -37,14 +43,13 @@ type PullOptions struct {
// github.com/containers/image/types SystemContext to hold credentials
// and other authentication/authorization information.
SystemContext *types.SystemContext
- // Transport is a value which is prepended to the image's name, if the
- // image name alone can not be resolved to a reference to a source
- // image. No separator is implicitly added.
- Transport string
// BlobDirectory is the name of a directory in which we'll attempt to
// store copies of layer blobs that we pull down, if any. It should
// already exist.
BlobDirectory string
+ // AllTags is a boolean value that determines if all tagged images
+ // will be downloaded from the repository. The default is false.
+ AllTags bool
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) {
@@ -55,7 +60,7 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
file := split[len(split)-1]
var name string
switch srcRef.Transport().Name() {
- case util.DockerArchive:
+ case dockerarchive.Transport.Name():
tarSource, err := tarfile.NewSourceFromFile(file)
if err != nil {
return "", errors.Wrapf(err, "error opening tarfile %q as a source image", file)
@@ -82,7 +87,7 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
}
}
}
- case util.OCIArchive:
+ case ociarchive.Transport.Name():
// retrieve the manifest from index.json to access the image name
manifest, err := ociarchive.LoadManifestDescriptor(srcRef)
if err != nil {
@@ -97,7 +102,14 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
} else {
name = manifest.Annotations["org.opencontainers.image.ref.name"]
}
- case util.DirTransport:
+ case directory.Transport.Name():
+ // supports pull from a directory
+ name = split[1]
+ // remove leading "/"
+ if name[:1] == "/" {
+ name = name[1:]
+ }
+ case oci.Transport.Name():
+ // supports pull from a directory using the OCI image layout
name = split[1]
// remove leading "/"
@@ -141,22 +153,72 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
}
// Pull copies the contents of the image from somewhere else to local storage.
-func Pull(ctx context.Context, imageName string, options PullOptions) (types.ImageReference, error) {
+func Pull(ctx context.Context, imageName string, options PullOptions) error {
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
- return pullImage(ctx, options.Store, imageName, options, systemContext)
+
+ boptions := BuilderOptions{
+ FromImage: imageName,
+ SignaturePolicyPath: options.SignaturePolicyPath,
+ SystemContext: systemContext,
+ PullBlobDirectory: options.BlobDirectory,
+ ReportWriter: options.ReportWriter,
+ }
+
+ storageRef, transport, img, err := resolveImage(ctx, systemContext, options.Store, boptions)
+ if err != nil {
+ return err
+ }
+
+ var errs *multierror.Error
+ if options.AllTags {
+ if transport != util.DefaultTransport {
+ return errors.New("non-docker transport is not supported for --all-tags pulling")
+ }
+
+ spec := transport + storageRef.DockerReference().Name()
+ storageRef, err = alltransports.ParseImageName(spec)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing image name %q", spec)
+ }
+ tags, err := docker.GetRepositoryTags(ctx, systemContext, storageRef)
+ if err != nil {
+ return errors.Wrapf(err, "error getting repository tags")
+ }
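+ // Pull each tag separately and collect failures in a multierror so that
+ // one bad tag does not abort the remaining pulls.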
+ for _, tag := range tags {
+ name := spec + ":" + tag
+ if options.ReportWriter != nil {
+ options.ReportWriter.Write([]byte("Pulling " + name + "\n"))
+ }
+ ref, err := pullImage(ctx, options.Store, transport, name, options, systemContext)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ continue
+ }
+ img, err := is.Transport.GetStoreImage(options.Store, ref)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ continue
+ }
+ fmt.Printf("%s\n", img.ID)
+ }
+ } else {
+ fmt.Printf("%s\n", img.ID)
+ }
+
+ return errs.ErrorOrNil()
}
-func pullImage(ctx context.Context, store storage.Store, imageName string, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) {
+func pullImage(ctx context.Context, store storage.Store, transport string, imageName string, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) {
spec := imageName
srcRef, err := alltransports.ParseImageName(spec)
if err != nil {
- if options.Transport == "" {
- options.Transport = util.DefaultTransport
- }
- logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, options.Transport, err)
- transport := options.Transport
- if transport != util.DefaultTransport {
- transport = transport + ":"
+ logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, transport, err)
+ if transport == "" {
+ transport = util.DefaultTransport
+ } else if transport != util.DefaultTransport {
+ transport = transport + ":"
+ }
spec = transport + spec
srcRef2, err2 := alltransports.ParseImageName(spec)
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index a0627f489..3a248f4f2 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -1130,10 +1130,14 @@ func (b *Builder) Run(command []string, options RunOptions) error {
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr)
case IsolationOCIRootless:
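+ // Rootless isolation always passes --no-new-keyring to the runtime;
+ // --no-pivot is added only when the caller requested it via options.NoPivot.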
+ moreCreateArgs := []string{"--no-new-keyring"}
+ if options.NoPivot {
+ moreCreateArgs = append(moreCreateArgs, "--no-pivot")
+ }
if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil {
return err
}
- err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
default:
err = errors.Errorf("don't know how to run this command")
}
@@ -1912,21 +1916,6 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
logrus.Errorf("error setting descriptor %d (%s) blocking: %v", wfd, writeDesc[wfd], err)
}
}
- // A helper that returns false if err is an error that would cause us
- // to give up.
- logIfNotRetryable := func(err error, what string) (retry bool) {
- if err == nil {
- return true
- }
- if errno, isErrno := err.(syscall.Errno); isErrno {
- switch errno {
- case syscall.EINTR, syscall.EAGAIN:
- return true
- }
- }
- logrus.Errorf("%s: %v", what, err)
- return false
- }
// Pass data back and forth.
pollTimeout := -1
for len(relayMap) > 0 {
@@ -1941,7 +1930,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
buf := make([]byte, 8192)
// Wait for new data from any input descriptor, or a notification that we're done.
_, err := unix.Poll(pollFds, pollTimeout)
- if !logIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
return
}
removes := make(map[int]struct{})
@@ -1971,7 +1960,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
writeFD, needToRelay := relayMap[readFD]
if needToRelay {
n, err := unix.Read(readFD, buf)
- if !logIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
return
}
// If it's zero-length on our stdin and we're
@@ -1996,7 +1985,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
// descriptor, read all that there is to read.
for pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
nr, err := unix.Read(readFD, buf)
- logIfNotRetryable(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err))
+ util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err))
if nr <= 0 {
break
}
@@ -2019,7 +2008,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
for writeFD := range relayBuffer {
if relayBuffer[writeFD].Len() > 0 {
n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes())
- if !logIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
return
}
if n > 0 {
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.c b/vendor/github.com/containers/buildah/unshare/unshare.c
index 3865e414f..8eefae41b 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.c
+++ b/vendor/github.com/containers/buildah/unshare/unshare.c
@@ -2,6 +2,8 @@
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <linux/memfd.h>
#include <fcntl.h>
#include <grp.h>
#include <sched.h>
@@ -12,6 +14,28 @@
#include <errno.h>
#include <unistd.h>
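+/* Fallback definitions for the memfd sealing fcntl constants, for toolchains
+ * whose headers do not yet provide them; the values mirror linux/fcntl.h. */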
+#ifndef F_LINUX_SPECIFIC_BASE
+#define F_LINUX_SPECIFIC_BASE 1024
+#endif
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
+#endif
+#ifndef F_SEAL_SEAL
+#define F_SEAL_SEAL 0x0001LU
+#endif
+#ifndef F_SEAL_SHRINK
+#define F_SEAL_SHRINK 0x0002LU
+#endif
+#ifndef F_SEAL_GROW
+#define F_SEAL_GROW 0x0004LU
+#endif
+#ifndef F_SEAL_WRITE
+#define F_SEAL_WRITE 0x0008LU
+#endif
+
+#define BUFSTEP 1024
+
static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";
@@ -59,6 +83,119 @@ static void _check_proc_sys_file(const char *path)
}
}
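+/* parse_proc_stringlist reads a NUL-separated list such as /proc/self/cmdline and
+ * returns a NULL-terminated array of pointers into a single heap-allocated buffer. */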
+static char **parse_proc_stringlist(const char *list) {
+ int fd, n, i, n_strings;
+ char *buf, *new_buf, **ret;
+ size_t size, new_size, used;
+
+ fd = open(list, O_RDONLY);
+ if (fd == -1) {
+ return NULL;
+ }
+ buf = NULL;
+ size = 0;
+ used = 0;
+ for (;;) {
+ new_size = used + BUFSTEP;
+ new_buf = realloc(buf, new_size);
+ if (new_buf == NULL) {
+ free(buf);
+ fprintf(stderr, "realloc(%ld): out of memory\n", (long)new_size);
+ return NULL;
+ }
+ buf = new_buf;
+ size = new_size;
+ memset(buf + used, '\0', size - used);
+ n = read(fd, buf + used, size - used - 1);
+ if (n < 0) {
+ fprintf(stderr, "read(): %m\n");
+ free(buf);
+ close(fd);
+ return NULL;
+ }
+ if (n == 0) {
+ break;
+ }
+ used += n;
+ }
+ close(fd);
+ n_strings = 0;
+ for (n = 0; n < used; n++) {
+ if ((n == 0) || (buf[n-1] == '\0')) {
+ n_strings++;
+ }
+ }
+ ret = calloc(n_strings + 1, sizeof(char *));
+ if (ret == NULL) {
+ fprintf(stderr, "calloc(): out of memory\n");
+ return NULL;
+ }
+ i = 0;
+ for (n = 0; n < used; n++) {
+ if ((n == 0) || (buf[n-1] == '\0')) {
+ ret[i++] = &buf[n];
+ }
+ }
+ ret[i] = NULL;
+ return ret;
+}
+
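+/* buildah_reexec copies /proc/self/exe into an anonymous, sealed memory file and
+ * re-executes it with fexecve(), so execution continues from an in-memory copy of
+ * the binary rather than from the file on disk. */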
+static int buildah_reexec(void) {
+ char **argv, *exename;
+ int fd, mmfd, n_read, n_written;
+ struct stat st;
+ char buf[2048];
+
+ argv = parse_proc_stringlist("/proc/self/cmdline");
+ if (argv == NULL) {
+ return -1;
+ }
+ fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
+ if (fd == -1) {
+ fprintf(stderr, "open(\"/proc/self/exe\"): %m\n");
+ return -1;
+ }
+ if (fstat(fd, &st) == -1) {
+ fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n");
+ return -1;
+ }
+ exename = basename(argv[0]);
+ mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC);
+ if (mmfd == -1) {
+ fprintf(stderr, "memfd_create(): %m\n");
+ return -1;
+ }
+ for (;;) {
+ n_read = read(fd, buf, sizeof(buf));
+ if (n_read < 0) {
+ fprintf(stderr, "read(\"/proc/self/exe\"): %m\n");
+ return -1;
+ }
+ if (n_read == 0) {
+ break;
+ }
+ n_written = write(mmfd, buf, n_read);
+ if (n_written < 0) {
+ fprintf(stderr, "write(anonfd): %m\n");
+ return -1;
+ }
+ if (n_written != n_read) {
+ fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read);
+ return -1;
+ }
+ }
+ close(fd);
+ if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) {
+ close(mmfd);
+ fprintf(stderr, "Error sealing memfd copy: %m\n");
+ return -1;
+ }
+ if (fexecve(mmfd, argv, environ) == -1) {
+ close(mmfd);
+ fprintf(stderr, "Error during reexec(...): %m\n");
+ return -1;
+ }
+ return 0;
+}
+
void _buildah_unshare(void)
{
int flags, pidfd, continuefd, n, pgrp, sid, ctty;
@@ -132,5 +269,8 @@ void _buildah_unshare(void)
_exit(1);
}
}
+ if (buildah_reexec() != 0) {
+ _exit(1);
+ }
return;
}
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go
index 2a970b8d6..1072c2035 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/unshare/unshare.go
@@ -56,8 +56,10 @@ func (c *Cmd) Start() error {
c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags))
// Please the libpod "rootless" package to find the expected env variables.
- c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done")
- c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid()))
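+ // Only advertise a rootless user-namespace setup when we are not already running as root.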
+ if os.Geteuid() != 0 {
+ c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done")
+ c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid()))
+ }
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 5dadec7c2..08fb99706 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -1,10 +1,8 @@
package buildah
import (
- "archive/tar"
"io"
"os"
- "sync"
"github.com/containers/image/docker/reference"
"github.com/containers/image/pkg/sysregistries"
@@ -15,6 +13,7 @@ import (
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
+ "github.com/opencontainers/image-spec/specs-go/v1"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -43,6 +42,28 @@ func copyStringSlice(s []string) []string {
return t
}
+func copyHistory(history []v1.History) []v1.History {
+ if len(history) == 0 {
+ return nil
+ }
+ h := make([]v1.History, 0, len(history))
+ for _, entry := range history {
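+ // Deep-copy the Created timestamp so the returned entries do not alias the input.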
+ created := entry.Created
+ if created != nil {
+ timestamp := *created
+ created = &timestamp
+ }
+ h = append(h, v1.History{
+ Created: created,
+ CreatedBy: entry.CreatedBy,
+ Author: entry.Author,
+ Comment: entry.Comment,
+ EmptyLayer: entry.EmptyLayer,
+ })
+ }
+ return h
+}
+
func convertStorageIDMaps(UIDMap, GIDMap []idtools.IDMap) ([]rspec.LinuxIDMapping, []rspec.LinuxIDMapping) {
uidmap := make([]rspec.LinuxIDMapping, 0, len(UIDMap))
gidmap := make([]rspec.LinuxIDMapping, 0, len(GIDMap))
@@ -88,42 +109,7 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) copyFileWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings)
- if hasher != nil {
- originalUntar := archiver.Untar
- archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- contentReader, contentWriter, err := os.Pipe()
- if err != nil {
- return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
- }
- defer contentReader.Close()
- defer contentWriter.Close()
- var hashError error
- var hashWorker sync.WaitGroup
- hashWorker.Add(1)
- go func() {
- t := tar.NewReader(contentReader)
- _, err := t.Next()
- if err != nil {
- hashError = err
- }
- if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
- hashError = err
- }
- hashWorker.Done()
- }()
- if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
- err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
- }
- hashWorker.Wait()
- if err == nil {
- err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
- }
- return err
- }
- }
- return archiver.CopyFileWithTar
+ return chrootarchive.CopyFileWithTarAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
}
// copyWithTar returns a function which copies a directory tree from outside of
@@ -131,15 +117,7 @@ func (b *Builder) copyFileWithTar(chownOpts *idtools.IDPair, hasher io.Writer) f
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) copyWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings)
- if hasher != nil {
- originalUntar := archiver.Untar
- archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
- }
- }
- return archiver.CopyWithTar
+ return chrootarchive.CopyWithTarAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
}
// untarPath returns a function which extracts an archive in a specified
@@ -147,15 +125,7 @@ func (b *Builder) copyWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings)
- if hasher != nil {
- originalUntar := archiver.Untar
- archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
- }
- }
- return archiver.UntarPath
+ return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
}
// tarPath returns a function which creates an archive of a specified
@@ -163,14 +133,7 @@ func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(sr
// container's ID maps
func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- return func(path string) (io.ReadCloser, error) {
- return archive.TarWithOptions(path, &archive.TarOptions{
- Compression: archive.Uncompressed,
- UIDMaps: tarMappings.UIDs(),
- GIDMaps: tarMappings.GIDs(),
- })
- }
+ return archive.TarPath(convertedUIDMap, convertedGIDMap)
}
// isRegistryBlocked checks if the named registry is marked as blocked
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index f4cac522e..d98493634 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -9,15 +9,13 @@ import (
"path"
"strconv"
"strings"
+ "syscall"
- "github.com/containers/image/directory"
- dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/docker/reference"
- ociarchive "github.com/containers/image/oci/archive"
"github.com/containers/image/pkg/sysregistriesv2"
"github.com/containers/image/signature"
is "github.com/containers/image/storage"
- "github.com/containers/image/tarball"
+ "github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
@@ -42,36 +40,18 @@ var (
"index.docker.io": "library",
"docker.io": "library",
}
- // Transports contains the possible transports used for images
- Transports = map[string]string{
- dockerarchive.Transport.Name(): "",
- ociarchive.Transport.Name(): "",
- directory.Transport.Name(): "",
- tarball.Transport.Name(): "",
- }
- // DockerArchive is the transport we prepend to an image name
- // when saving to docker-archive
- DockerArchive = dockerarchive.Transport.Name()
- // OCIArchive is the transport we prepend to an image name
- // when saving to oci-archive
- OCIArchive = ociarchive.Transport.Name()
- // DirTransport is the transport for pushing and pulling
- // images to and from a directory
- DirTransport = directory.Transport.Name()
- // TarballTransport is the transport for importing a tar archive
- // and creating a filesystem image
- TarballTransport = tarball.Transport.Name()
)
// ResolveName checks if name is a valid image name, and if that name doesn't
// include a domain portion, returns a list of the names which it might
-// correspond to in the set of configured registries,
-// and a boolean which is true iff 1) the list of search registries was used, and 2) it was empty.
+// correspond to in the set of configured registries, the transport used to
+// pull the image, and a boolean which is true iff
+// 1) the list of search registries was used, and 2) it was empty.
// NOTE: The "list of search registries is empty" check does not count blocked registries,
// and neither the implied "localhost" nor a possible firstRegistry are counted
-func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) ([]string, bool, error) {
+func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) {
if name == "" {
- return nil, false, nil
+ return nil, "", false, nil
}
// Maybe it's a truncated image ID. Don't prepend a registry name, then.
@@ -79,27 +59,28 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
// It's a truncated version of the ID of an image that's present in local storage;
// we need only expand the ID.
- return []string{img.ID}, false, nil
+ return []string{img.ID}, "", false, nil
}
}
// If the image includes a transport's name as a prefix, use it as-is.
+ if strings.HasPrefix(name, DefaultTransport) {
+ return []string{strings.TrimPrefix(name, DefaultTransport)}, DefaultTransport, false, nil
+ }
split := strings.SplitN(name, ":", 2)
if len(split) == 2 {
- if _, ok := Transports[split[0]]; ok {
- return []string{split[1]}, false, nil
+ if trans := transports.Get(split[0]); trans != nil {
+ return []string{split[1]}, trans.Name(), false, nil
}
}
-
- name = strings.TrimPrefix(name, DefaultTransport)
// If the image name already included a domain component, we're done.
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
- return nil, false, errors.Wrapf(err, "error parsing image name %q", name)
+ return nil, "", false, errors.Wrapf(err, "error parsing image name %q", name)
}
if named.String() == name {
// Parsing produced the same result, so there was a domain name in there to begin with.
- return []string{name}, false, nil
+ return []string{name}, DefaultTransport, false, nil
}
if reference.Domain(named) != "" && RegistryDefaultPathPrefix[reference.Domain(named)] != "" {
// If this domain can cause us to insert something in the middle, check if that happened.
@@ -116,7 +97,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/"
if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):])+tag+digest == name {
// Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with.
- return []string{name}, false, nil
+ return []string{name}, DefaultTransport, false, nil
}
}
@@ -152,7 +133,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
candidate := path.Join(registry, middle, name)
candidates = append(candidates, candidate)
}
- return candidates, searchRegistriesAreEmpty, nil
+ return candidates, DefaultTransport, searchRegistriesAreEmpty, nil
}
// ExpandNames takes unqualified names, parses them as image names, and returns
@@ -163,7 +144,7 @@ func ExpandNames(names []string, firstRegistry string, systemContext *types.Syst
expanded := make([]string, 0, len(names))
for _, n := range names {
var name reference.Named
- nameList, _, err := ResolveName(n, firstRegistry, systemContext, store)
+ nameList, _, _, err := ResolveName(n, firstRegistry, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", n)
}
@@ -199,7 +180,7 @@ func FindImage(store storage.Store, firstRegistry string, systemContext *types.S
var ref types.ImageReference
var img *storage.Image
var err error
- names, _, err := ResolveName(image, firstRegistry, systemContext, store)
+ names, _, _, err := ResolveName(image, firstRegistry, systemContext, store)
if err != nil {
return nil, nil, errors.Wrapf(err, "error parsing name %q", image)
}
@@ -419,3 +400,32 @@ func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error
}
return policyContext, nil
}
+
+// logIfNotErrno logs the error message unless err is either nil or one of the
+// listed syscall.Errno values. It returns true if it logged an error.
+func logIfNotErrno(err error, what string, ignores ...syscall.Errno) (logged bool) {
+ if err == nil {
+ return false
+ }
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ for _, ignore := range ignores {
+ if errno == ignore {
+ return false
+ }
+ }
+ }
+ logrus.Error(what)
+ return true
+}
+
+// LogIfNotRetryable logs "what" if err is set and is not an EINTR or EAGAIN
+// syscall.Errno. Returns "true" if we can continue.
+func LogIfNotRetryable(err error, what string) (retry bool) {
+ return !logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN)
+}
+
+// LogIfUnexpectedWhileDraining logs "what" if err is set and is not an EINTR
+// or EAGAIN or EIO syscall.Errno.
+func LogIfUnexpectedWhileDraining(err error, what string) {
+ logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN, syscall.EIO)
+}
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index ee8192935..7438fc909 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -1,68 +1,67 @@
-github.com/Azure/go-ansiterm master
-github.com/blang/semver master
-github.com/BurntSushi/toml master
-github.com/containerd/continuity master
+github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
+github.com/blang/semver v3.5.0
+github.com/BurntSushi/toml v0.2.0
+github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-alpha1
-github.com/containers/image 0c6cc8e1420001ae39fa89d308b3b3bc5ee81c57
-github.com/boltdb/bolt master
-github.com/containers/libpod c8eaf59d5f4bec249db8134c6a9fcfbcac792519
-github.com/containers/storage 60a692f7ce891feb91ce0eda87bd06bfd5651dff
+github.com/containers/image v1.4
+github.com/vbauerster/mpb v3.3.4
+github.com/mattn/go-isatty v0.0.4
+github.com/VividCortex/ewma v1.1.1
+github.com/boltdb/bolt v1.3.1
+github.com/containers/libpod v1.0
+github.com/containers/storage v1.10
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
-github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
-github.com/docker/engine-api master
-github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
-github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/docker/docker-credential-helpers v0.6.1
+github.com/docker/go-connections v0.4.0
+github.com/docker/go-units v0.3.2
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
-github.com/fsouza/go-dockerclient master
-github.com/ghodss/yaml master
-github.com/gogo/protobuf master
-github.com/golang/glog master
-github.com/gorilla/context master
-github.com/gorilla/mux master
-github.com/hashicorp/errwrap master
-github.com/hashicorp/go-cleanhttp master
-github.com/hashicorp/go-multierror master
-github.com/imdario/mergo master
-github.com/mattn/go-runewidth master
-github.com/mattn/go-shellwords master
-github.com/Microsoft/go-winio master
-github.com/Microsoft/hcsshim master
-github.com/mistifyio/go-zfs master
+github.com/fsouza/go-dockerclient 29c1814d12c072344bb91aac5d2ff719db39c523
+github.com/ghodss/yaml v1.0.0
+github.com/gogo/protobuf v1.2.0
+github.com/gorilla/context v1.1.1
+github.com/gorilla/mux v1.6.2
+github.com/hashicorp/errwrap v1.0.0
+github.com/hashicorp/go-multierror v1.0.0
+github.com/imdario/mergo v0.3.6
+github.com/mattn/go-shellwords v1.0.3
+github.com/Microsoft/go-winio v0.4.11
+github.com/Microsoft/hcsshim v0.8.3
+github.com/mistifyio/go-zfs v2.1.1
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
-github.com/mtrmac/gpgme master
-github.com/Nvveen/Gotty master
+github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
+# TODO: Gotty has not been updated since 2012. Can we find a replacement?
+github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
-github.com/opencontainers/runc master
+github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec v1.0.0
-github.com/opencontainers/runtime-tools master
-github.com/opencontainers/selinux master
-github.com/openshift/imagebuilder master
+github.com/opencontainers/runtime-tools v0.8.0
+github.com/opencontainers/selinux v1.1
+github.com/openshift/imagebuilder 36823496a6868f72bc36282cc475eb8a070c0934
github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
-github.com/pborman/uuid master
-github.com/pkg/errors master
+github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
-github.com/seccomp/containers-golang master
-github.com/sirupsen/logrus master
-github.com/syndtr/gocapability master
-github.com/tchap/go-patricia master
-github.com/ulikunitz/xz v0.5.4
-github.com/urfave/cli 934abfb2f102315b5794e15ebc7949e4ca253920
+github.com/seccomp/libseccomp-golang v0.9.0
+github.com/seccomp/containers-golang v0.1
+github.com/sirupsen/logrus v1.0.0
+github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
+github.com/tchap/go-patricia v2.2.6
+github.com/ulikunitz/xz v0.5.5
github.com/vbatts/tar-split v0.10.2
-github.com/xeipuuv/gojsonpointer master
-github.com/xeipuuv/gojsonreference master
-github.com/xeipuuv/gojsonschema master
-golang.org/x/crypto master
-golang.org/x/net master
-golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
-golang.org/x/sys master
-golang.org/x/text master
-gopkg.in/cheggaaa/pb.v1 v1.0.27
-gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
-k8s.io/apimachinery master
-k8s.io/client-go master
-k8s.io/kubernetes master
+github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
+github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
+github.com/xeipuuv/gojsonschema v1.1.0
+golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 https://github.com/golang/crypto
+golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 https://github.com/golang/net
+golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f https://github.com/golang/sync
+golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba https://github.com/golang/sys
+golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d https://github.com/golang/text
+gopkg.in/yaml.v2 v2.2.2
+k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
+github.com/onsi/gomega v1.4.3
+github.com/spf13/cobra v0.0.3
+github.com/spf13/pflag v1.0.3
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index e783416ad..ba99336aa 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -13,19 +13,22 @@ import (
"sync"
"time"
+ "github.com/containers/image/docker/reference"
"github.com/containers/image/image"
+ "github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/signature"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/klauspost/pgzip"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "github.com/vbauerster/mpb"
+ "github.com/vbauerster/mpb/decor"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/sync/semaphore"
- pb "gopkg.in/cheggaaa/pb.v1"
)
type digestingReader struct {
@@ -212,7 +215,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate
// source image admissibility.
-func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifest []byte, retErr error) {
+func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
// Make sure we fail cleanly in such cases.
multiImage, err := isMultiImage(ctx, unparsedImage)
@@ -235,6 +238,26 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
}
+ // If the destination is a digested reference, make a note of that, determine what digest value we're
+ // expecting, and check that the source manifest matches it.
+ destIsDigestedReference := false
+ if named := c.dest.Reference().DockerReference(); named != nil {
+ if digested, ok := named.(reference.Digested); ok {
+ destIsDigestedReference = true
+ sourceManifest, _, err := src.Manifest(ctx)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error reading manifest from source image")
+ }
+ matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error computing digest of source image's manifest")
+ }
+ if !matches {
+ return nil, errors.New("Digest of source image's manifest would not match destination reference")
+ }
+ }
+ }
+
if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
return nil, err
}
@@ -262,15 +285,15 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
src: src,
// diffIDsAreNeeded is computed later
- canModifyManifest: len(sigs) == 0,
- // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
- // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
- // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
- // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
- // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
- // and we would reuse and sign it.
- canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
+ canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
}
+ // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
+ ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""
if err := ic.updateEmbeddedDockerReference(); err != nil {
return nil, err
@@ -294,7 +317,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
// So, try the preferred manifest MIME type. If the process succeeds, fine…
- manifest, err = ic.copyUpdatedConfigAndManifest(ctx)
+ manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx)
if err != nil {
logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
@@ -325,7 +348,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
// We have successfully uploaded a manifest.
- manifest = attemptedManifest
+ manifestBytes = attemptedManifest
errs = nil // Mark this as a success so that we don't abort below.
break
}
@@ -335,7 +358,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
if options.SignBy != "" {
- newSig, err := c.createSignature(manifest, options.SignBy)
+ newSig, err := c.createSignature(manifestBytes, options.SignBy)
if err != nil {
return nil, err
}
@@ -347,7 +370,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
return nil, errors.Wrap(err, "Error writing signatures")
}
- return manifest, nil
+ return manifestBytes, nil
}
// Printf writes a formatted string to c.reportWriter.
@@ -400,27 +423,6 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
return nil
}
-// shortDigest returns the first 12 characters of the digest.
-func shortDigest(d digest.Digest) string {
- return d.Encoded()[:12]
-}
-
-// createProgressBar creates a pb.ProgressBar. Note that if the copier's
-// reportWriter is ioutil.Discard, the progress bar's output will be discarded
-// and a single line will be printed instead.
-func (c *copier) createProgressBar(srcInfo types.BlobInfo, kind string) *pb.ProgressBar {
- bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
- bar.SetMaxWidth(80)
- bar.ShowTimeLeft = false
- bar.ShowPercent = false
- bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, shortDigest(srcInfo.Digest)))
- bar.Output = c.progressOutput
- if bar.Output == ioutil.Discard {
- c.Printf("Copying %s %s\n", kind, srcInfo.Digest)
- }
- return bar
-}
-
// isTTY returns true if the io.Writer is a file and a tty.
func isTTY(w io.Writer) bool {
if f, ok := w.(*os.File); ok {
@@ -455,6 +457,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// copyGroup is used to determine if all layers are copied
copyGroup := sync.WaitGroup{}
copyGroup.Add(numLayers)
+
// copySemaphore is used to limit the number of parallel downloads to
// avoid malicious images causing troubles and to be nice to servers.
var copySemaphore *semaphore.Weighted
@@ -465,8 +468,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
}
data := make([]copyLayerData, numLayers)
- copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *pb.ProgressBar) {
- defer bar.Finish()
+ copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *mpb.Bar) {
defer copySemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
@@ -476,45 +478,37 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// does not support them.
if ic.diffIDsAreNeeded {
cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
- bar.Prefix(fmt.Sprintf("Skipping blob %s (DiffID foreign layer unimplemented):", shortDigest(srcLayer.Digest)))
- bar.Finish()
} else {
cld.destInfo = srcLayer
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
- bar.Prefix(fmt.Sprintf("Skipping blob %s (foreign layer):", shortDigest(srcLayer.Digest)))
- bar.Add64(bar.Total)
- bar.Finish()
}
} else {
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
}
data[index] = cld
+ bar.SetTotal(srcLayer.Size, true)
}
- progressBars := make([]*pb.ProgressBar, numLayers)
- for i, srcInfo := range srcInfos {
- bar := ic.c.createProgressBar(srcInfo, "blob")
- progressBars[i] = bar
- }
+ func() { // A scope for defer
+ progressPool, progressCleanup := ic.c.newProgressPool(ctx)
+ defer progressCleanup()
- progressPool := pb.NewPool(progressBars...)
- progressPool.Output = ic.c.progressOutput
+ progressBars := make([]*mpb.Bar, numLayers)
+ for i, srcInfo := range srcInfos {
+ progressBars[i] = ic.c.createProgressBar(progressPool, srcInfo, "blob")
+ }
- if err := progressPool.Start(); err != nil {
- return errors.Wrapf(err, "error creating progress-bar pool")
- }
+ for i, srcLayer := range srcInfos {
+ copySemaphore.Acquire(ctx, 1)
+ go copyLayerHelper(i, srcLayer, progressBars[i])
+ }
- for i, srcLayer := range srcInfos {
- copySemaphore.Acquire(ctx, 1)
- go copyLayerHelper(i, srcLayer, progressBars[i])
- }
+ // Wait for all layers to be copied
+ copyGroup.Wait()
+ }()
destInfos := make([]types.BlobInfo, numLayers)
diffIDs := make([]digest.Digest, numLayers)
-
- copyGroup.Wait()
- progressPool.Stop()
-
for i, cld := range data {
if cld.err != nil {
return cld.err
@@ -585,6 +579,44 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
return manifest, nil
}
+// newProgressPool creates a *mpb.Progress and a cleanup function.
+// The caller must eventually call the returned cleanup function after the pool is no longer being updated.
+func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
+ ctx, cancel := context.WithCancel(ctx)
+ pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
+ return pool, func() {
+ cancel()
+ pool.Wait()
+ }
+}
+
+// createProgressBar creates an mpb.Bar in pool. Note that if the copier's reportWriter
+// is ioutil.Discard, the progress bar's output will be discarded and a single line is printed instead.
+func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string) *mpb.Bar {
+ // shortDigestLen is the length of the digest used for blobs.
+ const shortDigestLen = 12
+
+ prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+ // Truncate the prefix (chopping off part of the digest) to make all progress bars aligned in a column.
+ maxPrefixLen := len("Copying blob ") + shortDigestLen
+ if len(prefix) > maxPrefixLen {
+ prefix = prefix[:maxPrefixLen]
+ }
+
+ bar := pool.AddBar(info.Size,
+ mpb.PrependDecorators(
+ decor.Name(prefix),
+ ),
+ mpb.AppendDecorators(
+ decor.CountersKibiByte("%.1f / %.1f"),
+ ),
+ )
+ if c.progressOutput == ioutil.Discard {
+ c.Printf("Copying %s %s\n", kind, info.Digest)
+ }
+ return bar
+}
+
// copyConfig copies config.json, if any, from src to dest.
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
@@ -593,12 +625,20 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
if err != nil {
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
}
- bar := c.createProgressBar(srcInfo, "config")
- defer bar.Finish()
- bar.Start()
- destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+
+ destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+ progressPool, progressCleanup := c.newProgressPool(ctx)
+ defer progressCleanup()
+ bar := c.createProgressBar(progressPool, srcInfo, "config")
+ destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ bar.SetTotal(int64(len(configBlob)), true)
+ return destInfo, nil
+ }()
if err != nil {
- return err
+ return nil
}
if destInfo.Digest != srcInfo.Digest {
return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
@@ -616,7 +656,7 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *pb.ProgressBar) (types.BlobInfo, digest.Digest, error) {
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *mpb.Bar) (types.BlobInfo, digest.Digest, error) {
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
@@ -627,9 +667,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
}
if reused {
- bar.Prefix(fmt.Sprintf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest)))
- bar.Add64(bar.Total)
- bar.Finish()
+ logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
return blobInfo, cachedDiffID, nil
}
}
@@ -641,8 +679,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
}
defer srcStream.Close()
- blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
- diffIDIsNeeded, bar)
+ blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
if err != nil {
return types.BlobInfo{}, "", err
}
@@ -670,7 +707,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
// perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- diffIDIsNeeded bool, bar *pb.ProgressBar) (types.BlobInfo, <-chan diffIDResult, error) {
+ diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
@@ -731,7 +768,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
- canModifyBlob bool, isConfig bool, bar *pb.ProgressBar) (types.BlobInfo, error) {
+ canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
@@ -754,7 +791,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
isCompressed := decompressor != nil
- destStream = bar.NewProxyReader(destStream)
+ destStream = bar.ProxyReader(destStream)
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
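The copy.go hunks above replace gopkg.in/cheggaaa/pb.v1 with github.com/vbauerster/mpb: each blob copy is driven through bar.ProxyReader, the bar is finished via bar.SetTotal(..., true), and the pool is shut down by the cleanup function from newProgressPool. Below is a minimal, self-contained sketch of that pattern using only the mpb calls that appear in the diff; the prefix string and byte counts are illustrative, not taken from the source.

package main

import (
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	const size = 1 << 20
	src := strings.NewReader(strings.Repeat("x", size)) // stand-in for a blob stream

	// One pool per copy operation; Wait() must run after all bars are done.
	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(os.Stderr))
	bar := pool.AddBar(size,
		mpb.PrependDecorators(decor.Name("Copying blob deadbeef0000")),
		mpb.AppendDecorators(decor.CountersKibiByte("%.1f / %.1f")),
	)

	// ProxyReader ticks the bar as bytes are consumed, mirroring copyBlobFromStream.
	if _, err := io.Copy(ioutil.Discard, bar.ProxyReader(src)); err != nil {
		panic(err)
	}
	bar.SetTotal(size, true) // mark the bar complete, as the layer-copy helper does
	pool.Wait()
}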
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index e9882c024..38500dd0e 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -12,6 +12,7 @@ import (
"net/url"
"os"
"path/filepath"
+ "strings"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
@@ -390,14 +391,29 @@ func isManifestInvalidError(err error) bool {
if !ok || len(errors) == 0 {
return false
}
- ec, ok := errors[0].(errcode.ErrorCoder)
+ err = errors[0]
+ ec, ok := err.(errcode.ErrorCoder)
if !ok {
return false
}
+
+ switch ec.ErrorCode() {
// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+ case v2.ErrorCodeManifestInvalid:
+ return true
// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
// when uploading to a tag (because it can’t find a matching tag inside the manifest)
- return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
+ case v2.ErrorCodeTagInvalid:
+ return true
+ // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
+ // uploading an OCI manifest that is (correctly, according to the spec) missing
+ // a top-level media type. See libpod issue #1719
+ // FIXME: remove this case when ECR behavior is fixed
+ case errcode.ErrorCodeUnsupported:
+ return strings.Contains(err.Error(), "Invalid JSON syntax")
+ default:
+ return false
+ }
}
func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index 063511535..8367792bf 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -147,10 +147,10 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
break
}
}
- if resp.Body != nil && err == nil {
- return resp.Body, getBlobSize(resp), nil
+ if err != nil {
+ return nil, 0, err
}
- return nil, 0, err
+ return resp.Body, getBlobSize(resp), nil
}
func getBlobSize(resp *http.Response) int64 {
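The getExternalBlob fix above checks err before touching resp, since the response may be nil whenever the error is non-nil. A small hedged sketch of the same ordering with net/http (the URL is a placeholder):

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchBlob returns the body and size of a blob at url, checking the error
// before dereferencing the response, as the corrected code above now does.
func fetchBlob(url string) (io.ReadCloser, int64, error) {
	resp, err := http.Get(url)
	if err != nil {
		// resp can be nil here; returning early avoids a nil dereference.
		return nil, 0, err
	}
	return resp.Body, resp.ContentLength, nil
}

func main() {
	body, size, err := fetchBlob("http://example.com/blob") // placeholder URL
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	defer body.Close()
	fmt.Println("blob size:", size)
}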
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index 889e5f8e8..03735f8a4 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -9,6 +9,7 @@ import (
"io/ioutil"
"os"
"path"
+ "sync"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
@@ -21,8 +22,10 @@ import (
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
tarPath string
- removeTarPathOnClose bool // Remove temp file on close if true
+ removeTarPathOnClose bool // Remove temp file on close if true
+ cacheDataLock sync.Once // Atomic way to ensure that ensureCachedDataIsPresent is only invoked once
// The following data is only available after ensureCachedDataIsPresent() succeeds
+ cacheDataResult error // The return value of ensureCachedDataIsPresent, since it should be as safe to cache as the side effects
tarManifest *ManifestItem // nil if not available yet.
configBytes []byte
configDigest digest.Digest
@@ -199,43 +202,46 @@ func (s *Source) readTarComponent(path string) ([]byte, error) {
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
func (s *Source) ensureCachedDataIsPresent() error {
- if s.tarManifest != nil {
- return nil
- }
-
- // Read and parse manifest.json
- tarManifest, err := s.loadTarManifest()
- if err != nil {
- return err
- }
+ s.cacheDataLock.Do(func() {
+ // Read and parse manifest.json
+ tarManifest, err := s.loadTarManifest()
+ if err != nil {
+ s.cacheDataResult = err
+ return
+ }
- // Check to make sure length is 1
- if len(tarManifest) != 1 {
- return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
- }
+ // Check to make sure length is 1
+ if len(tarManifest) != 1 {
+ s.cacheDataResult = errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
+ return
+ }
- // Read and parse config.
- configBytes, err := s.readTarComponent(tarManifest[0].Config)
- if err != nil {
- return err
- }
- var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
- if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
- return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
- }
+ // Read and parse config.
+ configBytes, err := s.readTarComponent(tarManifest[0].Config)
+ if err != nil {
+ s.cacheDataResult = err
+ return
+ }
+ var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
+ if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+ s.cacheDataResult = errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
+ return
+ }
- knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
- if err != nil {
- return err
- }
+ knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
+ if err != nil {
+ s.cacheDataResult = err
+ return
+ }
- // Success; commit.
- s.tarManifest = &tarManifest[0]
- s.configBytes = configBytes
- s.configDigest = digest.FromBytes(configBytes)
- s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
- s.knownLayers = knownLayers
- return nil
+ // Success; commit.
+ s.tarManifest = &tarManifest[0]
+ s.configBytes = configBytes
+ s.configDigest = digest.FromBytes(configBytes)
+ s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+ s.knownLayers = knownLayers
+ })
+ return s.cacheDataResult
}
// loadTarManifest loads and decodes the manifest.json.
@@ -399,7 +405,7 @@ func (r uncompressedReadCloser) Close() error {
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *Source) HasThreadSafeGetBlob() bool {
- return false
+ return true
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
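ensureCachedDataIsPresent now runs under a sync.Once and caches its error in cacheDataResult, which is what allows HasThreadSafeGetBlob to return true. A minimal sketch of that load-once-and-cache-the-error idiom; loadConfig here is a hypothetical stand-in for the manifest/config parsing:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type source struct {
	once   sync.Once
	err    error  // cached result of the one-time load
	config []byte // populated only if err == nil
}

// loadConfig is a hypothetical stand-in for the expensive parse step.
func loadConfig() ([]byte, error) {
	return nil, errors.New("manifest.json not found")
}

// ensure runs the load exactly once, even under concurrent callers,
// and returns the same cached error to every caller afterwards.
func (s *source) ensure() error {
	s.once.Do(func() {
		cfg, err := loadConfig()
		if err != nil {
			s.err = err
			return
		}
		s.config = cfg
	})
	return s.err
}

func main() {
	var s source
	for i := 0; i < 2; i++ {
		fmt.Println(s.ensure()) // both calls report the same cached error
	}
}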
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
index df432c9f3..35d852139 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -17,7 +17,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/pgzip"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
@@ -313,24 +313,19 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
if err != nil {
return nil, 0, err
}
- defer mfz.Close()
metaUnpacker := storage.NewJSONUnpacker(mfz)
getter, err := newOSTreePathFileGetter(s.repo, branch)
if err != nil {
+ mfz.Close()
return nil, 0, err
}
ots := asm.NewOutputTarStream(getter, metaUnpacker)
- pipeReader, pipeWriter := io.Pipe()
- go func() {
- io.Copy(pipeWriter, ots)
- pipeWriter.Close()
- }()
-
- rc := ioutils.NewReadCloserWrapper(pipeReader, func() error {
+ rc := ioutils.NewReadCloserWrapper(ots, func() error {
getter.Close()
+ mfz.Close()
return ots.Close()
})
return rc, layerSize, nil
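The ostree change above drops the io.Pipe/goroutine indirection and returns the tar stream directly, bundling all cleanup into one ReadCloserWrapper. A small hedged sketch of that wrapper pattern; the files used here are illustrative only:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/containers/storage/pkg/ioutils"
)

// wrapWithCleanup returns data as a ReadCloser whose Close also releases extra,
// mirroring how GetBlob now closes the metadata stream and the path getter.
func wrapWithCleanup(data io.Reader, extra io.Closer) io.ReadCloser {
	return ioutils.NewReadCloserWrapper(data, func() error {
		extra.Close()
		return nil
	})
}

func main() {
	aux, err := os.Open(os.DevNull) // stand-in for the auxiliary resource
	if err != nil {
		panic(err)
	}
	rc := wrapWithCleanup(strings.NewReader("layer bytes"), aux)
	io.Copy(os.Stdout, rc)
	fmt.Println()
	rc.Close() // closes both the logical stream and the auxiliary resource
}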
diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
index 9e3e9cfe1..3d0bb0df2 100644
--- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
@@ -53,20 +53,23 @@ type Registry struct {
Prefix string `toml:"prefix"`
}
-// backwards compatability to sysregistries v1
-type v1TOMLregistries struct {
+// V1TOMLregistries is for backwards compatibility to sysregistries v1
+type V1TOMLregistries struct {
Registries []string `toml:"registries"`
}
+// V1TOMLConfig is for backwards compatibility to sysregistries v1
+type V1TOMLConfig struct {
+ Search V1TOMLregistries `toml:"search"`
+ Insecure V1TOMLregistries `toml:"insecure"`
+ Block V1TOMLregistries `toml:"block"`
+}
+
// tomlConfig is the data type used to unmarshal the toml config.
type tomlConfig struct {
Registries []Registry `toml:"registry"`
// backwards compatability to sysregistries v1
- V1Registries struct {
- Search v1TOMLregistries `toml:"search"`
- Insecure v1TOMLregistries `toml:"insecure"`
- Block v1TOMLregistries `toml:"block"`
- } `toml:"registries"`
+ V1TOMLConfig `toml:"registries"`
}
// InvalidRegistries represents an invalid registry configurations. An example
@@ -129,21 +132,21 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
// Note: config.V1Registries.Search needs to be processed first to ensure registryOrder is populated in the right order
// if one of the search registries is also in one of the other lists.
- for _, search := range config.V1Registries.Search.Registries {
+ for _, search := range config.V1TOMLConfig.Search.Registries {
reg, err := getRegistry(search)
if err != nil {
return nil, err
}
reg.Search = true
}
- for _, blocked := range config.V1Registries.Block.Registries {
+ for _, blocked := range config.V1TOMLConfig.Block.Registries {
reg, err := getRegistry(blocked)
if err != nil {
return nil, err
}
reg.Blocked = true
}
- for _, insecure := range config.V1Registries.Insecure.Registries {
+ for _, insecure := range config.V1TOMLConfig.Insecure.Registries {
reg, err := getRegistry(insecure)
if err != nil {
return nil, err
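The sysregistriesv2 change replaces the anonymous V1Registries struct with an exported, embedded V1TOMLConfig, so the v1-style `[registries.*]` tables still decode under the new names. A hedged sketch of how such an embedded, tagged struct unmarshals with BurntSushi/toml (the library the vendored code uses); the sample TOML is illustrative:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type V1TOMLregistries struct {
	Registries []string `toml:"registries"`
}

type V1TOMLConfig struct {
	Search   V1TOMLregistries `toml:"search"`
	Insecure V1TOMLregistries `toml:"insecure"`
	Block    V1TOMLregistries `toml:"block"`
}

type tomlConfig struct {
	// The embedded struct keeps v1-style [registries.*] tables decodable.
	V1TOMLConfig `toml:"registries"`
}

func main() {
	data := `
[registries.search]
registries = ["docker.io", "quay.io"]

[registries.insecure]
registries = ["localhost:5000"]
`
	var cfg tomlConfig
	if _, err := toml.Decode(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.V1TOMLConfig.Search.Registries)   // [docker.io quay.io]
	fmt.Println(cfg.V1TOMLConfig.Insecure.Registries) // [localhost:5000]
}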
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index b53fbdf6e..67dc6142b 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -14,6 +14,7 @@ import (
"sync"
"sync/atomic"
+ "github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
@@ -70,6 +71,13 @@ type storageImageCloser struct {
size int64
}
+// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key retruned function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
+func manifestBigDataKey(digest digest.Digest) string {
+ return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
// newImageSource sets up an image for reading.
func newImageSource(imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
@@ -177,12 +185,29 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
return nil, "", ErrNoManifestLists
}
if len(s.cachedManifest) == 0 {
- // We stored the manifest as an item named after storage.ImageDigestBigDataKey.
- cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
- if err != nil {
- return nil, "", err
+ // The manifest is stored as a big data item.
+ // Prefer the manifest corresponding to the user-specified digest, if available.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ key := manifestBigDataKey(digested.Digest())
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+ return nil, "", err
+ }
+ if err == nil {
+ s.cachedManifest = blob
+ }
+ }
+ }
+ // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+ // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
+ if len(s.cachedManifest) == 0 {
+ cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
+ if err != nil {
+ return nil, "", err
+ }
+ s.cachedManifest = cachedBlob
}
- s.cachedManifest = cachedBlob
}
return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
}
@@ -660,6 +685,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
}
lastLayer = layer.ID
}
+
// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
// was originally created, in case we're just copying it. If not, no harm done.
options := &storage.ImageOptions{}
@@ -667,9 +693,6 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
logrus.Debugf("setting image creation date to %s", inspect.Created)
options.CreationDate = *inspect.Created
}
- if manifestDigest, err := manifest.Digest(s.manifest); err == nil {
- options.Digest = manifestDigest
- }
// Create the image record, pointing to the most-recently added layer.
intendedID := s.imageRef.id
if intendedID == "" {
@@ -735,8 +758,20 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
}
logrus.Debugf("set names of image %q to %v", img.ID, names)
}
- // Save the manifest. Use storage.ImageDigestBigDataKey as the item's
- // name, so that its digest can be used to locate the image in the Store.
+ // Save the manifest. Allow looking it up by digest by using the key convention defined by the Store.
+ // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
+ // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
+ manifestDigest, err := manifest.Digest(s.manifest)
+ if err != nil {
+ return errors.Wrapf(err, "error computing manifest digest")
+ }
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+ return err
+ }
if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
@@ -788,9 +823,21 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
}
// PutManifest writes the manifest to the destination.
-func (s *storageImageDestination) PutManifest(ctx context.Context, manifest []byte) error {
- s.manifest = make([]byte, len(manifest))
- copy(s.manifest, manifest)
+func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest())
+ if err != nil {
+ return err
+ }
+ if !matches {
+ return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest())
+ }
+ }
+ }
+
+ s.manifest = make([]byte, len(manifestBlob))
+ copy(s.manifest, manifestBlob)
return nil
}
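storage_image.go now records the manifest twice, under the digest-specific key built by manifestBigDataKey and under the legacy storage.ImageDigestBigDataKey, and readers prefer the digest-specific item before falling back. A minimal sketch of that key convention and the dual-write/fallback-read, using a plain map in place of the store:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

const legacyKey = "manifest" // mirrors storage.ImageDigestBigDataKey

// manifestKey mirrors the manifestBigDataKey convention from the diff.
func manifestKey(d digest.Digest) string {
	return "manifest-" + d.String()
}

func main() {
	bigData := map[string][]byte{} // stand-in for the image's big data items
	manifest := []byte(`{"schemaVersion": 2}`)
	d := digest.Canonical.FromBytes(manifest)

	// Write twice: digest-specific key plus the legacy key for older readers.
	bigData[manifestKey(d)] = manifest
	bigData[legacyKey] = manifest

	// Read: prefer the digest-specific item, fall back to the legacy one.
	blob, ok := bigData[manifestKey(d)]
	if !ok {
		blob = bigData[legacyKey]
	}
	fmt.Printf("resolved %d manifest bytes for %s\n", len(blob), d)
}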
diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go
index 73306b972..c046d9f22 100644
--- a/vendor/github.com/containers/image/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/storage/storage_reference.go
@@ -55,7 +55,7 @@ func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
// one present with the same name or ID, and return the image.
func (s *storageReference) resolveImage() (*storage.Image, error) {
var loadedImage *storage.Image
- if s.id == "" {
+ if s.id == "" && s.named != nil {
// Look for an image that has the expanded reference name as an explicit Name value.
image, err := s.transport.store.Image(s.named.String())
if image != nil && err == nil {
@@ -69,7 +69,7 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
// though possibly with a different tag or digest, as a Name value, so
// that the canonical reference can be implicitly resolved to the image.
images, err := s.transport.store.ImagesByDigest(digested.Digest())
- if images != nil && err == nil {
+ if err == nil && len(images) > 0 {
for _, image := range images {
if imageMatchesRepo(image, s.named) {
loadedImage = image
@@ -97,6 +97,24 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
return nil, ErrNoSuchImage
}
}
+ // Default to having the image digest that we hand back match the most recently
+ // added manifest...
+ if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok {
+ loadedImage.Digest = digest
+ }
+ // ... unless the named reference says otherwise, and it matches one of the digests
+ // in the image. For those cases, set the Digest field to that value, for the
+ // sake of older consumers that don't know there's a whole list in there now.
+ if s.named != nil {
+ if digested, ok := s.named.(reference.Digested); ok {
+ for _, digest := range loadedImage.Digests {
+ if digest == digested.Digest() {
+ loadedImage.Digest = digest
+ break
+ }
+ }
+ }
+ }
return loadedImage, nil
}
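resolveImage now defaults the returned image's Digest to the most recently written manifest digest, then overrides it when the caller's reference pins a digest that the image actually carries. A hedged sketch of that selection, building a canonical reference at runtime so the digest string is valid:

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	manifestA := digest.Canonical.FromString("manifest A")
	manifestB := digest.Canonical.FromString("manifest B")
	imageDigests := []digest.Digest{manifestA, manifestB} // digests the image carries

	// Default: report the most recently added manifest digest.
	reported := manifestB

	// If the user's reference pins a digest the image has, prefer it.
	named, err := reference.ParseNormalizedNamed("docker.io/library/busybox@" + manifestA.String())
	if err != nil {
		panic(err)
	}
	if digested, ok := named.(reference.Digested); ok {
		for _, d := range imageDigests {
			if d == digested.Digest() {
				reported = d
				break
			}
		}
	}
	fmt.Println("reported digest:", reported)
}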
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
index b53c389bd..02d2f5c08 100644
--- a/vendor/github.com/containers/image/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -284,11 +284,6 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe
}
}
if sref, ok := ref.(*storageReference); ok {
- if sref.id != "" {
- if img, err := store.Image(sref.id); err == nil {
- return img, nil
- }
- }
tmpRef := *sref
if img, err := tmpRef.resolveImage(); err == nil {
return img, nil
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index bbbb1a458..1c5b6b378 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -28,7 +28,6 @@ golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
-gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
github.com/xeipuuv/gojsonschema master
@@ -48,3 +47,6 @@ github.com/boltdb/bolt master
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
+github.com/vbauerster/mpb v3.3.4
+github.com/mattn/go-isatty v0.0.4
+github.com/VividCortex/ewma v1.1.1
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index beaf41f39..10d628dbe 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -568,6 +568,9 @@ func (r *containerStore) Lock() {
r.lockfile.Lock()
}
+func (r *containerStore) RLock() {
+ r.lockfile.RLock()
+}
func (r *containerStore) Unlock() {
r.lockfile.Unlock()
}
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
index 40b912bb3..aef6becfe 100644
--- a/vendor/github.com/containers/storage/containers_ffjson.go
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./containers.go
+// source: containers.go
package storage
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 57d6dd63a..657d9b3ce 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -239,6 +239,8 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
key = strings.ToLower(key)
switch key {
+ case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check":
+ logrus.Warnf("overlay: override_kernel_check option was specified, but is no longer necessary")
case ".mountopt", "overlay.mountopt", "overlay2.mountopt":
o.mountOptions = val
case ".size", "overlay.size", "overlay2.size":
diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go
index 562415c09..63cb9ab74 100644
--- a/vendor/github.com/containers/storage/errors.go
+++ b/vendor/github.com/containers/storage/errors.go
@@ -43,8 +43,6 @@ var (
ErrSizeUnknown = errors.New("size is not known")
// ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents.
ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")
- // ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write.
- ErrLockReadOnly = errors.New("lock is not a read-write lock")
// ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images.
ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images")
// ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers.
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index d99842534..93505f5fb 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -5,8 +5,10 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "strings"
"time"
+ "github.com/containers/image/manifest"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
@@ -15,9 +17,13 @@ import (
)
const (
- // ImageDigestBigDataKey is the name of the big data item whose
- // contents we consider useful for computing a "digest" of the
- // image, by which we can locate the image later.
+ // ImageDigestManifestBigDataNamePrefix is a prefix of big data item
+ // names which we consider to be manifests, used for computing a
+ // "digest" value for the image as a whole, by which we can locate the
+ // image later.
+ ImageDigestManifestBigDataNamePrefix = "manifest"
+ // ImageDigestBigDataKey is provided for compatibility with older
+ // versions of the image library. It will be removed in the future.
ImageDigestBigDataKey = "manifest"
)
@@ -27,12 +33,19 @@ type Image struct {
// value which was generated by the library.
ID string `json:"id"`
- // Digest is a digest value that we can use to locate the image.
+ // Digest is a digest value that we can use to locate the image, if one
+ // was specified at creation-time.
Digest digest.Digest `json:"digest,omitempty"`
+ // Digests is a list of digest values of the image's manifests, and
+ // possibly a manually-specified value, that we can use to locate the
+ // image. If Digest is set, its value is also in this list.
+ Digests []digest.Digest `json:"-"`
+
// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
- // unique among images.
+ // unique among images, and are often the text representation of tagged
+ // or canonical references.
Names []string `json:"names,omitempty"`
// TopLayer is the ID of the topmost layer of the image itself, if the
@@ -92,8 +105,10 @@ type ROImageStore interface {
// Images returns a slice enumerating the known images.
Images() ([]Image, error)
- // Images returns a slice enumerating the images which have a big data
- // item with the name ImageDigestBigDataKey and the specified digest.
+ // ByDigest returns a slice enumerating the images which have either an
+ // explicitly-set digest, or a big data item with a name that starts
+ // with ImageDigestManifestBigDataNamePrefix, which matches the
+ // specified digest.
ByDigest(d digest.Digest) ([]*Image, error)
}
@@ -111,7 +126,8 @@ type ImageStore interface {
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
// SetNames replaces the list of names associated with an image with the
- // supplied values.
+ // supplied values. The values are expected to be valid normalized
+ // named image references.
SetNames(id string, names []string) error
// Delete removes the record of the image.
@@ -135,6 +151,7 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
+ Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
@@ -147,6 +164,17 @@ func copyImage(i *Image) *Image {
}
}
+func copyImageSlice(slice []*Image) []*Image {
+ if len(slice) > 0 {
+ cp := make([]*Image, len(slice))
+ for i := range slice {
+ cp[i] = copyImage(slice[i])
+ }
+ return cp
+ }
+ return nil
+}
+
func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
@@ -167,6 +195,46 @@ func (r *imageStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
+// bigDataNameIsManifest determines if a big data item with the specified name
+// is considered to be representative of the image, in that its digest can be
+// said to also be the image's digest. Currently, if its name is, or begins
+// with, "manifest", we say that it is.
+func bigDataNameIsManifest(name string) bool {
+ return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix)
+}
+
+// recomputeDigests takes a fixed digest and a name-to-digest map and builds a
+// list of the unique values that would identify the image.
+func (image *Image) recomputeDigests() error {
+ validDigests := make([]digest.Digest, 0, len(image.BigDataDigests)+1)
+ digests := make(map[digest.Digest]struct{})
+ if image.Digest != "" {
+ if err := image.Digest.Validate(); err != nil {
+ return errors.Wrapf(err, "error validating image digest %q", string(image.Digest))
+ }
+ digests[image.Digest] = struct{}{}
+ validDigests = append(validDigests, image.Digest)
+ }
+ for name, digest := range image.BigDataDigests {
+ if !bigDataNameIsManifest(name) {
+ continue
+ }
+ if digest.Validate() != nil {
+ return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name)
+ }
+ // Deduplicate the digest values.
+ if _, known := digests[digest]; !known {
+ digests[digest] = struct{}{}
+ validDigests = append(validDigests, digest)
+ }
+ }
+ if image.Digest == "" && len(validDigests) > 0 {
+ image.Digest = validDigests[0]
+ }
+ image.Digests = validDigests
+ return nil
+}
+
func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
@@ -189,17 +257,18 @@ func (r *imageStore) Load() error {
r.removeName(conflict, name)
shouldSave = true
}
- names[name] = images[n]
}
- // Implicit digest
- if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
- digests[digest] = append(digests[digest], images[n])
+ // Compute the digest list.
+ err = image.recomputeDigests()
+ if err != nil {
+ return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
+ }
+ for _, name := range image.Names {
+ names[name] = image
}
- // Explicit digest
- if image.Digest == "" {
- image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
- } else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
- digests[image.Digest] = append(digests[image.Digest], images[n])
+ for _, digest := range image.Digests {
+ list := digests[digest]
+ digests[digest] = append(list, image)
}
}
}
@@ -333,12 +402,12 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
}
if _, idInUse := r.byid[id]; idInUse {
- return nil, ErrDuplicateID
+ return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
}
names = dedupeNames(names)
for _, name := range names {
- if _, nameInUse := r.byname[name]; nameInUse {
- return nil, ErrDuplicateName
+ if image, nameInUse := r.byname[name]; nameInUse {
+ return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
}
}
if created.IsZero() {
@@ -348,6 +417,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
image = &Image{
ID: id,
Digest: searchableDigest,
+ Digests: nil,
Names: names,
TopLayer: layer,
Metadata: metadata,
@@ -357,16 +427,20 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
Created: created,
Flags: make(map[string]interface{}),
}
+ err := image.recomputeDigests()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error validating digests for new image")
+ }
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
- if searchableDigest != "" {
- list := r.bydigest[searchableDigest]
- r.bydigest[searchableDigest] = append(list, image)
- }
for _, name := range names {
r.byname[name] = image
}
+ for _, digest := range image.Digests {
+ list := r.bydigest[digest]
+ r.bydigest[digest] = append(list, image)
+ }
err = r.Save()
image = copyImage(image)
}
@@ -444,6 +518,14 @@ func (r *imageStore) Delete(id string) error {
for _, name := range image.Names {
delete(r.byname, name)
}
+ for _, digest := range image.Digests {
+ prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
+ if len(prunedList) == 0 {
+ delete(r.bydigest, digest)
+ } else {
+ r.bydigest[digest] = prunedList
+ }
+ }
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
@@ -452,28 +534,6 @@ func (r *imageStore) Delete(id string) error {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
- if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
- // remove the image from the digest-based index
- if list, ok := r.bydigest[digest]; ok {
- prunedList := imageSliceWithoutValue(list, image)
- if len(prunedList) == 0 {
- delete(r.bydigest, digest)
- } else {
- r.bydigest[digest] = prunedList
- }
- }
- }
- if image.Digest != "" {
- // remove the image's hard-coded digest from the digest-based index
- if list, ok := r.bydigest[image.Digest]; ok {
- prunedList := imageSliceWithoutValue(list, image)
- if len(prunedList) == 0 {
- delete(r.bydigest, image.Digest)
- } else {
- r.bydigest[image.Digest] = prunedList
- }
- }
- }
if err := r.Save(); err != nil {
return err
}
@@ -504,7 +564,7 @@ func (r *imageStore) Exists(id string) bool {
func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
- return images, nil
+ return copyImageSlice(images), nil
}
return nil, ErrImageUnknown
}
@@ -606,10 +666,19 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
if !ok {
return ErrImageUnknown
}
- if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
+ err := os.MkdirAll(r.datadir(image.ID), 0700)
+ if err != nil {
return err
}
- err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
+ var newDigest digest.Digest
+ if bigDataNameIsManifest(key) {
+ if newDigest, err = manifest.Digest(data); err != nil {
+ return errors.Wrapf(err, "error digesting manifest")
+ }
+ } else {
+ newDigest = digest.Canonical.FromBytes(data)
+ }
+ err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
save := false
if image.BigDataSizes == nil {
@@ -621,7 +690,6 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := image.BigDataDigests[key]
- newDigest := digest.Canonical.FromBytes(data)
image.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
@@ -637,20 +705,21 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
- if key == ImageDigestBigDataKey {
- if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
- // remove the image from the list of images in the digest-based
- // index which corresponds to the old digest for this item, unless
- // it's also the hard-coded digest
- if list, ok := r.bydigest[oldDigest]; ok {
- prunedList := imageSliceWithoutValue(list, image)
- if len(prunedList) == 0 {
- delete(r.bydigest, oldDigest)
- } else {
- r.bydigest[oldDigest] = prunedList
- }
+ for _, oldDigest := range image.Digests {
+ // remove the image from the list of images in the digest-based index
+ if list, ok := r.bydigest[oldDigest]; ok {
+ prunedList := imageSliceWithoutValue(list, image)
+ if len(prunedList) == 0 {
+ delete(r.bydigest, oldDigest)
+ } else {
+ r.bydigest[oldDigest] = prunedList
}
}
+ }
+ if err = image.recomputeDigests(); err != nil {
+ return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
+ }
+ for _, newDigest := range image.Digests {
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]
@@ -687,6 +756,10 @@ func (r *imageStore) Lock() {
r.lockfile.Lock()
}
+func (r *imageStore) RLock() {
+ r.lockfile.RLock()
+}
+
func (r *imageStore) Unlock() {
r.lockfile.Unlock()
}
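recomputeDigests above gathers the explicitly set Digest plus the digest of every manifest-named big data item into a validated, deduplicated Digests list, which the byid/bydigest indexes then use. A hedged sketch of that dedup step with made-up digest values:

package main

import (
	"fmt"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// recompute collects the fixed digest plus digests of "manifest*" big data
// items, validating and deduplicating them, similar to recomputeDigests.
func recompute(fixed digest.Digest, bigDataDigests map[string]digest.Digest) ([]digest.Digest, error) {
	seen := map[digest.Digest]struct{}{}
	var out []digest.Digest
	add := func(d digest.Digest) error {
		if err := d.Validate(); err != nil {
			return err
		}
		if _, ok := seen[d]; !ok {
			seen[d] = struct{}{}
			out = append(out, d)
		}
		return nil
	}
	if fixed != "" {
		if err := add(fixed); err != nil {
			return nil, err
		}
	}
	for name, d := range bigDataDigests {
		if !strings.HasPrefix(name, "manifest") {
			continue // only manifest-like items contribute to the image digest list
		}
		if err := add(d); err != nil {
			return nil, err
		}
	}
	return out, nil
}

func main() {
	m := digest.Canonical.FromString("manifest body")
	digests, err := recompute(m, map[string]digest.Digest{
		"manifest":               m, // duplicate of the fixed digest, deduplicated away
		"manifest-" + m.String(): m,
		"signatures":             digest.Canonical.FromString("sig"),
	})
	fmt.Println(digests, err)
}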
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index 539acfe93..6b40ebd59 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./images.go
+// source: images.go
package storage
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index cdc3cbba9..d612f0459 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -1222,6 +1222,10 @@ func (r *layerStore) Lock() {
r.lockfile.Lock()
}
+func (r *layerStore) RLock() {
+ r.lockfile.RLock()
+}
+
func (r *layerStore) Unlock() {
r.lockfile.Unlock()
}
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
index 09b5d0f33..125b5d8c9 100644
--- a/vendor/github.com/containers/storage/layers_ffjson.go
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./layers.go
+// source: layers.go
package storage
diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go
index 9f6a18144..7f07b9ac5 100644
--- a/vendor/github.com/containers/storage/lockfile.go
+++ b/vendor/github.com/containers/storage/lockfile.go
@@ -1,7 +1,6 @@
package storage
import (
- "fmt"
"path/filepath"
"sync"
"time"
@@ -13,7 +12,14 @@ import (
// identifier of the last party that made changes to whatever's being protected
// by the lock.
type Locker interface {
- sync.Locker
+ // Acquire a writer lock.
+ Lock()
+
+ // Unlock the lock.
+ Unlock()
+
+ // Acquire a reader lock.
+ RLock()
// Touch records, for others sharing the lock, that the caller was the
// last writer. It should only be called with the lock held.
@@ -39,31 +45,22 @@ var (
)
// GetLockfile opens a read-write lock file, creating it if necessary. The
-// Locker object it returns will be returned unlocked.
+// Locker object may already be locked if the path has already been requested
+// by the current process.
func GetLockfile(path string) (Locker, error) {
- lockfilesLock.Lock()
- defer lockfilesLock.Unlock()
- if lockfiles == nil {
- lockfiles = make(map[string]Locker)
- }
- cleanPath := filepath.Clean(path)
- if locker, ok := lockfiles[cleanPath]; ok {
- if !locker.IsReadWrite() {
- return nil, errors.Wrapf(ErrLockReadOnly, "lock %q is a read-only lock", cleanPath)
- }
- return locker, nil
- }
- locker, err := getLockFile(path, false) // platform dependent locker
- if err != nil {
- return nil, err
- }
- lockfiles[filepath.Clean(path)] = locker
- return locker, nil
+ return getLockfile(path, false)
}
-// GetROLockfile opens a read-only lock file. The Locker object it returns
-// will be returned unlocked.
+// GetROLockfile opens a read-only lock file, creating it if necessary. The
+// Locker object may already be locked if the path has already been requested
+// by the current process.
func GetROLockfile(path string) (Locker, error) {
+ return getLockfile(path, true)
+}
+
+// getLockfile is a helper for GetLockfile and GetROLockfile and returns Locker
+// based on the path and read-only property.
+func getLockfile(path string, ro bool) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
@@ -71,12 +68,15 @@ func GetROLockfile(path string) (Locker, error) {
}
cleanPath := filepath.Clean(path)
if locker, ok := lockfiles[cleanPath]; ok {
- if locker.IsReadWrite() {
- return nil, fmt.Errorf("lock %q is a read-write lock", cleanPath)
+ if ro && locker.IsReadWrite() {
+ return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
+ }
+ if !ro && !locker.IsReadWrite() {
+ return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath)
}
return locker, nil
}
- locker, err := getLockFile(path, true) // platform dependent locker
+ locker, err := getLockFile(path, ro) // platform dependent locker
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go
index 31e39c02e..0adbc49a5 100644
--- a/vendor/github.com/containers/storage/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/lockfile_unix.go
@@ -3,6 +3,7 @@
package storage
import (
+ "fmt"
"os"
"sync"
"time"
@@ -24,38 +25,84 @@ func getLockFile(path string, ro bool) (Locker, error) {
return nil, errors.Wrapf(err, "error opening %q", path)
}
unix.CloseOnExec(fd)
+
+ locktype := unix.F_WRLCK
if ro {
- return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK, locked: false}, nil
+ locktype = unix.F_RDLCK
}
- return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK, locked: false}, nil
+ return &lockfile{
+ stateMutex: &sync.Mutex{},
+ writeMutex: &sync.Mutex{},
+ file: path,
+ fd: uintptr(fd),
+ lw: stringid.GenerateRandomID(),
+ locktype: int16(locktype),
+ locked: false,
+ ro: ro}, nil
}
type lockfile struct {
- mu sync.Mutex
- file string
- fd uintptr
- lw string
- locktype int16
- locked bool
+ // stateMutex is used to synchronize concurrent accesses
+ stateMutex *sync.Mutex
+ // writeMutex is used to serialize and avoid recursive writer locks
+ writeMutex *sync.Mutex
+ counter int64
+ file string
+ fd uintptr
+ lw string
+ locktype int16
+ locked bool
+ ro bool
}
-// Lock locks the lock file
-func (l *lockfile) Lock() {
+// lock locks the lockfile via fcntl(2) based on the specified type and
+// command.
+func (l *lockfile) lock(l_type int16) {
lk := unix.Flock_t{
- Type: l.locktype,
+ Type: l_type,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
- l.mu.Lock()
+ if l_type == unix.F_WRLCK {
+ // If we try to lock as a writer, lock the writerMutex first to
+ // avoid multiple writer acquisitions of the same process.
+ // Note: it's important to lock it prior to the stateMutex to
+ // avoid a deadlock.
+ l.writeMutex.Lock()
+ }
+ l.stateMutex.Lock()
+ l.locktype = l_type
+ if l.counter == 0 {
+ // Optimization: only use the (expensive) fcntl syscall when
+ // the counter is 0. If it's greater than that, we're owning
+ // the lock already and can only be a reader.
+ for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
l.locked = true
- for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
- time.Sleep(10 * time.Millisecond)
+ l.counter++
+ l.stateMutex.Unlock()
+}
+
+// Lock locks the lockfile as a writer. Note that RLock() will be called if
+// the lock is a read-only one.
+func (l *lockfile) Lock() {
+ if l.ro {
+ l.RLock()
+ } else {
+ l.lock(unix.F_WRLCK)
}
}
-// Unlock unlocks the lock file
+// LockRead locks the lockfile as a reader.
+func (l *lockfile) RLock() {
+ l.lock(unix.F_RDLCK)
+}
+
+// Unlock unlocks the lockfile.
func (l *lockfile) Unlock() {
lk := unix.Flock_t{
Type: unix.F_UNLCK,
@@ -64,19 +111,40 @@ func (l *lockfile) Unlock() {
Len: 0,
Pid: int32(os.Getpid()),
}
- for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
- time.Sleep(10 * time.Millisecond)
+ l.stateMutex.Lock()
+ if l.locked == false {
+ // Panic when unlocking an unlocked lock. That's a violation
+ // of the lock semantics and will reveal such.
+ panic("calling Unlock on unlocked lock")
+ }
+ l.counter--
+ if l.counter < 0 {
+ // Panic when the counter is negative. There is no way we can
+ // recover from a corrupted lock and we need to protect the
+ // storage from corruption.
+ panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
+ }
+ if l.counter == 0 {
+ // We should only release the lock when the counter is 0 to
+ // avoid releasing read-locks too early; a given process may
+ // acquire a read lock multiple times.
+ l.locked = false
+ for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
+ if l.locktype == unix.F_WRLCK {
+ l.writeMutex.Unlock()
}
- l.locked = false
- l.mu.Unlock()
+ l.stateMutex.Unlock()
}
-// Check if lock is locked
+// Locked checks if lockfile is locked.
func (l *lockfile) Locked() bool {
return l.locked
}
-// Touch updates the lock file with the UID of the user
+// Touch updates the lock file with the UID of the user.
func (l *lockfile) Touch() error {
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
@@ -98,7 +166,8 @@ func (l *lockfile) Touch() error {
return nil
}
-// Modified indicates if the lock file has been updated since the last time it was loaded
+// Modified indicates if the lockfile has been updated since the last time it
+// was loaded.
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
@@ -117,7 +186,7 @@ func (l *lockfile) Modified() (bool, error) {
return l.lw != lw, nil
}
-// IsRWLock indicates if the lock file is a read-write lock
+// IsReadWriteLock indicates if the lock file is a read-write lock.
func (l *lockfile) IsReadWrite() bool {
- return (l.locktype == unix.F_WRLCK)
+ return !l.ro
}
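The Unix lockfile now keeps a counter guarded by stateMutex so a single process can take the same read lock repeatedly while issuing the fcntl syscall only on the first acquisition and the final release. A simplified, hedged sketch of that reference-counting shape, with the syscall replaced by a print:

package main

import (
	"fmt"
	"sync"
)

// rcLock models the counter logic from the diff: the expensive "real" lock
// operation runs only on the 0->1 and 1->0 transitions.
type rcLock struct {
	stateMutex sync.Mutex
	counter    int64
}

func (l *rcLock) RLock() {
	l.stateMutex.Lock()
	defer l.stateMutex.Unlock()
	if l.counter == 0 {
		fmt.Println("fcntl F_SETLKW (acquire)") // placeholder for the syscall
	}
	l.counter++
}

func (l *rcLock) Unlock() {
	l.stateMutex.Lock()
	defer l.stateMutex.Unlock()
	l.counter--
	if l.counter < 0 {
		panic("unlocked too often")
	}
	if l.counter == 0 {
		fmt.Println("fcntl F_SETLKW (release)") // placeholder for the syscall
	}
}

func main() {
	var l rcLock
	l.RLock()
	l.RLock() // second acquisition by the same process: no syscall
	l.Unlock()
	l.Unlock() // last release: the unlock syscall runs again
}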
diff --git a/vendor/github.com/containers/storage/lockfile_windows.go b/vendor/github.com/containers/storage/lockfile_windows.go
index 77490b851..a3821bfeb 100644
--- a/vendor/github.com/containers/storage/lockfile_windows.go
+++ b/vendor/github.com/containers/storage/lockfile_windows.go
@@ -23,6 +23,11 @@ func (l *lockfile) Lock() {
l.locked = true
}
+func (l *lockfile) RLock() {
+ l.mu.Lock()
+ l.locked = true
+}
+
func (l *lockfile) Unlock() {
l.locked = false
l.mu.Unlock()
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 228d8bb82..ba1704250 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -13,6 +13,7 @@ import (
"path/filepath"
"runtime"
"strings"
+ "sync"
"syscall"
"github.com/containers/storage/pkg/fileutils"
@@ -23,6 +24,7 @@ import (
"github.com/containers/storage/pkg/system"
gzip "github.com/klauspost/pgzip"
rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -1331,3 +1333,108 @@ const (
// HeaderSize is the size in bytes of a tar header
HeaderSize = 512
)
+
+// NewArchiver returns a new Archiver
+func NewArchiver(idMappings *idtools.IDMappings) *Archiver {
+ if idMappings == nil {
+ idMappings = &idtools.IDMappings{}
+ }
+ return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings}
+}
+
+// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends
+func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver {
+ if tarIDMappings == nil {
+ tarIDMappings = &idtools.IDMappings{}
+ }
+ if untarIDMappings == nil {
+ untarIDMappings = &idtools.IDMappings{}
+ }
+ return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings}
+}
+
+// CopyFileWithTarAndChown returns a function which copies a single file from outside
+// of any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
+ contentReader, contentWriter, err := os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+ }
+ defer contentReader.Close()
+ defer contentWriter.Close()
+ var hashError error
+ var hashWorker sync.WaitGroup
+ hashWorker.Add(1)
+ go func() {
+ t := tar.NewReader(contentReader)
+ _, err := t.Next()
+ if err != nil {
+ hashError = err
+ }
+ if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
+ hashError = err
+ }
+ hashWorker.Done()
+ }()
+ if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
+ err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ }
+ hashWorker.Wait()
+ if err == nil {
+ err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ }
+ return err
+ }
+ }
+ return archiver.CopyFileWithTar
+}
+
+// CopyWithTarAndChown returns a function which copies a directory tree from outside of
+// any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return archiver.CopyWithTar
+}
+
+// UntarPathAndChown returns a function which extracts an archive in a specified
+// location into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return archiver.UntarPath
+}
+
+// TarPath returns a function which creates an archive of a specified
+// location in the container's filesystem, mapping permissions using the
+// container's ID maps
+func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) {
+ tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ return func(path string) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{
+ Compression: Uncompressed,
+ UIDMaps: tarMappings.UIDs(),
+ GIDMaps: tarMappings.GIDs(),
+ })
+ }
+}
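CopyFileWithTarAndChown above wraps the archiver's Untar so that, when a hasher is supplied, the copied file's bytes are also fed through a tar reader into the hasher via an os.Pipe and io.TeeReader. A hedged usage sketch of the returned copy function; the paths and the nil chown options and ID maps are illustrative, and this assumes the containers/storage archive package as vendored in the diff:

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	hasher := sha256.New()

	// Build a copy function that extracts a single file and hashes its
	// contents as a side effect; nil chown options and ID maps keep IDs as-is.
	copyFile := archive.CopyFileWithTarAndChown(nil, hasher, nil, nil)

	if err := copyFile("/etc/hostname", "/tmp/hostname-copy"); err != nil { // illustrative paths
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Printf("copied file digest: sha256:%x\n", hasher.Sum(nil))
}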
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
index 2ba44b85d..9b8103e4d 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./pkg/archive/archive.go
+// source: pkg/archive/archive.go
package archive
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index dde8d44d3..a36ff1cb1 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -1,34 +1,32 @@
package chrootarchive
import (
+ "archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
+ "sync"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/pkg/errors"
)
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
- if idMappings == nil {
- idMappings = &idtools.IDMappings{}
- }
- return &archive.Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings}
+ archiver := archive.NewArchiver(idMappings)
+ archiver.Untar = Untar
+ return archiver
}
// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
- if tarIDMappings == nil {
- tarIDMappings = &idtools.IDMappings{}
- }
- if untarIDMappings == nil {
- untarIDMappings = &idtools.IDMappings{}
- }
- return &archive.Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings}
+ archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
+ archiver.Untar = Untar
+ return archiver
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
@@ -81,3 +79,75 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
return invokeUnpack(r, dest, options)
}
+
+// CopyFileWithTarAndChown returns a function which copies a single file from outside
+// of any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ contentReader, contentWriter, err := os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+ }
+ defer contentReader.Close()
+ defer contentWriter.Close()
+ var hashError error
+ var hashWorker sync.WaitGroup
+ hashWorker.Add(1)
+ go func() {
+ t := tar.NewReader(contentReader)
+ _, err := t.Next()
+ if err != nil {
+ hashError = err
+ }
+ if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
+ hashError = err
+ }
+ hashWorker.Done()
+ }()
+ if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
+ err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ }
+ hashWorker.Wait()
+ if err == nil {
+ err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ }
+ return err
+ }
+ }
+ return archiver.CopyFileWithTar
+}
+
+// CopyWithTarAndChown returns a function which copies a directory tree from outside of
+// any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return archiver.CopyWithTar
+}
+
+// UntarPathAndChown returns a function which extracts an archive in a specified
+// location into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return archiver.UntarPath
+}
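The three helpers added above all reuse one idea: wrap the archiver's Untar so the tar stream is teed through an io.Writer while it is being extracted, with CopyFileWithTarAndChown additionally splitting the stream through a pipe so a goroutine can digest the payload concurrently. Below is a minimal, self-contained sketch of that tee-and-hash pattern; it is not part of the vendored code, hashWhileConsuming and "example.tar" are hypothetical names, and for brevity it hashes the whole stream rather than unwrapping the first tar entry the way the vendored helper does.

```go
// Hedged sketch (not vendored code): tee a tar stream so one copy is
// consumed (extracted, in the real code) while a goroutine hashes the
// bytes flowing through an OS pipe.
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"sync"
)

func hashWhileConsuming(src io.Reader, hasher io.Writer, consume func(io.Reader) error) error {
	pr, pw, err := os.Pipe()
	if err != nil {
		return err
	}
	defer pr.Close()

	var hashErr error
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		_, hashErr = io.Copy(hasher, pr) // hash everything the consumer reads
	}()

	// Every byte the consumer reads is also written to the pipe feeding the hasher.
	consumeErr := consume(io.TeeReader(src, pw))
	pw.Close() // signal EOF to the hashing goroutine
	wg.Wait()

	if consumeErr != nil {
		return consumeErr
	}
	return hashErr
}

func main() {
	f, err := os.Open("example.tar") // hypothetical input archive
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	h := sha256.New()
	err = hashWhileConsuming(f, h, func(r io.Reader) error {
		_, err := io.Copy(ioutil.Discard, r) // stand-in for the real extraction step
		return err
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("digest: %x\n", h.Sum(nil))
}
```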
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
new file mode 100644
index 000000000..bdb5fbcb8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -0,0 +1,96 @@
+package config
+
+// ThinpoolOptionsConfig represents the "storage.options.thinpool"
+// TOML config table.
+type ThinpoolOptionsConfig struct {
+ // AutoExtendPercent determines the amount by which pool needs to be
+ // grown. This is specified in terms of % of pool size. So a value of
+ // 20 means that when threshold is hit, pool will be grown by 20% of
+ // existing pool size.
+ AutoExtendPercent string `toml:"autoextend_percent"`
+
+ // AutoExtendThreshold determines the pool extension threshold in terms
+ // of percentage of pool size. For example, if threshold is 60, that
+ // means when pool is 60% full, threshold has been hit.
+ AutoExtendThreshold string `toml:"autoextend_threshold"`
+
+ // BaseSize specifies the size to use when creating the base device,
+ // which limits the size of images and containers.
+ BaseSize string `toml:"basesize"`
+
+ // BlockSize specifies a custom blocksize to use for the thin pool.
+ BlockSize string `toml:"blocksize"`
+
+ // DirectLvmDevice specifies a custom block storage device to use for
+ // the thin pool.
+ DirectLvmDevice string `toml:"directlvm_device"`
+
+ // DirectLvmDeviceForce wipes the device even if it already has a
+ // filesystem
+ DirectLvmDeviceForce string `toml:"directlvm_device_force"`
+
+ // Fs specifies the filesystem type to use for the base device.
+ Fs string `toml:"fs"`
+
+ // log_level sets the log level of devicemapper.
+ LogLevel string `toml:"log_level"`
+
+ // MinFreeSpace specifies the minimum free space percent in a thin pool
+ // required for new device creation to succeed.
+ MinFreeSpace string `toml:"min_free_space"`
+
+ // MkfsArg specifies extra mkfs arguments to be used when creating the
+ // basedevice.
+ MkfsArg string `toml:"mkfsarg"`
+
+ // MountOpt specifies extra mount options used when mounting the thin
+ // devices.
+ MountOpt string `toml:"mountopt"`
+
+ // UseDeferredDeletion marks device for deferred deletion
+ UseDeferredDeletion string `toml:"use_deferred_deletion"`
+
+ // UseDeferredRemoval marks device for deferred removal
+ UseDeferredRemoval string `toml:"use_deferred_removal"`
+
+ // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
+ // retries XFS should attempt to complete IO when ENOSPC (no space)
+ // error is returned by underlying storage device.
+ XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
+}
+
+// OptionsConfig represents the "storage.options" TOML config table.
+type OptionsConfig struct {
+ // AdditionalImageStores is the location of additional read/only
+ // image stores, usually used to access shared image content
+ // over a networked file system.
+ AdditionalImageStores []string `toml:"additionalimagestores"`
+
+ // Size
+ Size string `toml:"size"`
+
+ // RemapUIDs is a list of default UID mappings to use for layers.
+ RemapUIDs string `toml:"remap-uids"`
+ // RemapGIDs is a list of default GID mappings to use for layers.
+ RemapGIDs string `toml:"remap-gids"`
+
+ // RemapUser is the name of one or more entries in /etc/subuid which
+ // should be used to set up default UID mappings.
+ RemapUser string `toml:"remap-user"`
+ // RemapGroup is the name of one or more entries in /etc/subgid which
+ // should be used to set up default GID mappings.
+ RemapGroup string `toml:"remap-group"`
+ // Thinpool container options to be handed to thinpool drivers
+ Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
+ // OSTree repository
+ OstreeRepo string `toml:"ostree_repo"`
+
+ // Do not create a bind mount on the storage home
+ SkipMountHome string `toml:"skip_mount_home"`
+
+ // Alternative program to use for the mount of the file system
+ MountProgram string `toml:"mount_program"`
+
+ // MountOpt specifies extra mount options used when mounting
+ MountOpt string `toml:"mountopt"`
+}
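The new pkg/config package only declares the TOML tables; the decoding itself still happens in store.go via github.com/BurntSushi/toml. As a rough illustration (not vendored code), the sketch below decodes a storage.conf-style document into locally re-declared structs shaped like OptionsConfig and ThinpoolOptionsConfig; the sample TOML values and the simplified nesting are assumptions for the example only.

```go
// Hedged sketch: decoding a "storage.options" style TOML table with the
// same BurntSushi/toml decoder store.go already imports. Struct fields
// mirror a subset of the vendored config package but are declared here
// so the example is self-contained.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type thinpoolOptions struct {
	AutoExtendPercent string `toml:"autoextend_percent"`
	BaseSize          string `toml:"basesize"`
}

type options struct {
	AdditionalImageStores []string        `toml:"additionalimagestores"`
	MountProgram          string          `toml:"mount_program"`
	Thinpool              thinpoolOptions `toml:"thinpool"`
}

type storageConfig struct {
	Storage struct {
		Driver    string  `toml:"driver"`
		RunRoot   string  `toml:"runroot"`
		GraphRoot string  `toml:"graphroot"`
		Options   options `toml:"options"`
	} `toml:"storage"`
}

const sample = `
[storage]
driver = "overlay"
runroot = "/var/run/containers/storage"
graphroot = "/var/lib/containers/storage"

[storage.options]
additionalimagestores = ["/usr/share/containers/storage"]
mount_program = "/usr/bin/fuse-overlayfs"

[storage.options.thinpool]
autoextend_percent = "20"
basesize = "10G"
`

func main() {
	var cfg storageConfig
	if _, err := toml.Decode(sample, &cfg); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(cfg.Storage.Driver, cfg.Storage.Options.MountProgram, cfg.Storage.Options.Thinpool.BaseSize)
}
```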
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 3fe305cc1..d53703d6b 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -18,6 +18,7 @@ import (
"github.com/BurntSushi/toml"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
@@ -565,10 +566,10 @@ func GetStore(options StoreOptions) (Store, error) {
}
if options.GraphRoot == "" {
- return nil, ErrIncompleteOptions
+ return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
}
if options.RunRoot == "" {
- return nil, ErrIncompleteOptions
+ return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
}
if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
@@ -842,12 +843,16 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return nil, -1, err
+ }
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, -1, err
+ }
}
if id == "" {
id = stringid.GenerateRandomID()
@@ -865,12 +870,15 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
gidMap := options.GIDMap
if parent != "" {
var ilayer *Layer
- for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) {
+ for _, l := range append([]ROLayerStore{rlstore}, rlstores...) {
+ lstore := l
if lstore != rlstore {
lstore.Lock()
defer lstore.Unlock()
if modified, err := lstore.Modified(); modified || err != nil {
- lstore.Load()
+ if err = lstore.Load(); err != nil {
+ return nil, -1, err
+ }
}
}
if l, err := lstore.Get(parent); err == nil && l != nil {
@@ -942,11 +950,14 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
return nil, err
}
var ilayer *Layer
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
ilayer, err = store.Get(layer)
if err == nil {
@@ -966,7 +977,9 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err = ristore.Load(); err != nil {
+ return nil, err
+ }
}
creationDate := time.Now().UTC()
@@ -999,12 +1012,15 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
}
var layer, parentLayer *Layer
// Locate the image's top layer and its parent, if it has one.
- for _, store := range append([]ROLayerStore{rlstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{rlstore}, lstores...) {
+ store := s
if store != rlstore {
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
}
// Walk the top layer list.
@@ -1125,14 +1141,19 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return nil, err
+ }
}
var cimage *Image
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
cimage, err = store.Get(image)
if err == nil {
@@ -1162,7 +1183,9 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return nil, err
+ }
}
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
@@ -1222,7 +1245,9 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, err
+ }
}
options.IDMappingOptions = IDMappingOptions{
HostUIDMapping: len(options.UIDMap) == 0,
@@ -1254,17 +1279,23 @@ func (s *store) SetMetadata(id, metadata string) error {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return err
+ }
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err := ristore.Load(); err != nil {
+ return err
+ }
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return err
+ }
}
if rlstore.Exists(id) {
@@ -1288,11 +1319,14 @@ func (s *store) Metadata(id string) (string, error) {
if err != nil {
return "", err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return "", err
+ }
}
if store.Exists(id) {
return store.Metadata(id)
@@ -1307,11 +1341,14 @@ func (s *store) Metadata(id string) (string, error) {
if err != nil {
return "", err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return "", err
+ }
}
if store.Exists(id) {
return store.Metadata(id)
@@ -1325,7 +1362,9 @@ func (s *store) Metadata(id string) (string, error) {
cstore.Lock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
- cstore.Load()
+ if err = cstore.Load(); err != nil {
+ return "", err
+ }
}
if cstore.Exists(id) {
return cstore.Metadata(id)
@@ -1342,11 +1381,14 @@ func (s *store) ListImageBigData(id string) ([]string, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
bigDataNames, err := store.BigDataNames(id)
if err == nil {
@@ -1365,11 +1407,14 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) {
if err != nil {
return -1, err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return -1, err
+ }
}
size, err := store.BigDataSize(id, key)
if err == nil {
@@ -1389,11 +1434,14 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
return "", err
}
stores = append([]ROImageStore{ristore}, stores...)
- for _, ristore := range stores {
+ for _, r := range stores {
+ ristore := r
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err = ristore.Load(); err != nil {
+ return "", nil
+ }
}
d, err := ristore.BigDataDigest(id, key)
if err == nil && d.Validate() == nil {
@@ -1412,11 +1460,14 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
- store.Lock()
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
data, err := store.BigData(id, key)
if err == nil {
@@ -1435,7 +1486,9 @@ func (s *store) SetImageBigData(id, key string, data []byte) error {
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err = ristore.Load(); err != nil {
+ return err
+ }
}
return ristore.SetBigData(id, key, data)
@@ -1452,11 +1505,14 @@ func (s *store) ImageSize(id string) (int64, error) {
if err != nil {
return -1, errors.Wrapf(err, "error loading additional layer stores")
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return -1, err
+ }
}
}
@@ -1471,11 +1527,14 @@ func (s *store) ImageSize(id string) (int64, error) {
}
// Look for the image's record.
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return -1, err
+ }
}
if image, err = store.Get(id); err == nil {
imageStore = store
@@ -1556,11 +1615,14 @@ func (s *store) ContainerSize(id string) (int64, error) {
if err != nil {
return -1, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return -1, err
+ }
}
}
@@ -1582,7 +1644,9 @@ func (s *store) ContainerSize(id string) (int64, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return -1, err
+ }
}
// Read the container record.
@@ -1644,7 +1708,9 @@ func (s *store) ListContainerBigData(id string) ([]string, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, err
+ }
}
return rcstore.BigDataNames(id)
@@ -1658,7 +1724,9 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return -1, err
+ }
}
return rcstore.BigDataSize(id, key)
}
@@ -1671,7 +1739,9 @@ func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return "", err
+ }
}
return rcstore.BigDataDigest(id, key)
}
@@ -1684,7 +1754,9 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, err
+ }
}
return rcstore.BigData(id, key)
}
@@ -1697,7 +1769,9 @@ func (s *store) SetContainerBigData(id, key string, data []byte) error {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return err
+ }
}
return rcstore.SetBigData(id, key, data)
}
@@ -1711,11 +1785,14 @@ func (s *store) Exists(id string) bool {
if err != nil {
return false
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return false
+ }
}
if store.Exists(id) {
return true
@@ -1730,11 +1807,14 @@ func (s *store) Exists(id string) bool {
if err != nil {
return false
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return false
+ }
}
if store.Exists(id) {
return true
@@ -1748,7 +1828,9 @@ func (s *store) Exists(id string) bool {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return false
+ }
}
if rcstore.Exists(id) {
return true
@@ -1779,7 +1861,9 @@ func (s *store) SetNames(id string, names []string) error {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return err
+ }
}
if rlstore.Exists(id) {
return rlstore.SetNames(id, deduped)
@@ -1792,7 +1876,9 @@ func (s *store) SetNames(id string, names []string) error {
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err = ristore.Load(); err != nil {
+ return err
+ }
}
if ristore.Exists(id) {
return ristore.SetNames(id, deduped)
@@ -1805,7 +1891,9 @@ func (s *store) SetNames(id string, names []string) error {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return err
+ }
}
if rcstore.Exists(id) {
return rcstore.SetNames(id, deduped)
@@ -1822,11 +1910,14 @@ func (s *store) Names(id string) ([]string, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
if l, err := store.Get(id); l != nil && err == nil {
return l.Names, nil
@@ -1841,11 +1932,14 @@ func (s *store) Names(id string) ([]string, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
if i, err := store.Get(id); i != nil && err == nil {
return i.Names, nil
@@ -1859,7 +1953,9 @@ func (s *store) Names(id string) ([]string, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, err
+ }
}
if c, err := rcstore.Get(id); c != nil && err == nil {
return c.Names, nil
@@ -1876,11 +1972,14 @@ func (s *store) Lookup(name string) (string, error) {
if err != nil {
return "", err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return "", err
+ }
}
if l, err := store.Get(name); l != nil && err == nil {
return l.ID, nil
@@ -1895,11 +1994,14 @@ func (s *store) Lookup(name string) (string, error) {
if err != nil {
return "", err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return "", err
+ }
}
if i, err := store.Get(name); i != nil && err == nil {
return i.ID, nil
@@ -1913,7 +2015,9 @@ func (s *store) Lookup(name string) (string, error) {
cstore.Lock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
- cstore.Load()
+ if err = cstore.Load(); err != nil {
+ return "", err
+ }
}
if c, err := cstore.Get(name); c != nil && err == nil {
return c.ID, nil
@@ -1939,17 +2043,23 @@ func (s *store) DeleteLayer(id string) error {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return err
+ }
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err = ristore.Load(); err != nil {
+ return err
+ }
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return err
+ }
}
if rlstore.Exists(id) {
@@ -2005,17 +2115,23 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return nil, err
+ }
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err = ristore.Load(); err != nil {
+ return nil, err
+ }
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, err
+ }
}
layersToRemove := []string{}
if ristore.Exists(id) {
@@ -2137,17 +2253,23 @@ func (s *store) DeleteContainer(id string) error {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return err
+ }
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err = ristore.Load(); err != nil {
+ return err
+ }
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return err
+ }
}
if rcstore.Exists(id) {
@@ -2192,17 +2314,23 @@ func (s *store) Delete(id string) error {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return err
+ }
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err := ristore.Load(); err != nil {
+ return err
+ }
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return err
+ }
}
if rcstore.Exists(id) {
@@ -2254,17 +2382,23 @@ func (s *store) Wipe() error {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return err
+ }
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
- ristore.Load()
+ if err = ristore.Load(); err != nil {
+ return err
+ }
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return err
+ }
}
if err = rcstore.Wipe(); err != nil {
@@ -2306,7 +2440,9 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return "", err
+ }
}
if rlstore.Exists(id) {
options := drivers.MountOpts{
@@ -2331,7 +2467,9 @@ func (s *store) Mounted(id string) (int, error) {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return 0, err
+ }
}
return rlstore.Mounted(id)
@@ -2348,7 +2486,9 @@ func (s *store) Unmount(id string, force bool) (bool, error) {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return false, err
+ }
}
if rlstore.Exists(id) {
return rlstore.Unmount(id, force)
@@ -2365,11 +2505,14 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
if store.Exists(to) {
return store.Changes(from, to)
@@ -2387,11 +2530,14 @@ func (s *store) DiffSize(from, to string) (int64, error) {
if err != nil {
return -1, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return -1, err
+ }
}
if store.Exists(to) {
return store.DiffSize(from, to)
@@ -2409,10 +2555,13 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
if err != nil {
return nil, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
- store.Lock()
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
+ store.RLock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
if store.Exists(to) {
rc, err := store.Diff(from, to, options)
@@ -2440,7 +2589,9 @@ func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return -1, err
+ }
}
if rlstore.Exists(to) {
return rlstore.ApplyDiff(to, diff)
@@ -2459,11 +2610,14 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
if err != nil {
return nil, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
storeLayers, err := m(store, d)
if err != nil {
@@ -2503,11 +2657,14 @@ func (s *store) LayerSize(id string) (int64, error) {
if err != nil {
return -1, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return -1, err
+ }
}
if store.Exists(id) {
return store.Size(id)
@@ -2524,7 +2681,9 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return nil, nil, err
+ }
}
if rlstore.Exists(id) {
return rlstore.ParentOwners(id)
@@ -2544,12 +2703,16 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return nil, nil, err
+ }
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, nil, err
+ }
}
container, err := rcstore.Get(id)
if err != nil {
@@ -2573,11 +2736,14 @@ func (s *store) Layers() ([]Layer, error) {
return nil, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
storeLayers, err := store.Layers()
if err != nil {
@@ -2599,11 +2765,14 @@ func (s *store) Images() ([]Image, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
storeImages, err := store.Images()
if err != nil {
@@ -2623,7 +2792,9 @@ func (s *store) Containers() ([]Container, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, err
+ }
}
return rcstore.Containers()
@@ -2638,11 +2809,14 @@ func (s *store) Layer(id string) (*Layer, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
layer, err := store.Get(id)
if err == nil {
@@ -2661,11 +2835,14 @@ func (s *store) Image(id string) (*Image, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
image, err := store.Get(id)
if err == nil {
@@ -2691,11 +2868,14 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
if err != nil {
return nil, err
}
- for _, store := range append([]ROImageStore{istore}, istores...) {
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
imageList, err := store.Images()
if err != nil {
@@ -2726,7 +2906,9 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
- store.Load()
+ if err = store.Load(); err != nil {
+ return nil, err
+ }
}
imageList, err := store.ByDigest(d)
if err != nil && err != ErrImageUnknown {
@@ -2745,7 +2927,9 @@ func (s *store) Container(id string) (*Container, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, err
+ }
}
return rcstore.Get(id)
@@ -2759,7 +2943,9 @@ func (s *store) ContainerLayerID(id string) (string, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return "", err
+ }
}
container, err := rcstore.Get(id)
if err != nil {
@@ -2780,7 +2966,9 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return nil, err
+ }
}
containerList, err := rcstore.Containers()
if err != nil {
@@ -2803,7 +2991,9 @@ func (s *store) ContainerDirectory(id string) (string, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return "", err
+ }
}
id, err = rcstore.Lookup(id)
@@ -2828,7 +3018,9 @@ func (s *store) ContainerRunDirectory(id string) (string, error) {
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
- rcstore.Load()
+ if err = rcstore.Load(); err != nil {
+ return "", err
+ }
}
id, err = rcstore.Lookup(id)
@@ -2899,7 +3091,9 @@ func (s *store) Shutdown(force bool) ([]string, error) {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
- rlstore.Load()
+ if err = rlstore.Load(); err != nil {
+ return nil, err
+ }
}
layers, err := rlstore.Layers()
@@ -2992,6 +3186,15 @@ func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
return ret
}
+func copyDigestSlice(slice []digest.Digest) []digest.Digest {
+ if len(slice) == 0 {
+ return nil
+ }
+ ret := make([]digest.Digest, len(slice))
+ copy(ret, slice)
+ return ret
+}
+
// copyStringInterfaceMap still forces us to assume that the interface{} is
// a non-pointer scalar value
func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
@@ -3005,108 +3208,13 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
// DefaultConfigFile path to the system wide storage.conf file
const DefaultConfigFile = "/etc/containers/storage.conf"
-// ThinpoolOptionsConfig represents the "storage.options.thinpool"
-// TOML config table.
-type ThinpoolOptionsConfig struct {
- // AutoExtendPercent determines the amount by which pool needs to be
- // grown. This is specified in terms of % of pool size. So a value of
- // 20 means that when threshold is hit, pool will be grown by 20% of
- // existing pool size.
- AutoExtendPercent string `toml:"autoextend_percent"`
-
- // AutoExtendThreshold determines the pool extension threshold in terms
- // of percentage of pool size. For example, if threshold is 60, that
- // means when pool is 60% full, threshold has been hit.
- AutoExtendThreshold string `toml:"autoextend_threshold"`
-
- // BaseSize specifies the size to use when creating the base device,
- // which limits the size of images and containers.
- BaseSize string `toml:"basesize"`
-
- // BlockSize specifies a custom blocksize to use for the thin pool.
- BlockSize string `toml:"blocksize"`
-
- // DirectLvmDevice specifies a custom block storage device to use for
- // the thin pool.
- DirectLvmDevice string `toml:"directlvm_device"`
-
- // DirectLvmDeviceForcewipes device even if device already has a
- // filesystem
- DirectLvmDeviceForce string `toml:"directlvm_device_force"`
-
- // Fs specifies the filesystem type to use for the base device.
- Fs string `toml:"fs"`
-
- // log_level sets the log level of devicemapper.
- LogLevel string `toml:"log_level"`
-
- // MinFreeSpace specifies the min free space percent in a thin pool
- // require for new device creation to
- MinFreeSpace string `toml:"min_free_space"`
-
- // MkfsArg specifies extra mkfs arguments to be used when creating the
- // basedevice.
- MkfsArg string `toml:"mkfsarg"`
-
- // MountOpt specifies extra mount options used when mounting the thin
- // devices.
- MountOpt string `toml:"mountopt"`
-
- // UseDeferredDeletion marks device for deferred deletion
- UseDeferredDeletion string `toml:"use_deferred_deletion"`
-
- // UseDeferredRemoval marks device for deferred removal
- UseDeferredRemoval string `toml:"use_deferred_removal"`
-
- // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
- // retries XFS should attempt to complete IO when ENOSPC (no space)
- // error is returned by underlying storage device.
- XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
-}
-
-// OptionsConfig represents the "storage.options" TOML config table.
-type OptionsConfig struct {
- // AdditionalImagesStores is the location of additional read/only
- // Image stores. Usually used to access Networked File System
- // for shared image content
- AdditionalImageStores []string `toml:"additionalimagestores"`
-
- // Size
- Size string `toml:"size"`
-
- // RemapUIDs is a list of default UID mappings to use for layers.
- RemapUIDs string `toml:"remap-uids"`
- // RemapGIDs is a list of default GID mappings to use for layers.
- RemapGIDs string `toml:"remap-gids"`
-
- // RemapUser is the name of one or more entries in /etc/subuid which
- // should be used to set up default UID mappings.
- RemapUser string `toml:"remap-user"`
- // RemapGroup is the name of one or more entries in /etc/subgid which
- // should be used to set up default GID mappings.
- RemapGroup string `toml:"remap-group"`
- // Thinpool container options to be handed to thinpool drivers
- Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
- // OSTree repository
- OstreeRepo string `toml:"ostree_repo"`
-
- // Do not create a bind mount on the storage home
- SkipMountHome string `toml:"skip_mount_home"`
-
- // Alternative program to use for the mount of the file system
- MountProgram string `toml:"mount_program"`
-
- // MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
-}
-
// TOML-friendly explicit tables used for conversions.
type tomlConfig struct {
Storage struct {
- Driver string `toml:"driver"`
- RunRoot string `toml:"runroot"`
- GraphRoot string `toml:"graphroot"`
- Options struct{ OptionsConfig } `toml:"options"`
+ Driver string `toml:"driver"`
+ RunRoot string `toml:"runroot"`
+ GraphRoot string `toml:"graphroot"`
+ Options struct{ config.OptionsConfig } `toml:"options"`
} `toml:"storage"`
}
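The bulk of the store.go changes above apply one mechanical transformation: every lock/Modified()/Load() call site now returns the Load error to the caller instead of discarding it, and the range variables are copied into locals before taking deferred locks so each iteration unlocks its own store. The sketch below restates that idiom with an illustrative helper; the roStore interface, reloadIfModified, and fakeStore are not part of containers/storage and exist only to show the shape of the pattern.

```go
// Illustrative sketch only: restates the now error-aware
// lock/Modified/Load idiom from the diff above.
package main

import (
	"errors"
	"fmt"
	"sync"
)

// roStore captures the minimal surface the idiom uses.
type roStore interface {
	Lock()
	Unlock()
	Modified() (bool, error)
	Load() error
}

// reloadIfModified is the shape each call site now follows: if the
// on-disk state changed (or the check itself failed), reload, and
// propagate the Load error instead of silently dropping it.
func reloadIfModified(s roStore) error {
	if modified, err := s.Modified(); modified || err != nil {
		if err := s.Load(); err != nil {
			return err
		}
	}
	return nil
}

// fakeStore is a stand-in implementation for demonstration.
type fakeStore struct {
	mu       sync.Mutex
	modified bool
	loadErr  error
}

func (f *fakeStore) Lock()                   { f.mu.Lock() }
func (f *fakeStore) Unlock()                 { f.mu.Unlock() }
func (f *fakeStore) Modified() (bool, error) { return f.modified, nil }
func (f *fakeStore) Load() error             { return f.loadErr }

func main() {
	s := &fakeStore{modified: true, loadErr: errors.New("layers.json is corrupt")}
	s.Lock()
	defer s.Unlock()
	if err := reloadIfModified(s); err != nil {
		fmt.Println("caller now sees:", err) // previously this error was dropped
	}
}
```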
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index 04af9010b..c143b049d 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -1,12 +1,18 @@
github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
+github.com/containers/image master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/docker/libtrust master
+github.com/klauspost/compress v1.4.1
+github.com/klauspost/cpuid v1.2.0
+github.com/klauspost/pgzip v1.2.1
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
+github.com/opencontainers/image-spec master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux v1.1
github.com/ostreedev/ostree-go master
@@ -23,6 +29,3 @@ golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
gotest.tools master
github.com/google/go-cmp master
-github.com/klauspost/pgzip v1.2.1
-github.com/klauspost/compress v1.4.1
-github.com/klauspost/cpuid v1.2.0
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 000000000..5f0d1fb6a
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 000000000..7a950d177
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
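The README above documents the library's single entry point. A small, hedged sketch of how a CLI typically wires it up follows; this wiring is illustrative and not taken from the vendored code, and the message and five-second pause are arbitrary choices for the example.

```go
// Illustrative only: guard a CLI against being launched by a
// double-click in Windows Explorer, using StartedByExplorer as the
// README describes.
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command-line tool; please run it from a terminal.")
		time.Sleep(5 * time.Second) // keep the console window open long enough to read
		os.Exit(1)
	}
	fmt.Println("running normally")
}
```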
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 000000000..9d2d8a4ba
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 000000000..336142a5e
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // defined by the Win32 API
+ th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+ Process32First = kernel.MustFindProc("Process32FirstW")
+ Process32Next = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+ dwSize uint32
+ cntUsage uint32
+ th32ProcessID uint32
+ th32DefaultHeapID int
+ th32ModuleID uint32
+ cntThreads uint32
+ th32ParentProcessID uint32
+ pcPriClassBase int32
+ dwFlags uint32
+ szExeFile [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+ snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+ if snapshot == uintptr(syscall.InvalidHandle) {
+ err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+ return
+ }
+ defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+ var processEntry processEntry32
+ processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+ ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32First: %v", e1)
+ return
+ }
+
+ for {
+ if processEntry.th32ProcessID == uint32(pid) {
+ pe = &processEntry
+ return
+ }
+
+ ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32Next: %v", e1)
+ return
+ }
+ }
+}
+
+func getppid() (pid int, err error) {
+ pe, err := getProcessEntry(os.Getpid())
+ if err != nil {
+ return
+ }
+
+ pid = int(pe.th32ParentProcessID)
+ return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ ppid, err := getppid()
+ if err != nil {
+ return false
+ }
+
+ pe, err := getProcessEntry(ppid)
+ if err != nil {
+ return false
+ }
+
+ name := syscall.UTF16ToString(pe.szExeFile[:])
+ return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 000000000..9a28e57c3
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(os.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 000000000..65dc692b6
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 000000000..1e69004bb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,50 @@
+# go-isatty
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
+[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/mattn/go-isatty"
+ "os"
+)
+
+func main() {
+ if isatty.IsTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Terminal")
+ } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Cygwin/MSYS2 Terminal")
+ } else {
+ fmt.Println("Is Not Terminal")
+ }
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
+
+## Thanks
+
+* k-takata: base idea for IsCygwinTerminal
+
+ https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 000000000..17d4f90eb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements interface to isatty
+package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 000000000..9584a9884
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is terminal which
+// is always false on appengine classic which is a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+ return false
+}
+
+// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 000000000..42f2514d1
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 000000000..7384cf991
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine,!ppc64,!ppc64le
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
new file mode 100644
index 000000000..44e5d2130
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
@@ -0,0 +1,19 @@
+// +build linux
+// +build ppc64 ppc64le
+
+package isatty
+
+import (
+ "unsafe"
+
+ syscall "golang.org/x/sys/unix"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 000000000..9d8b4a599
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,10 @@
+// +build !windows
+// +build !appengine
+
+package isatty
+
+// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 000000000..1f0c6bf53
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+ var termio unix.Termio
+ err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+ return err == nil
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 000000000..af51cbcaa
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,94 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+ "strings"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ fileNameInfo uintptr = 2
+ fileTypePipe = 3
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
+ procGetFileType = kernel32.NewProc("GetFileType")
+)
+
+func init() {
+ // Check if GetFileInformationByHandleEx is available.
+ if procGetFileInformationByHandleEx.Find() != nil {
+ procGetFileInformationByHandleEx = nil
+ }
+}
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+// isCygwinPipeName reports whether the pipe name is one used by a Cygwin/MSYS2 pty.
+// Such a pty has a name of the form:
+// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+func isCygwinPipeName(name string) bool {
+ token := strings.Split(name, "-")
+ if len(token) < 5 {
+ return false
+ }
+
+ if token[0] != `\msys` && token[0] != `\cygwin` {
+ return false
+ }
+
+ if token[1] == "" {
+ return false
+ }
+
+ if !strings.HasPrefix(token[2], "pty") {
+ return false
+ }
+
+ if token[3] != `from` && token[3] != `to` {
+ return false
+ }
+
+ if token[4] != "master" {
+ return false
+ }
+
+ return true
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a Cygwin or MSYS2
+// terminal.
+func IsCygwinTerminal(fd uintptr) bool {
+ if procGetFileInformationByHandleEx == nil {
+ return false
+ }
+
+ // Cygwin/msys's pty is a pipe.
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
+ if ft != fileTypePipe || e != 0 {
+ return false
+ }
+
+ var buf [2 + syscall.MAX_PATH]uint16
+ r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
+ 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
+ uintptr(len(buf)*2), 0, 0)
+ if r == 0 || e != 0 {
+ return false
+ }
+
+ l := *(*uint32)(unsafe.Pointer(&buf))
+ return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
+}
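
The vendored go-isatty files above expose two entry points, IsTerminal and IsCygwinTerminal. A minimal sketch of how a caller would typically combine them (illustrative only, not part of the vendored sources):

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	fd := os.Stdout.Fd()
	// IsTerminal covers native consoles; IsCygwinTerminal covers Cygwin/MSYS2
	// ptys on Windows, which look like named pipes rather than console handles.
	if isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd) {
		fmt.Println("stdout is a terminal")
	} else {
		fmt.Println("stdout is not a terminal")
	}
}
```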
diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE
deleted file mode 100644
index 91b5cef30..000000000
--- a/vendor/github.com/mattn/go-runewidth/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Yasuhiro Matsumoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/mattn/go-runewidth/README.mkd b/vendor/github.com/mattn/go-runewidth/README.mkd
deleted file mode 100644
index 66663a94b..000000000
--- a/vendor/github.com/mattn/go-runewidth/README.mkd
+++ /dev/null
@@ -1,27 +0,0 @@
-go-runewidth
-============
-
-[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
-[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
-[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
-[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
-
-Provides functions to get fixed width of the character or string.
-
-Usage
------
-
-```go
-runewidth.StringWidth("つのだ☆HIRO") == 12
-```
-
-
-Author
-------
-
-Yasuhiro Matsumoto
-
-License
--------
-
-under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
deleted file mode 100644
index 3cb94106f..000000000
--- a/vendor/github.com/mattn/go-runewidth/runewidth.go
+++ /dev/null
@@ -1,977 +0,0 @@
-package runewidth
-
-import (
- "os"
-)
-
-var (
- // EastAsianWidth will be set true if the current locale is CJK
- EastAsianWidth bool
-
- // ZeroWidthJoiner is flag to set to use UTR#51 ZWJ
- ZeroWidthJoiner bool
-
- // DefaultCondition is a condition in current locale
- DefaultCondition = &Condition{}
-)
-
-func init() {
- handleEnv()
-}
-
-func handleEnv() {
- env := os.Getenv("RUNEWIDTH_EASTASIAN")
- if env == "" {
- EastAsianWidth = IsEastAsian()
- } else {
- EastAsianWidth = env == "1"
- }
- // update DefaultCondition
- DefaultCondition.EastAsianWidth = EastAsianWidth
- DefaultCondition.ZeroWidthJoiner = ZeroWidthJoiner
-}
-
-type interval struct {
- first rune
- last rune
-}
-
-type table []interval
-
-func inTables(r rune, ts ...table) bool {
- for _, t := range ts {
- if inTable(r, t) {
- return true
- }
- }
- return false
-}
-
-func inTable(r rune, t table) bool {
- // func (t table) IncludesRune(r rune) bool {
- if r < t[0].first {
- return false
- }
-
- bot := 0
- top := len(t) - 1
- for top >= bot {
- mid := (bot + top) >> 1
-
- switch {
- case t[mid].last < r:
- bot = mid + 1
- case t[mid].first > r:
- top = mid - 1
- default:
- return true
- }
- }
-
- return false
-}
-
-var private = table{
- {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD},
-}
-
-var nonprint = table{
- {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
- {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
- {0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
- {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
-}
-
-var combining = table{
- {0x0300, 0x036F}, {0x0483, 0x0489}, {0x0591, 0x05BD},
- {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5},
- {0x05C7, 0x05C7}, {0x0610, 0x061A}, {0x064B, 0x065F},
- {0x0670, 0x0670}, {0x06D6, 0x06DC}, {0x06DF, 0x06E4},
- {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x0711, 0x0711},
- {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3},
- {0x0816, 0x0819}, {0x081B, 0x0823}, {0x0825, 0x0827},
- {0x0829, 0x082D}, {0x0859, 0x085B}, {0x08D4, 0x08E1},
- {0x08E3, 0x0903}, {0x093A, 0x093C}, {0x093E, 0x094F},
- {0x0951, 0x0957}, {0x0962, 0x0963}, {0x0981, 0x0983},
- {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8},
- {0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3},
- {0x0A01, 0x0A03}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42},
- {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51},
- {0x0A70, 0x0A71}, {0x0A75, 0x0A75}, {0x0A81, 0x0A83},
- {0x0ABC, 0x0ABC}, {0x0ABE, 0x0AC5}, {0x0AC7, 0x0AC9},
- {0x0ACB, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B03},
- {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44}, {0x0B47, 0x0B48},
- {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B62, 0x0B63},
- {0x0B82, 0x0B82}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8},
- {0x0BCA, 0x0BCD}, {0x0BD7, 0x0BD7}, {0x0C00, 0x0C03},
- {0x0C3E, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D},
- {0x0C55, 0x0C56}, {0x0C62, 0x0C63}, {0x0C81, 0x0C83},
- {0x0CBC, 0x0CBC}, {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8},
- {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3},
- {0x0D01, 0x0D03}, {0x0D3E, 0x0D44}, {0x0D46, 0x0D48},
- {0x0D4A, 0x0D4D}, {0x0D57, 0x0D57}, {0x0D62, 0x0D63},
- {0x0D82, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4},
- {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DF2, 0x0DF3},
- {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E},
- {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC},
- {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35},
- {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F},
- {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97},
- {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E},
- {0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064},
- {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D},
- {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F},
- {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753},
- {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD},
- {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9},
- {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B},
- {0x1A55, 0x1A5E}, {0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F},
- {0x1AB0, 0x1ABE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44},
- {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD},
- {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2},
- {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF2, 0x1CF4},
- {0x1CF8, 0x1CF9}, {0x1DC0, 0x1DF5}, {0x1DFB, 0x1DFF},
- {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F},
- {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, {0x3099, 0x309A},
- {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F},
- {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806},
- {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA880, 0xA881},
- {0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA926, 0xA92D},
- {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0},
- {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43},
- {0xAA4C, 0xAA4D}, {0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0},
- {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF},
- {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6},
- {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E},
- {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD},
- {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03},
- {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A},
- {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x11000, 0x11002},
- {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA},
- {0x11100, 0x11102}, {0x11127, 0x11134}, {0x11173, 0x11173},
- {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111CA, 0x111CC},
- {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA},
- {0x11300, 0x11303}, {0x1133C, 0x1133C}, {0x1133E, 0x11344},
- {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357},
- {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374},
- {0x11435, 0x11446}, {0x114B0, 0x114C3}, {0x115AF, 0x115B5},
- {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640},
- {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x11C2F, 0x11C36},
- {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6},
- {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F51, 0x16F7E},
- {0x16F8F, 0x16F92}, {0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169},
- {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B},
- {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36},
- {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84},
- {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
- {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
- {0x1E026, 0x1E02A}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A},
- {0xE0100, 0xE01EF},
-}
-
-var doublewidth = table{
- {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A},
- {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3},
- {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653},
- {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1},
- {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5},
- {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA},
- {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA},
- {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B},
- {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E},
- {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797},
- {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C},
- {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99},
- {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB},
- {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF},
- {0x3105, 0x312D}, {0x3131, 0x318E}, {0x3190, 0x31BA},
- {0x31C0, 0x31E3}, {0x31F0, 0x321E}, {0x3220, 0x3247},
- {0x3250, 0x32FE}, {0x3300, 0x4DBF}, {0x4E00, 0xA48C},
- {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3},
- {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52},
- {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60},
- {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE0}, {0x17000, 0x187EC},
- {0x18800, 0x18AF2}, {0x1B000, 0x1B001}, {0x1F004, 0x1F004},
- {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
- {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248},
- {0x1F250, 0x1F251}, {0x1F300, 0x1F320}, {0x1F32D, 0x1F335},
- {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA},
- {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4},
- {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC},
- {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567},
- {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4},
- {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC},
- {0x1F6D0, 0x1F6D2}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6F6},
- {0x1F910, 0x1F91E}, {0x1F920, 0x1F927}, {0x1F930, 0x1F930},
- {0x1F933, 0x1F93E}, {0x1F940, 0x1F94B}, {0x1F950, 0x1F95E},
- {0x1F980, 0x1F991}, {0x1F9C0, 0x1F9C0}, {0x20000, 0x2FFFD},
- {0x30000, 0x3FFFD},
-}
-
-var ambiguous = table{
- {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8},
- {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4},
- {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6},
- {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1},
- {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED},
- {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA},
- {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101},
- {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B},
- {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133},
- {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144},
- {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153},
- {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE},
- {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4},
- {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA},
- {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261},
- {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB},
- {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB},
- {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F},
- {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1},
- {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F},
- {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016},
- {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022},
- {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033},
- {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E},
- {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084},
- {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105},
- {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116},
- {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B},
- {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B},
- {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199},
- {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4},
- {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203},
- {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F},
- {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A},
- {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225},
- {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237},
- {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C},
- {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267},
- {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283},
- {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299},
- {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312},
- {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573},
- {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1},
- {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7},
- {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8},
- {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5},
- {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609},
- {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E},
- {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661},
- {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D},
- {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF},
- {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1},
- {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1},
- {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC},
- {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F},
- {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF},
- {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A},
- {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D},
- {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF},
- {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD},
-}
-
-var emoji = table{
- {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122},
- {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA},
- {0x231A, 0x231B}, {0x2328, 0x2328}, {0x23CF, 0x23CF},
- {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, {0x24C2, 0x24C2},
- {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, {0x25C0, 0x25C0},
- {0x25FB, 0x25FE}, {0x2600, 0x2604}, {0x260E, 0x260E},
- {0x2611, 0x2611}, {0x2614, 0x2615}, {0x2618, 0x2618},
- {0x261D, 0x261D}, {0x2620, 0x2620}, {0x2622, 0x2623},
- {0x2626, 0x2626}, {0x262A, 0x262A}, {0x262E, 0x262F},
- {0x2638, 0x263A}, {0x2640, 0x2640}, {0x2642, 0x2642},
- {0x2648, 0x2653}, {0x265F, 0x2660}, {0x2663, 0x2663},
- {0x2665, 0x2666}, {0x2668, 0x2668}, {0x267B, 0x267B},
- {0x267E, 0x267F}, {0x2692, 0x2697}, {0x2699, 0x2699},
- {0x269B, 0x269C}, {0x26A0, 0x26A1}, {0x26AA, 0x26AB},
- {0x26B0, 0x26B1}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5},
- {0x26C8, 0x26C8}, {0x26CE, 0x26CF}, {0x26D1, 0x26D1},
- {0x26D3, 0x26D4}, {0x26E9, 0x26EA}, {0x26F0, 0x26F5},
- {0x26F7, 0x26FA}, {0x26FD, 0x26FD}, {0x2702, 0x2702},
- {0x2705, 0x2705}, {0x2708, 0x270D}, {0x270F, 0x270F},
- {0x2712, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716},
- {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728},
- {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747},
- {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755},
- {0x2757, 0x2757}, {0x2763, 0x2764}, {0x2795, 0x2797},
- {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF},
- {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C},
- {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030},
- {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299},
- {0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F170, 0x1F171},
- {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
- {0x1F1E6, 0x1F1FF}, {0x1F201, 0x1F202}, {0x1F21A, 0x1F21A},
- {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, {0x1F250, 0x1F251},
- {0x1F300, 0x1F321}, {0x1F324, 0x1F393}, {0x1F396, 0x1F397},
- {0x1F399, 0x1F39B}, {0x1F39E, 0x1F3F0}, {0x1F3F3, 0x1F3F5},
- {0x1F3F7, 0x1F4FD}, {0x1F4FF, 0x1F53D}, {0x1F549, 0x1F54E},
- {0x1F550, 0x1F567}, {0x1F56F, 0x1F570}, {0x1F573, 0x1F57A},
- {0x1F587, 0x1F587}, {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590},
- {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A5}, {0x1F5A8, 0x1F5A8},
- {0x1F5B1, 0x1F5B2}, {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4},
- {0x1F5D1, 0x1F5D3}, {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1},
- {0x1F5E3, 0x1F5E3}, {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF},
- {0x1F5F3, 0x1F5F3}, {0x1F5FA, 0x1F64F}, {0x1F680, 0x1F6C5},
- {0x1F6CB, 0x1F6D2}, {0x1F6E0, 0x1F6E5}, {0x1F6E9, 0x1F6E9},
- {0x1F6EB, 0x1F6EC}, {0x1F6F0, 0x1F6F0}, {0x1F6F3, 0x1F6F9},
- {0x1F910, 0x1F93A}, {0x1F93C, 0x1F93E}, {0x1F940, 0x1F945},
- {0x1F947, 0x1F970}, {0x1F973, 0x1F976}, {0x1F97A, 0x1F97A},
- {0x1F97C, 0x1F9A2}, {0x1F9B0, 0x1F9B9}, {0x1F9C0, 0x1F9C2},
- {0x1F9D0, 0x1F9FF},
-}
-
-var notassigned = table{
- {0x0378, 0x0379}, {0x0380, 0x0383}, {0x038B, 0x038B},
- {0x038D, 0x038D}, {0x03A2, 0x03A2}, {0x0530, 0x0530},
- {0x0557, 0x0558}, {0x0560, 0x0560}, {0x0588, 0x0588},
- {0x058B, 0x058C}, {0x0590, 0x0590}, {0x05C8, 0x05CF},
- {0x05EB, 0x05EF}, {0x05F5, 0x05FF}, {0x061D, 0x061D},
- {0x070E, 0x070E}, {0x074B, 0x074C}, {0x07B2, 0x07BF},
- {0x07FB, 0x07FF}, {0x082E, 0x082F}, {0x083F, 0x083F},
- {0x085C, 0x085D}, {0x085F, 0x089F}, {0x08B5, 0x08B5},
- {0x08BE, 0x08D3}, {0x0984, 0x0984}, {0x098D, 0x098E},
- {0x0991, 0x0992}, {0x09A9, 0x09A9}, {0x09B1, 0x09B1},
- {0x09B3, 0x09B5}, {0x09BA, 0x09BB}, {0x09C5, 0x09C6},
- {0x09C9, 0x09CA}, {0x09CF, 0x09D6}, {0x09D8, 0x09DB},
- {0x09DE, 0x09DE}, {0x09E4, 0x09E5}, {0x09FC, 0x0A00},
- {0x0A04, 0x0A04}, {0x0A0B, 0x0A0E}, {0x0A11, 0x0A12},
- {0x0A29, 0x0A29}, {0x0A31, 0x0A31}, {0x0A34, 0x0A34},
- {0x0A37, 0x0A37}, {0x0A3A, 0x0A3B}, {0x0A3D, 0x0A3D},
- {0x0A43, 0x0A46}, {0x0A49, 0x0A4A}, {0x0A4E, 0x0A50},
- {0x0A52, 0x0A58}, {0x0A5D, 0x0A5D}, {0x0A5F, 0x0A65},
- {0x0A76, 0x0A80}, {0x0A84, 0x0A84}, {0x0A8E, 0x0A8E},
- {0x0A92, 0x0A92}, {0x0AA9, 0x0AA9}, {0x0AB1, 0x0AB1},
- {0x0AB4, 0x0AB4}, {0x0ABA, 0x0ABB}, {0x0AC6, 0x0AC6},
- {0x0ACA, 0x0ACA}, {0x0ACE, 0x0ACF}, {0x0AD1, 0x0ADF},
- {0x0AE4, 0x0AE5}, {0x0AF2, 0x0AF8}, {0x0AFA, 0x0B00},
- {0x0B04, 0x0B04}, {0x0B0D, 0x0B0E}, {0x0B11, 0x0B12},
- {0x0B29, 0x0B29}, {0x0B31, 0x0B31}, {0x0B34, 0x0B34},
- {0x0B3A, 0x0B3B}, {0x0B45, 0x0B46}, {0x0B49, 0x0B4A},
- {0x0B4E, 0x0B55}, {0x0B58, 0x0B5B}, {0x0B5E, 0x0B5E},
- {0x0B64, 0x0B65}, {0x0B78, 0x0B81}, {0x0B84, 0x0B84},
- {0x0B8B, 0x0B8D}, {0x0B91, 0x0B91}, {0x0B96, 0x0B98},
- {0x0B9B, 0x0B9B}, {0x0B9D, 0x0B9D}, {0x0BA0, 0x0BA2},
- {0x0BA5, 0x0BA7}, {0x0BAB, 0x0BAD}, {0x0BBA, 0x0BBD},
- {0x0BC3, 0x0BC5}, {0x0BC9, 0x0BC9}, {0x0BCE, 0x0BCF},
- {0x0BD1, 0x0BD6}, {0x0BD8, 0x0BE5}, {0x0BFB, 0x0BFF},
- {0x0C04, 0x0C04}, {0x0C0D, 0x0C0D}, {0x0C11, 0x0C11},
- {0x0C29, 0x0C29}, {0x0C3A, 0x0C3C}, {0x0C45, 0x0C45},
- {0x0C49, 0x0C49}, {0x0C4E, 0x0C54}, {0x0C57, 0x0C57},
- {0x0C5B, 0x0C5F}, {0x0C64, 0x0C65}, {0x0C70, 0x0C77},
- {0x0C84, 0x0C84}, {0x0C8D, 0x0C8D}, {0x0C91, 0x0C91},
- {0x0CA9, 0x0CA9}, {0x0CB4, 0x0CB4}, {0x0CBA, 0x0CBB},
- {0x0CC5, 0x0CC5}, {0x0CC9, 0x0CC9}, {0x0CCE, 0x0CD4},
- {0x0CD7, 0x0CDD}, {0x0CDF, 0x0CDF}, {0x0CE4, 0x0CE5},
- {0x0CF0, 0x0CF0}, {0x0CF3, 0x0D00}, {0x0D04, 0x0D04},
- {0x0D0D, 0x0D0D}, {0x0D11, 0x0D11}, {0x0D3B, 0x0D3C},
- {0x0D45, 0x0D45}, {0x0D49, 0x0D49}, {0x0D50, 0x0D53},
- {0x0D64, 0x0D65}, {0x0D80, 0x0D81}, {0x0D84, 0x0D84},
- {0x0D97, 0x0D99}, {0x0DB2, 0x0DB2}, {0x0DBC, 0x0DBC},
- {0x0DBE, 0x0DBF}, {0x0DC7, 0x0DC9}, {0x0DCB, 0x0DCE},
- {0x0DD5, 0x0DD5}, {0x0DD7, 0x0DD7}, {0x0DE0, 0x0DE5},
- {0x0DF0, 0x0DF1}, {0x0DF5, 0x0E00}, {0x0E3B, 0x0E3E},
- {0x0E5C, 0x0E80}, {0x0E83, 0x0E83}, {0x0E85, 0x0E86},
- {0x0E89, 0x0E89}, {0x0E8B, 0x0E8C}, {0x0E8E, 0x0E93},
- {0x0E98, 0x0E98}, {0x0EA0, 0x0EA0}, {0x0EA4, 0x0EA4},
- {0x0EA6, 0x0EA6}, {0x0EA8, 0x0EA9}, {0x0EAC, 0x0EAC},
- {0x0EBA, 0x0EBA}, {0x0EBE, 0x0EBF}, {0x0EC5, 0x0EC5},
- {0x0EC7, 0x0EC7}, {0x0ECE, 0x0ECF}, {0x0EDA, 0x0EDB},
- {0x0EE0, 0x0EFF}, {0x0F48, 0x0F48}, {0x0F6D, 0x0F70},
- {0x0F98, 0x0F98}, {0x0FBD, 0x0FBD}, {0x0FCD, 0x0FCD},
- {0x0FDB, 0x0FFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC},
- {0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F},
- {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F},
- {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1},
- {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, {0x12C1, 0x12C1},
- {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311},
- {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F},
- {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF},
- {0x169D, 0x169F}, {0x16F9, 0x16FF}, {0x170D, 0x170D},
- {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F},
- {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F},
- {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF},
- {0x180F, 0x180F}, {0x181A, 0x181F}, {0x1878, 0x187F},
- {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F},
- {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943},
- {0x196E, 0x196F}, {0x1975, 0x197F}, {0x19AC, 0x19AF},
- {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D},
- {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F},
- {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1ABF, 0x1AFF},
- {0x1B4C, 0x1B4F}, {0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB},
- {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1CBF},
- {0x1CC8, 0x1CCF}, {0x1CF7, 0x1CF7}, {0x1CFA, 0x1CFF},
- {0x1DF6, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F},
- {0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58},
- {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E},
- {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5},
- {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, {0x1FF0, 0x1FF1},
- {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x2065, 0x2065},
- {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F},
- {0x20BF, 0x20CF}, {0x20F1, 0x20FF}, {0x218C, 0x218F},
- {0x23FF, 0x23FF}, {0x2427, 0x243F}, {0x244B, 0x245F},
- {0x2B74, 0x2B75}, {0x2B96, 0x2B97}, {0x2BBA, 0x2BBC},
- {0x2BC9, 0x2BC9}, {0x2BD2, 0x2BEB}, {0x2BF0, 0x2BFF},
- {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8},
- {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F},
- {0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 0x2D9F},
- {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7},
- {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF},
- {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, {0x2E45, 0x2E7F},
- {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF},
- {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098},
- {0x3100, 0x3104}, {0x312E, 0x3130}, {0x318F, 0x318F},
- {0x31BB, 0x31BF}, {0x31E4, 0x31EF}, {0x321F, 0x321F},
- {0x32FF, 0x32FF}, {0x4DB6, 0x4DBF}, {0x9FD6, 0x9FFF},
- {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F},
- {0xA6F8, 0xA6FF}, {0xA7AF, 0xA7AF}, {0xA7B8, 0xA7F6},
- {0xA82C, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F},
- {0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA8FE, 0xA8FF},
- {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE},
- {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F},
- {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA},
- {0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10},
- {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F},
- {0xAB66, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF},
- {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, {0xD7FC, 0xD7FF},
- {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12},
- {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D},
- {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45},
- {0xFBC2, 0xFBD2}, {0xFD40, 0xFD4F}, {0xFD90, 0xFD91},
- {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F},
- {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F},
- {0xFE75, 0xFE75}, {0xFEFD, 0xFEFE}, {0xFF00, 0xFF00},
- {0xFFBF, 0xFFC1}, {0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1},
- {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7},
- {0xFFEF, 0xFFF8}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C},
- {0x10027, 0x10027}, {0x1003B, 0x1003B}, {0x1003E, 0x1003E},
- {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF},
- {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F},
- {0x1019C, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F},
- {0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF},
- {0x10324, 0x1032F}, {0x1034B, 0x1034F}, {0x1037B, 0x1037F},
- {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF},
- {0x1049E, 0x1049F}, {0x104AA, 0x104AF}, {0x104D4, 0x104D7},
- {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E},
- {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F},
- {0x10768, 0x107FF}, {0x10806, 0x10807}, {0x10809, 0x10809},
- {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E},
- {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF},
- {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E},
- {0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB},
- {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B},
- {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A34, 0x10A37},
- {0x10A3B, 0x10A3E}, {0x10A48, 0x10A4F}, {0x10A59, 0x10A5F},
- {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF},
- {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77},
- {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, {0x10BB0, 0x10BFF},
- {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9},
- {0x10D00, 0x10E5F}, {0x10E7F, 0x10FFF}, {0x1104E, 0x11051},
- {0x11070, 0x1107E}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF},
- {0x110FA, 0x110FF}, {0x11135, 0x11135}, {0x11144, 0x1114F},
- {0x11177, 0x1117F}, {0x111CE, 0x111CF}, {0x111E0, 0x111E0},
- {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F},
- {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E},
- {0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF},
- {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E},
- {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331},
- {0x11334, 0x11334}, {0x1133A, 0x1133B}, {0x11345, 0x11346},
- {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356},
- {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F},
- {0x11375, 0x113FF}, {0x1145A, 0x1145A}, {0x1145C, 0x1145C},
- {0x1145E, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F},
- {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F},
- {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B8, 0x116BF},
- {0x116CA, 0x116FF}, {0x1171A, 0x1171C}, {0x1172C, 0x1172F},
- {0x11740, 0x1189F}, {0x118F3, 0x118FE}, {0x11900, 0x11ABF},
- {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, {0x11C37, 0x11C37},
- {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91},
- {0x11CA8, 0x11CA8}, {0x11CB7, 0x11FFF}, {0x1239A, 0x123FF},
- {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF},
- {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F},
- {0x16A5F, 0x16A5F}, {0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF},
- {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F},
- {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C},
- {0x16B90, 0x16EFF}, {0x16F45, 0x16F4F}, {0x16F7F, 0x16F8E},
- {0x16FA0, 0x16FDF}, {0x16FE1, 0x16FFF}, {0x187ED, 0x187FF},
- {0x18AF3, 0x1AFFF}, {0x1B002, 0x1BBFF}, {0x1BC6B, 0x1BC6F},
- {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B},
- {0x1BCA4, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128},
- {0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2FF}, {0x1D357, 0x1D35F},
- {0x1D372, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D},
- {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8},
- {0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC},
- {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C},
- {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A},
- {0x1D53F, 0x1D53F}, {0x1D545, 0x1D545}, {0x1D547, 0x1D549},
- {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD},
- {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF},
- {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, {0x1E022, 0x1E022},
- {0x1E025, 0x1E025}, {0x1E02B, 0x1E7FF}, {0x1E8C5, 0x1E8C6},
- {0x1E8D7, 0x1E8FF}, {0x1E94B, 0x1E94F}, {0x1E95A, 0x1E95D},
- {0x1E960, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20},
- {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, {0x1EE28, 0x1EE28},
- {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A},
- {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48},
- {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50},
- {0x1EE53, 0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58},
- {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E},
- {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66},
- {0x1EE6B, 0x1EE6B}, {0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78},
- {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A},
- {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA},
- {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, {0x1F02C, 0x1F02F},
- {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0},
- {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F10D, 0x1F10F},
- {0x1F12F, 0x1F12F}, {0x1F16C, 0x1F16F}, {0x1F1AD, 0x1F1E5},
- {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F},
- {0x1F252, 0x1F2FF}, {0x1F6D3, 0x1F6DF}, {0x1F6ED, 0x1F6EF},
- {0x1F6F7, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D5, 0x1F7FF},
- {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F},
- {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F90F}, {0x1F91F, 0x1F91F},
- {0x1F928, 0x1F92F}, {0x1F931, 0x1F932}, {0x1F93F, 0x1F93F},
- {0x1F94C, 0x1F94F}, {0x1F95F, 0x1F97F}, {0x1F992, 0x1F9BF},
- {0x1F9C1, 0x1FFFF}, {0x2A6D7, 0x2A6FF}, {0x2B735, 0x2B73F},
- {0x2B81E, 0x2B81F}, {0x2CEA2, 0x2F7FF}, {0x2FA1E, 0xE0000},
- {0xE0002, 0xE001F}, {0xE0080, 0xE00FF}, {0xE01F0, 0xEFFFF},
- {0xFFFFE, 0xFFFFF},
-}
-
-var neutral = table{
- {0x0000, 0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9},
- {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB},
- {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6},
- {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7},
- {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1},
- {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD},
- {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112},
- {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A},
- {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E},
- {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C},
- {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A},
- {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1},
- {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7},
- {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250},
- {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6},
- {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF},
- {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE},
- {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F},
- {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390},
- {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400},
- {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F},
- {0x0531, 0x0556}, {0x0559, 0x055F}, {0x0561, 0x0587},
- {0x0589, 0x058A}, {0x058D, 0x058F}, {0x0591, 0x05C7},
- {0x05D0, 0x05EA}, {0x05F0, 0x05F4}, {0x0600, 0x061C},
- {0x061E, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1},
- {0x07C0, 0x07FA}, {0x0800, 0x082D}, {0x0830, 0x083E},
- {0x0840, 0x085B}, {0x085E, 0x085E}, {0x08A0, 0x08B4},
- {0x08B6, 0x08BD}, {0x08D4, 0x0983}, {0x0985, 0x098C},
- {0x098F, 0x0990}, {0x0993, 0x09A8}, {0x09AA, 0x09B0},
- {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, {0x09BC, 0x09C4},
- {0x09C7, 0x09C8}, {0x09CB, 0x09CE}, {0x09D7, 0x09D7},
- {0x09DC, 0x09DD}, {0x09DF, 0x09E3}, {0x09E6, 0x09FB},
- {0x0A01, 0x0A03}, {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10},
- {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, {0x0A32, 0x0A33},
- {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C},
- {0x0A3E, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D},
- {0x0A51, 0x0A51}, {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E},
- {0x0A66, 0x0A75}, {0x0A81, 0x0A83}, {0x0A85, 0x0A8D},
- {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0},
- {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5},
- {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0},
- {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AF9},
- {0x0B01, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10},
- {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33},
- {0x0B35, 0x0B39}, {0x0B3C, 0x0B44}, {0x0B47, 0x0B48},
- {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B5C, 0x0B5D},
- {0x0B5F, 0x0B63}, {0x0B66, 0x0B77}, {0x0B82, 0x0B83},
- {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, {0x0B92, 0x0B95},
- {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F},
- {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9},
- {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD},
- {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA},
- {0x0C00, 0x0C03}, {0x0C05, 0x0C0C}, {0x0C0E, 0x0C10},
- {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3D, 0x0C44},
- {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56},
- {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, {0x0C66, 0x0C6F},
- {0x0C78, 0x0C83}, {0x0C85, 0x0C8C}, {0x0C8E, 0x0C90},
- {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9},
- {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
- {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3},
- {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D01, 0x0D03},
- {0x0D05, 0x0D0C}, {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A},
- {0x0D3D, 0x0D44}, {0x0D46, 0x0D48}, {0x0D4A, 0x0D4F},
- {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, {0x0D82, 0x0D83},
- {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB},
- {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA},
- {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF},
- {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, {0x0E01, 0x0E3A},
- {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, {0x0E84, 0x0E84},
- {0x0E87, 0x0E88}, {0x0E8A, 0x0E8A}, {0x0E8D, 0x0E8D},
- {0x0E94, 0x0E97}, {0x0E99, 0x0E9F}, {0x0EA1, 0x0EA3},
- {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EA7}, {0x0EAA, 0x0EAB},
- {0x0EAD, 0x0EB9}, {0x0EBB, 0x0EBD}, {0x0EC0, 0x0EC4},
- {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9},
- {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C},
- {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC},
- {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7},
- {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248},
- {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258},
- {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D},
- {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE},
- {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6},
- {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A},
- {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5},
- {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8},
- {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736},
- {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770},
- {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9},
- {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819},
- {0x1820, 0x1877}, {0x1880, 0x18AA}, {0x18B0, 0x18F5},
- {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B},
- {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974},
- {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA},
- {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C},
- {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD},
- {0x1AB0, 0x1ABE}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C},
- {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49},
- {0x1C4D, 0x1C88}, {0x1CC0, 0x1CC7}, {0x1CD0, 0x1CF6},
- {0x1CF8, 0x1CF9}, {0x1D00, 0x1DF5}, {0x1DFB, 0x1F15},
- {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D},
- {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B},
- {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4},
- {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB},
- {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE},
- {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017},
- {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023},
- {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034},
- {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064},
- {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080},
- {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8},
- {0x20AA, 0x20AB}, {0x20AD, 0x20BE}, {0x20D0, 0x20F0},
- {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108},
- {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120},
- {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152},
- {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F},
- {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7},
- {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6},
- {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206},
- {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210},
- {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C},
- {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226},
- {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B},
- {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251},
- {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269},
- {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285},
- {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4},
- {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319},
- {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF},
- {0x23F1, 0x23F2}, {0x23F4, 0x23FE}, {0x2400, 0x2426},
- {0x2440, 0x244A}, {0x24EA, 0x24EA}, {0x254C, 0x254F},
- {0x2574, 0x257F}, {0x2590, 0x2591}, {0x2596, 0x259F},
- {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, {0x25B4, 0x25B5},
- {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, {0x25C2, 0x25C5},
- {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, {0x25D2, 0x25E1},
- {0x25E6, 0x25EE}, {0x25F0, 0x25FC}, {0x25FF, 0x2604},
- {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613},
- {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x263F},
- {0x2641, 0x2641}, {0x2643, 0x2647}, {0x2654, 0x265F},
- {0x2662, 0x2662}, {0x2666, 0x2666}, {0x266B, 0x266B},
- {0x266E, 0x266E}, {0x2670, 0x267E}, {0x2680, 0x2692},
- {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 0x26A9},
- {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2},
- {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709},
- {0x270C, 0x2727}, {0x2729, 0x273C}, {0x273E, 0x274B},
- {0x274D, 0x274D}, {0x274F, 0x2752}, {0x2756, 0x2756},
- {0x2758, 0x2775}, {0x2780, 0x2794}, {0x2798, 0x27AF},
- {0x27B1, 0x27BE}, {0x27C0, 0x27E5}, {0x27EE, 0x2984},
- {0x2987, 0x2B1A}, {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54},
- {0x2B5A, 0x2B73}, {0x2B76, 0x2B95}, {0x2B98, 0x2BB9},
- {0x2BBD, 0x2BC8}, {0x2BCA, 0x2BD1}, {0x2BEC, 0x2BEF},
- {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2CF3},
- {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D},
- {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, {0x2D7F, 0x2D96},
- {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6},
- {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE},
- {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2DE0, 0x2E44},
- {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B},
- {0xA640, 0xA6F7}, {0xA700, 0xA7AE}, {0xA7B0, 0xA7B7},
- {0xA7F7, 0xA82B}, {0xA830, 0xA839}, {0xA840, 0xA877},
- {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, {0xA8E0, 0xA8FD},
- {0xA900, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD},
- {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36},
- {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2},
- {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E},
- {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E},
- {0xAB30, 0xAB65}, {0xAB70, 0xABED}, {0xABF0, 0xABF9},
- {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF},
- {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36},
- {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
- {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F},
- {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD},
- {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC},
- {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B},
- {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D},
- {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA},
- {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E},
- {0x10190, 0x1019B}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD},
- {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB},
- {0x10300, 0x10323}, {0x10330, 0x1034A}, {0x10350, 0x1037A},
- {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5},
- {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3},
- {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563},
- {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755},
- {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808},
- {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C},
- {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF},
- {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B},
- {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7},
- {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06},
- {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A33},
- {0x10A38, 0x10A3A}, {0x10A3F, 0x10A47}, {0x10A50, 0x10A58},
- {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6},
- {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72},
- {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF},
- {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2},
- {0x10CFA, 0x10CFF}, {0x10E60, 0x10E7E}, {0x11000, 0x1104D},
- {0x11052, 0x1106F}, {0x1107F, 0x110C1}, {0x110D0, 0x110E8},
- {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11143},
- {0x11150, 0x11176}, {0x11180, 0x111CD}, {0x111D0, 0x111DF},
- {0x111E1, 0x111F4}, {0x11200, 0x11211}, {0x11213, 0x1123E},
- {0x11280, 0x11286}, {0x11288, 0x11288}, {0x1128A, 0x1128D},
- {0x1128F, 0x1129D}, {0x1129F, 0x112A9}, {0x112B0, 0x112EA},
- {0x112F0, 0x112F9}, {0x11300, 0x11303}, {0x11305, 0x1130C},
- {0x1130F, 0x11310}, {0x11313, 0x11328}, {0x1132A, 0x11330},
- {0x11332, 0x11333}, {0x11335, 0x11339}, {0x1133C, 0x11344},
- {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11350, 0x11350},
- {0x11357, 0x11357}, {0x1135D, 0x11363}, {0x11366, 0x1136C},
- {0x11370, 0x11374}, {0x11400, 0x11459}, {0x1145B, 0x1145B},
- {0x1145D, 0x1145D}, {0x11480, 0x114C7}, {0x114D0, 0x114D9},
- {0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644},
- {0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B7},
- {0x116C0, 0x116C9}, {0x11700, 0x11719}, {0x1171D, 0x1172B},
- {0x11730, 0x1173F}, {0x118A0, 0x118F2}, {0x118FF, 0x118FF},
- {0x11AC0, 0x11AF8}, {0x11C00, 0x11C08}, {0x11C0A, 0x11C36},
- {0x11C38, 0x11C45}, {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F},
- {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, {0x12000, 0x12399},
- {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543},
- {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38},
- {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16A6F},
- {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, {0x16B00, 0x16B45},
- {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, {0x16B63, 0x16B77},
- {0x16B7D, 0x16B8F}, {0x16F00, 0x16F44}, {0x16F50, 0x16F7E},
- {0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C},
- {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3},
- {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8},
- {0x1D200, 0x1D245}, {0x1D300, 0x1D356}, {0x1D360, 0x1D371},
- {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F},
- {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC},
- {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3},
- {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
- {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E},
- {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550},
- {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B},
- {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
- {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
- {0x1E026, 0x1E02A}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
- {0x1E900, 0x1E94A}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F},
- {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22},
- {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32},
- {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B},
- {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49},
- {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52},
- {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59},
- {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F},
- {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A},
- {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C},
- {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B},
- {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB},
- {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B},
- {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF},
- {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10C},
- {0x1F12E, 0x1F12E}, {0x1F16A, 0x1F16B}, {0x1F1E6, 0x1F1FF},
- {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D},
- {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF},
- {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F},
- {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A},
- {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
- {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
- {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6E0, 0x1F6EA},
- {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D4},
- {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859},
- {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0xE0001, 0xE0001},
- {0xE0020, 0xE007F},
-}
-
-// Condition have flag EastAsianWidth whether the current locale is CJK or not.
-type Condition struct {
- EastAsianWidth bool
- ZeroWidthJoiner bool
-}
-
-// NewCondition return new instance of Condition which is current locale.
-func NewCondition() *Condition {
- return &Condition{
- EastAsianWidth: EastAsianWidth,
- ZeroWidthJoiner: ZeroWidthJoiner,
- }
-}
-
-// RuneWidth returns the number of cells in r.
-// See http://www.unicode.org/reports/tr11/
-func (c *Condition) RuneWidth(r rune) int {
- switch {
- case r < 0 || r > 0x10FFFF ||
- inTables(r, nonprint, combining, notassigned):
- return 0
- case (c.EastAsianWidth && IsAmbiguousWidth(r)) ||
- inTables(r, doublewidth, emoji):
- return 2
- default:
- return 1
- }
-}
-
-func (c *Condition) stringWidth(s string) (width int) {
- for _, r := range []rune(s) {
- width += c.RuneWidth(r)
- }
- return width
-}
-
-func (c *Condition) stringWidthZeroJoiner(s string) (width int) {
- r1, r2 := rune(0), rune(0)
- for _, r := range []rune(s) {
- if r == 0xFE0E || r == 0xFE0F {
- continue
- }
- w := c.RuneWidth(r)
- if r2 == 0x200D && inTables(r, emoji) && inTables(r1, emoji) {
- w = 0
- }
- width += w
- r1, r2 = r2, r
- }
- return width
-}
-
-// StringWidth return width as you can see
-func (c *Condition) StringWidth(s string) (width int) {
- if c.ZeroWidthJoiner {
- return c.stringWidthZeroJoiner(s)
- }
- return c.stringWidth(s)
-}
-
-// Truncate return string truncated with w cells
-func (c *Condition) Truncate(s string, w int, tail string) string {
- if c.StringWidth(s) <= w {
- return s
- }
- r := []rune(s)
- tw := c.StringWidth(tail)
- w -= tw
- width := 0
- i := 0
- for ; i < len(r); i++ {
- cw := c.RuneWidth(r[i])
- if width+cw > w {
- break
- }
- width += cw
- }
- return string(r[0:i]) + tail
-}
-
-// Wrap return string wrapped with w cells
-func (c *Condition) Wrap(s string, w int) string {
- width := 0
- out := ""
- for _, r := range []rune(s) {
- cw := RuneWidth(r)
- if r == '\n' {
- out += string(r)
- width = 0
- continue
- } else if width+cw > w {
- out += "\n"
- width = 0
- out += string(r)
- width += cw
- continue
- }
- out += string(r)
- width += cw
- }
- return out
-}
-
-// FillLeft return string filled in left by spaces in w cells
-func (c *Condition) FillLeft(s string, w int) string {
- width := c.StringWidth(s)
- count := w - width
- if count > 0 {
- b := make([]byte, count)
- for i := range b {
- b[i] = ' '
- }
- return string(b) + s
- }
- return s
-}
-
-// FillRight return string filled in left by spaces in w cells
-func (c *Condition) FillRight(s string, w int) string {
- width := c.StringWidth(s)
- count := w - width
- if count > 0 {
- b := make([]byte, count)
- for i := range b {
- b[i] = ' '
- }
- return s + string(b)
- }
- return s
-}
-
-// RuneWidth returns the number of cells in r.
-// See http://www.unicode.org/reports/tr11/
-func RuneWidth(r rune) int {
- return DefaultCondition.RuneWidth(r)
-}
-
-// IsAmbiguousWidth returns whether is ambiguous width or not.
-func IsAmbiguousWidth(r rune) bool {
- return inTables(r, private, ambiguous)
-}
-
-// IsNeutralWidth returns whether is neutral width or not.
-func IsNeutralWidth(r rune) bool {
- return inTable(r, neutral)
-}
-
-// StringWidth return width as you can see
-func StringWidth(s string) (width int) {
- return DefaultCondition.StringWidth(s)
-}
-
-// Truncate return string truncated with w cells
-func Truncate(s string, w int, tail string) string {
- return DefaultCondition.Truncate(s, w, tail)
-}
-
-// Wrap return string wrapped with w cells
-func Wrap(s string, w int) string {
- return DefaultCondition.Wrap(s, w)
-}
-
-// FillLeft return string filled in left by spaces in w cells
-func FillLeft(s string, w int) string {
- return DefaultCondition.FillLeft(s, w)
-}
-
-// FillRight return string filled in left by spaces in w cells
-func FillRight(s string, w int) string {
- return DefaultCondition.FillRight(s, w)
-}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
deleted file mode 100644
index 7d99f6e52..000000000
--- a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build appengine
-
-package runewidth
-
-// IsEastAsian return true if the current locale is CJK
-func IsEastAsian() bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
deleted file mode 100644
index c5fdf40ba..000000000
--- a/vendor/github.com/mattn/go-runewidth/runewidth_js.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build js
-// +build !appengine
-
-package runewidth
-
-func IsEastAsian() bool {
- // TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
- return false
-}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
deleted file mode 100644
index 66a58b5d8..000000000
--- a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// +build !windows
-// +build !js
-// +build !appengine
-
-package runewidth
-
-import (
- "os"
- "regexp"
- "strings"
-)
-
-var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
-
-var mblenTable = map[string]int{
- "utf-8": 6,
- "utf8": 6,
- "jis": 8,
- "eucjp": 3,
- "euckr": 2,
- "euccn": 2,
- "sjis": 2,
- "cp932": 2,
- "cp51932": 2,
- "cp936": 2,
- "cp949": 2,
- "cp950": 2,
- "big5": 2,
- "gbk": 2,
- "gb2312": 2,
-}
-
-func isEastAsian(locale string) bool {
- charset := strings.ToLower(locale)
- r := reLoc.FindStringSubmatch(locale)
- if len(r) == 2 {
- charset = strings.ToLower(r[1])
- }
-
- if strings.HasSuffix(charset, "@cjk_narrow") {
- return false
- }
-
- for pos, b := range []byte(charset) {
- if b == '@' {
- charset = charset[:pos]
- break
- }
- }
- max := 1
- if m, ok := mblenTable[charset]; ok {
- max = m
- }
- if max > 1 && (charset[0] != 'u' ||
- strings.HasPrefix(locale, "ja") ||
- strings.HasPrefix(locale, "ko") ||
- strings.HasPrefix(locale, "zh")) {
- return true
- }
- return false
-}
-
-// IsEastAsian return true if the current locale is CJK
-func IsEastAsian() bool {
- locale := os.Getenv("LC_CTYPE")
- if locale == "" {
- locale = os.Getenv("LANG")
- }
-
- // ignore C locale
- if locale == "POSIX" || locale == "C" {
- return false
- }
- if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
- return false
- }
-
- return isEastAsian(locale)
-}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
deleted file mode 100644
index d6a61777d..000000000
--- a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build windows
-// +build !appengine
-
-package runewidth
-
-import (
- "syscall"
-)
-
-var (
- kernel32 = syscall.NewLazyDLL("kernel32")
- procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
-)
-
-// IsEastAsian return true if the current locale is CJK
-func IsEastAsian() bool {
- r1, _, _ := procGetConsoleOutputCP.Call()
- if r1 == 0 {
- return false
- }
-
- switch int(r1) {
- case 932, 51932, 936, 949, 950:
- return true
- }
-
- return false
-}
diff --git a/vendor/github.com/openshift/imagebuilder/README.md b/vendor/github.com/openshift/imagebuilder/README.md
index 2f9c110dd..f26b4a7e0 100644
--- a/vendor/github.com/openshift/imagebuilder/README.md
+++ b/vendor/github.com/openshift/imagebuilder/README.md
@@ -70,7 +70,7 @@ is ignored.
## Code Example
-```
+```go
f, err := os.Open("path/to/Dockerfile")
if err != nil {
return err
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
index d37965df6..16682af7d 100644
--- a/vendor/github.com/openshift/imagebuilder/builder.go
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -40,6 +40,7 @@ type Run struct {
type Executor interface {
Preserve(path string) error
+ EnsureContainerPath(path string) error
Copy(excludes []string, copies ...Copy) error
Run(run Run, config docker.Config) error
UnrecognizedInstruction(step *Step) error
@@ -52,6 +53,11 @@ func (logExecutor) Preserve(path string) error {
return nil
}
+func (logExecutor) EnsureContainerPath(path string) error {
+ log.Printf("ENSURE %s", path)
+ return nil
+}
+
func (logExecutor) Copy(excludes []string, copies ...Copy) error {
for _, c := range copies {
log.Printf("COPY %v -> %s (from:%s download:%t), chown: %s", c.Src, c.Dest, c.From, c.Download, c.Chown)
@@ -75,6 +81,10 @@ func (noopExecutor) Preserve(path string) error {
return nil
}
+func (noopExecutor) EnsureContainerPath(path string) error {
+ return nil
+}
+
func (noopExecutor) Copy(excludes []string, copies ...Copy) error {
return nil
}
@@ -153,6 +163,7 @@ func (stages Stages) ByName(name string) (Stage, bool) {
return Stage{}, false
}
+// ByTarget returns just the target stage.
func (stages Stages) ByTarget(target string) (Stages, bool) {
if len(target) == 0 {
return stages, true
@@ -165,6 +176,19 @@ func (stages Stages) ByTarget(target string) (Stages, bool) {
return nil, false
}
+// ThroughTarget returns all the stages up to and including the target.
+func (stages Stages) ThroughTarget(target string) (Stages, bool) {
+ if len(target) == 0 {
+ return stages, true
+ }
+ for i, stage := range stages {
+ if stage.Name == target {
+ return stages[0 : i+1], true
+ }
+ }
+ return nil, false
+}
+
type Stage struct {
Position int
Name string
@@ -319,6 +343,13 @@ func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error {
if err := exec.Copy(b.Excludes, copies...); err != nil {
return err
}
+
+ if len(b.RunConfig.WorkingDir) > 0 {
+ if err := exec.EnsureContainerPath(b.RunConfig.WorkingDir); err != nil {
+ return err
+ }
+ }
+
for _, run := range runs {
config := b.Config()
config.Env = step.Env
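
The EnsureContainerPath and ThroughTarget additions above are small API-surface changes. A hedged sketch of how a build front end might use ThroughTarget to honor a `--target` flag (the helper name and error text are assumptions, not from this patch):

```go
package main

import (
	"fmt"

	"github.com/openshift/imagebuilder"
)

// selectStages picks the stages needed to build the requested target,
// falling back to every stage when no target is given.
func selectStages(stages imagebuilder.Stages, target string) (imagebuilder.Stages, error) {
	selected, ok := stages.ThroughTarget(target)
	if !ok {
		return nil, fmt.Errorf("target stage %q not found in Dockerfile", target)
	}
	// selected holds stage 0 through the target, so COPY --from=<earlier stage>
	// instructions in the target stage still have their sources available.
	return selected, nil
}

func main() {
	// Parsing a Dockerfile into stages is omitted here.
	_ = selectStages
}
```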
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
index f6510c2fd..ff365848a 100644
--- a/vendor/github.com/openshift/imagebuilder/dispatchers.go
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -128,9 +128,20 @@ func add(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
if len(args) < 2 {
return errAtLeastOneArgument("ADD")
}
+ var chown string
last := len(args) - 1
dest := makeAbsolute(args[last], b.RunConfig.WorkingDir)
- b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0:last], Dest: dest, Download: true})
+ if len(flagArgs) > 0 {
+ for _, arg := range flagArgs {
+ switch {
+ case strings.HasPrefix(arg, "--chown="):
+ chown = strings.TrimPrefix(arg, "--chown=")
+ default:
+ return fmt.Errorf("ADD only supports the --chown=<uid:gid> flag")
+ }
+ }
+ }
+ b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0:last], Dest: dest, Download: true, Chown: chown})
return nil
}
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 000000000..f0027349e
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 The OpenTracing Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md
new file mode 100644
index 000000000..6ef1d7c9d
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/README.md
@@ -0,0 +1,171 @@
+[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
+
+# OpenTracing API for Go
+
+This package is a Go platform API for OpenTracing.
+
+## Required Reading
+
+To understand the Go platform API, one must first be familiar with the
+[OpenTracing project](https://opentracing.io) and, more specifically, its
+[terminology](https://opentracing.io/specification/).
+
+## API overview for those adding instrumentation
+
+Everyday consumers of this `opentracing` package need only a few key
+abstractions: the `StartSpan` function, the `Span` interface, and binding a
+`Tracer` at `main()`-time. The code snippets below demonstrate some important
+use cases.
+
+#### Singleton initialization
+
+The simplest starting point is `./globaltracer.go`. As early as possible, call
+
+```go
+ import "github.com/opentracing/opentracing-go"
+ import ".../some_tracing_impl"
+
+ func main() {
+ opentracing.SetGlobalTracer(
+ // tracing impl specific:
+ some_tracing_impl.New(...),
+ )
+ ...
+ }
+```
+
+#### Non-Singleton initialization
+
+If you prefer direct control to singletons, manage ownership of the
+`opentracing.Tracer` implementation explicitly.
+
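+A minimal sketch (reusing the hypothetical `some_tracing_impl` from the
+snippet above) of holding the `Tracer` explicitly rather than relying on
+the global:
+
+```go
+    type service struct {
+        tracer opentracing.Tracer
+    }
+
+    func newService() *service {
+        // tracing impl specific; ownership stays with the caller:
+        return &service{tracer: some_tracing_impl.New(...)}
+    }
+
+    func (s *service) handle() {
+        sp := s.tracer.StartSpan("operation_name")
+        defer sp.Finish()
+        ...
+    }
+```
+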
+#### Creating a Span given an existing Go `context.Context`
+
+If you use `context.Context` in your application, OpenTracing's Go library will
+happily rely on it for `Span` propagation. To start a new (blocking child)
+`Span`, you can use `StartSpanFromContext`.
+
+```go
+ func xyz(ctx context.Context, ...) {
+ ...
+ span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
+ defer span.Finish()
+ span.LogFields(
+ log.String("event", "soft error"),
+ log.String("type", "cache timeout"),
+ log.Int("waited.millis", 1500))
+ ...
+ }
+```
+
+#### Starting an empty trace by creating a "root span"
+
+It's always possible to create a "root" `Span` with no parent or other causal
+reference.
+
+```go
+ func xyz() {
+ ...
+ sp := opentracing.StartSpan("operation_name")
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Creating a (child) Span given an existing (parent) Span
+
+```go
+ func xyz(parentSpan opentracing.Span, ...) {
+ ...
+ sp := opentracing.StartSpan(
+ "operation_name",
+ opentracing.ChildOf(parentSpan.Context()))
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Serializing to the wire
+
+```go
+ func makeSomeRequest(ctx context.Context) ... {
+ if span := opentracing.SpanFromContext(ctx); span != nil {
+ httpClient := &http.Client{}
+ httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
+
+ // Transmit the span's TraceContext as HTTP headers on our
+ // outbound request.
+ opentracing.GlobalTracer().Inject(
+ span.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(httpReq.Header))
+
+ resp, err := httpClient.Do(httpReq)
+ ...
+ }
+ ...
+ }
+```
+
+#### Deserializing from the wire
+
+```go
+ http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ var serverSpan opentracing.Span
+ appSpecificOperationName := ...
+ wireContext, err := opentracing.GlobalTracer().Extract(
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+ if err != nil {
+ // Optionally record something about err here
+ }
+
+ // Create the span referring to the RPC client if available.
+ // If wireContext == nil, a root span will be created.
+ serverSpan = opentracing.StartSpan(
+ appSpecificOperationName,
+ ext.RPCServerOption(wireContext))
+
+ defer serverSpan.Finish()
+
+ ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
+ ...
+ }
+```
+
+#### Conditionally capture a field using `log.Noop`
+
+In some situations, you may want to dynamically decide whether or not
+to log a field. For example, you may want to capture additional data,
+such as a customer ID, in non-production environments:
+
+```go
+ func Customer(order *Order) log.Field {
+ if os.Getenv("ENVIRONMENT") == "dev" {
+ return log.String("customer", order.Customer.ID)
+ }
+ return log.Noop()
+ }
+```
+
+#### Goroutine-safety
+
+The entire public API is goroutine-safe and does not require external
+synchronization.
+
+## API pointers for those implementing a tracing system
+
+Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
+
+## API compatibility
+
+For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
+
+## Tracer test suite
+
+A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors in asserting that their Tracer is working correctly.
+
+## Licensing
+
+[Apache 2.0 License](./LICENSE).
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
new file mode 100644
index 000000000..52e889582
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -0,0 +1,210 @@
+package ext
+
+import "github.com/opentracing/opentracing-go"
+
+// These constants define common tag names recommended for better portability across
+// tracing systems and languages/platforms.
+//
+// The tag names are defined as typed strings, so that in addition to the usual use
+//
+// span.setTag(TagName, value)
+//
+// they also support value type validation via this additional syntax:
+//
+// TagName.Set(span, value)
+//
+var (
+ //////////////////////////////////////////////////////////////////////
+ // SpanKind (client/server or producer/consumer)
+ //////////////////////////////////////////////////////////////////////
+
+ // SpanKind hints at relationship between spans, e.g. client/server
+ SpanKind = spanKindTagName("span.kind")
+
+ // SpanKindRPCClient marks a span representing the client-side of an RPC
+ // or other remote call
+ SpanKindRPCClientEnum = SpanKindEnum("client")
+ SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+ // SpanKindRPCServer marks a span representing the server-side of an RPC
+ // or other remote call
+ SpanKindRPCServerEnum = SpanKindEnum("server")
+ SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+ // SpanKindProducer marks a span representing the producer-side of a
+ // message bus
+ SpanKindProducerEnum = SpanKindEnum("producer")
+ SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+ // SpanKindConsumer marks a span representing the consumer-side of a
+ // message bus
+ SpanKindConsumerEnum = SpanKindEnum("consumer")
+ SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+ //////////////////////////////////////////////////////////////////////
+ // Component name
+ //////////////////////////////////////////////////////////////////////
+
+ // Component is a low-cardinality identifier of the module, library,
+ // or package that is generating a span.
+ Component = stringTagName("component")
+
+ //////////////////////////////////////////////////////////////////////
+ // Sampling hint
+ //////////////////////////////////////////////////////////////////////
+
+ // SamplingPriority determines the priority of sampling this Span.
+ SamplingPriority = uint16TagName("sampling.priority")
+
+ //////////////////////////////////////////////////////////////////////
+ // Peer tags. These tags can be emitted by either the client side or
+ // the server side to describe the other side/service in peer-to-peer
+ // communications, like an RPC call.
+ //////////////////////////////////////////////////////////////////////
+
+ // PeerService records the service name of the peer.
+ PeerService = stringTagName("peer.service")
+
+ // PeerAddress records the address name of the peer. This may be a "ip:port",
+ // a bare "hostname", a FQDN or even a database DSN substring
+ // like "mysql://username@127.0.0.1:3306/dbname"
+ PeerAddress = stringTagName("peer.address")
+
+ // PeerHostname records the host name of the peer
+ PeerHostname = stringTagName("peer.hostname")
+
+ // PeerHostIPv4 records IP v4 host address of the peer
+ PeerHostIPv4 = ipv4Tag("peer.ipv4")
+
+ // PeerHostIPv6 records IP v6 host address of the peer
+ PeerHostIPv6 = stringTagName("peer.ipv6")
+
+ // PeerPort records port number of the peer
+ PeerPort = uint16TagName("peer.port")
+
+ //////////////////////////////////////////////////////////////////////
+ // HTTP Tags
+ //////////////////////////////////////////////////////////////////////
+
+ // HTTPUrl should be the URL of the request being handled in this segment
+ // of the trace, in standard URI format. The protocol is optional.
+ HTTPUrl = stringTagName("http.url")
+
+ // HTTPMethod is the HTTP method of the request, and is case-insensitive.
+ HTTPMethod = stringTagName("http.method")
+
+ // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
+ // HTTP response.
+ HTTPStatusCode = uint16TagName("http.status_code")
+
+ //////////////////////////////////////////////////////////////////////
+ // DB Tags
+ //////////////////////////////////////////////////////////////////////
+
+ // DBInstance is database instance name.
+ DBInstance = stringTagName("db.instance")
+
+ // DBStatement is a database statement for the given database type.
+ // It can be a query or a prepared statement (i.e., before substitution).
+ DBStatement = stringTagName("db.statement")
+
+ // DBType is a database type. For any SQL database, "sql".
+ // For others, the lower-case database category, e.g. "redis"
+ DBType = stringTagName("db.type")
+
+ // DBUser is a username for accessing database.
+ DBUser = stringTagName("db.user")
+
+ //////////////////////////////////////////////////////////////////////
+ // Message Bus Tag
+ //////////////////////////////////////////////////////////////////////
+
+ // MessageBusDestination is an address at which messages can be exchanged
+ MessageBusDestination = stringTagName("message_bus.destination")
+
+ //////////////////////////////////////////////////////////////////////
+ // Error Tag
+ //////////////////////////////////////////////////////////////////////
+
+ // Error indicates that the operation represented by the span resulted in an error.
+ Error = boolTagName("error")
+)
+
+// ---
+
+// SpanKindEnum represents common span types
+type SpanKindEnum string
+
+type spanKindTagName string
+
+// Set adds a string tag to the `span`
+func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
+ span.SetTag(string(tag), value)
+}
+
+type rpcServerOption struct {
+ clientContext opentracing.SpanContext
+}
+
+func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
+ if r.clientContext != nil {
+ opentracing.ChildOf(r.clientContext).Apply(o)
+ }
+ SpanKindRPCServer.Apply(o)
+}
+
+// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
+// with `client` representing the metadata for the remote peer Span if available.
+// In case client == nil, due to the client not being instrumented, this RPC
+// server span will be a root span.
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
+ return rpcServerOption{client}
+}
+
+// ---
+
+type stringTagName string
+
+// Set adds a string tag to the `span`
+func (tag stringTagName) Set(span opentracing.Span, value string) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+type uint32TagName string
+
+// Set adds a uint32 tag to the `span`
+func (tag uint32TagName) Set(span opentracing.Span, value uint32) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+type uint16TagName string
+
+// Set adds a uint16 tag to the `span`
+func (tag uint16TagName) Set(span opentracing.Span, value uint16) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+type boolTagName string
+
+// Set adds a bool tag to the `span`
+func (tag boolTagName) Set(span opentracing.Span, value bool) {
+ span.SetTag(string(tag), value)
+}
+
+type ipv4Tag string
+
+// Set adds the IP v4 host address of the peer as a uint32 value to the `span`; kept for backward and Zipkin compatibility
+func (tag ipv4Tag) Set(span opentracing.Span, value uint32) {
+ span.SetTag(string(tag), value)
+}
+
+// SetString records the IP v4 host address of the peer as a '.'-separated tuple on the `span`, e.g. "127.0.0.1"
+func (tag ipv4Tag) SetString(span opentracing.Span, value string) {
+ span.SetTag(string(tag), value)
+}
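The typed tag helpers above give compile-time checking of tag value types. A minimal usage sketch (not part of the vendored file, relying only on the default no-op global tracer):

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func main() {
	sp := opentracing.StartSpan(
		"http_request",
		ext.SpanKindRPCClient, // an opentracing.Tag also acts as a StartSpanOption
	)
	defer sp.Finish()

	// Set() rejects wrongly-typed values at compile time, unlike
	// span.SetTag(key, interface{}).
	ext.HTTPMethod.Set(sp, "GET")
	ext.HTTPUrl.Set(sp, "http://myservice/")
	ext.HTTPStatusCode.Set(sp, 200)
}
```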
diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
new file mode 100644
index 000000000..4f7066a92
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
@@ -0,0 +1,42 @@
+package opentracing
+
+type registeredTracer struct {
+ tracer Tracer
+ isRegistered bool
+}
+
+var (
+ globalTracer = registeredTracer{NoopTracer{}, false}
+)
+
+// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
+// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
+// opentracing.Tracer instance) should call SetGlobalTracer as early as
+// possible in main(), prior to calling the `StartSpan` global func below.
+// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
+// (etc) globals are noops.
+func SetGlobalTracer(tracer Tracer) {
+ globalTracer = registeredTracer{tracer, true}
+}
+
+// GlobalTracer returns the global singleton `Tracer` implementation.
+// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
+// implementation that drops all data handed to it.
+func GlobalTracer() Tracer {
+ return globalTracer.tracer
+}
+
+// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return globalTracer.tracer.StartSpan(operationName, opts...)
+}
+
+// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
+func InitGlobalTracer(tracer Tracer) {
+ SetGlobalTracer(tracer)
+}
+
+// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
+func IsGlobalTracerRegistered() bool {
+ return globalTracer.isRegistered
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
new file mode 100644
index 000000000..05a62e70b
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -0,0 +1,54 @@
+package opentracing
+
+import "context"
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a new `context.Context` that holds a reference to
+// `span`'s SpanContext.
+func ContextWithSpan(ctx context.Context, span Span) context.Context {
+ return context.WithValue(ctx, activeSpanKey, span)
+}
+
+// SpanFromContext returns the `Span` previously associated with `ctx`, or
+// `nil` if no such `Span` could be found.
+//
+// NOTE: context.Context != SpanContext: the former is Go's intra-process
+// context propagation mechanism, and the latter houses OpenTracing's per-Span
+// identity and baggage information.
+func SpanFromContext(ctx context.Context) Span {
+ val := ctx.Value(activeSpanKey)
+ if sp, ok := val.(Span); ok {
+ return sp
+ }
+ return nil
+}
+
+// StartSpanFromContext starts and returns a Span with `operationName`, using
+// any Span found within `ctx` as a ChildOfRef. If no such parent could be
+// found, StartSpanFromContext creates a root (parentless) Span.
+//
+// The second return value is a context.Context object built around the
+// returned Span.
+//
+// Example usage:
+//
+// SomeFunction(ctx context.Context, ...) {
+// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
+// defer sp.Finish()
+// ...
+// }
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+}
+
+// startSpanFromContextWithTracer is factored out for testing purposes.
+func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+ opts = append(opts, ChildOf(parentSpan.Context()))
+ }
+ span := tracer.StartSpan(operationName, opts...)
+ return span, ContextWithSpan(ctx, span)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
new file mode 100644
index 000000000..50feea341
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -0,0 +1,269 @@
+package log
+
+import (
+ "fmt"
+ "math"
+)
+
+type fieldType int
+
+const (
+ stringType fieldType = iota
+ boolType
+ intType
+ int32Type
+ uint32Type
+ int64Type
+ uint64Type
+ float32Type
+ float64Type
+ errorType
+ objectType
+ lazyLoggerType
+ noopType
+)
+
+// Field instances are constructed via LogBool, LogString, and so on.
+// Tracing implementations may then handle them via the Field.Marshal
+// method.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/uber-go/zap
+type Field struct {
+ key string
+ fieldType fieldType
+ numericVal int64
+ stringVal string
+ interfaceVal interface{}
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+ return Field{
+ key: key,
+ fieldType: stringType,
+ stringVal: val,
+ }
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+ var numericVal int64
+ if val {
+ numericVal = 1
+ }
+ return Field{
+ key: key,
+ fieldType: boolType,
+ numericVal: numericVal,
+ }
+}
+
+// Int adds an int-valued key:value pair to a Span.LogFields() record
+func Int(key string, val int) Field {
+ return Field{
+ key: key,
+ fieldType: intType,
+ numericVal: int64(val),
+ }
+}
+
+// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
+func Int32(key string, val int32) Field {
+ return Field{
+ key: key,
+ fieldType: int32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+ return Field{
+ key: key,
+ fieldType: int64Type,
+ numericVal: val,
+ }
+}
+
+// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
+func Uint32(key string, val uint32) Field {
+ return Field{
+ key: key,
+ fieldType: uint32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+ return Field{
+ key: key,
+ fieldType: uint64Type,
+ numericVal: int64(val),
+ }
+}
+
+// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
+func Float32(key string, val float32) Field {
+ return Field{
+ key: key,
+ fieldType: float32Type,
+ numericVal: int64(math.Float32bits(val)),
+ }
+}
+
+// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
+func Float64(key string, val float64) Field {
+ return Field{
+ key: key,
+ fieldType: float64Type,
+ numericVal: int64(math.Float64bits(val)),
+ }
+}
+
+// Error adds an error with the key "error" to a Span.LogFields() record
+func Error(err error) Field {
+ return Field{
+ key: "error",
+ fieldType: errorType,
+ interfaceVal: err,
+ }
+}
+
+// Object adds an object-valued key:value pair to a Span.LogFields() record
+func Object(key string, obj interface{}) Field {
+ return Field{
+ key: key,
+ fieldType: objectType,
+ interfaceVal: obj,
+ }
+}
+
+// LazyLogger allows for user-defined, late-bound logging of arbitrary data
+type LazyLogger func(fv Encoder)
+
+// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
+// implementation will call the LazyLogger function at an indefinite time in
+// the future (after Lazy() returns).
+func Lazy(ll LazyLogger) Field {
+ return Field{
+ fieldType: lazyLoggerType,
+ interfaceVal: ll,
+ }
+}
+
+// Noop creates a no-op log field that should be ignored by the tracer.
+// It can be used to capture optional fields, for example those that should
+// only be logged in non-production environments:
+//
+// func customerField(order *Order) log.Field {
+// if os.Getenv("ENVIRONMENT") == "dev" {
+// return log.String("customer", order.Customer.ID)
+// }
+// return log.Noop()
+// }
+//
+// span.LogFields(log.String("event", "purchase"), customerField(order))
+//
+func Noop() Field {
+ return Field{
+ fieldType: noopType,
+ }
+}
+
+// Encoder allows access to the contents of a Field (via a call to
+// Field.Marshal).
+//
+// Tracer implementations typically provide an implementation of Encoder;
+// OpenTracing callers typically do not need to concern themselves with it.
+type Encoder interface {
+ EmitString(key, value string)
+ EmitBool(key string, value bool)
+ EmitInt(key string, value int)
+ EmitInt32(key string, value int32)
+ EmitInt64(key string, value int64)
+ EmitUint32(key string, value uint32)
+ EmitUint64(key string, value uint64)
+ EmitFloat32(key string, value float32)
+ EmitFloat64(key string, value float64)
+ EmitObject(key string, value interface{})
+ EmitLazyLogger(value LazyLogger)
+}
+
+// Marshal passes a Field instance through to the appropriate
+// field-type-specific method of an Encoder.
+func (lf Field) Marshal(visitor Encoder) {
+ switch lf.fieldType {
+ case stringType:
+ visitor.EmitString(lf.key, lf.stringVal)
+ case boolType:
+ visitor.EmitBool(lf.key, lf.numericVal != 0)
+ case intType:
+ visitor.EmitInt(lf.key, int(lf.numericVal))
+ case int32Type:
+ visitor.EmitInt32(lf.key, int32(lf.numericVal))
+ case int64Type:
+ visitor.EmitInt64(lf.key, int64(lf.numericVal))
+ case uint32Type:
+ visitor.EmitUint32(lf.key, uint32(lf.numericVal))
+ case uint64Type:
+ visitor.EmitUint64(lf.key, uint64(lf.numericVal))
+ case float32Type:
+ visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
+ case float64Type:
+ visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
+ case errorType:
+ if err, ok := lf.interfaceVal.(error); ok {
+ visitor.EmitString(lf.key, err.Error())
+ } else {
+ visitor.EmitString(lf.key, "<nil>")
+ }
+ case objectType:
+ visitor.EmitObject(lf.key, lf.interfaceVal)
+ case lazyLoggerType:
+ visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
+ case noopType:
+ // intentionally left blank
+ }
+}
+
+// Key returns the field's key.
+func (lf Field) Key() string {
+ return lf.key
+}
+
+// Value returns the field's value as interface{}.
+func (lf Field) Value() interface{} {
+ switch lf.fieldType {
+ case stringType:
+ return lf.stringVal
+ case boolType:
+ return lf.numericVal != 0
+ case intType:
+ return int(lf.numericVal)
+ case int32Type:
+ return int32(lf.numericVal)
+ case int64Type:
+ return int64(lf.numericVal)
+ case uint32Type:
+ return uint32(lf.numericVal)
+ case uint64Type:
+ return uint64(lf.numericVal)
+ case float32Type:
+ return math.Float32frombits(uint32(lf.numericVal))
+ case float64Type:
+ return math.Float64frombits(uint64(lf.numericVal))
+ case errorType, objectType, lazyLoggerType:
+ return lf.interfaceVal
+ case noopType:
+ return nil
+ default:
+ return nil
+ }
+}
+
+// String returns a string representation of the key and value.
+func (lf Field) String() string {
+ return fmt.Sprint(lf.key, ":", lf.Value())
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go
new file mode 100644
index 000000000..3832feb5c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/util.go
@@ -0,0 +1,54 @@
+package log
+
+import "fmt"
+
+// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
+// a la Span.LogFields().
+func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
+ if len(keyValues)%2 != 0 {
+ return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
+ }
+ fields := make([]Field, len(keyValues)/2)
+ for i := 0; i*2 < len(keyValues); i++ {
+ key, ok := keyValues[i*2].(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "non-string key (pair #%d): %T",
+ i, keyValues[i*2])
+ }
+ switch typedVal := keyValues[i*2+1].(type) {
+ case bool:
+ fields[i] = Bool(key, typedVal)
+ case string:
+ fields[i] = String(key, typedVal)
+ case int:
+ fields[i] = Int(key, typedVal)
+ case int8:
+ fields[i] = Int32(key, int32(typedVal))
+ case int16:
+ fields[i] = Int32(key, int32(typedVal))
+ case int32:
+ fields[i] = Int32(key, typedVal)
+ case int64:
+ fields[i] = Int64(key, typedVal)
+ case uint:
+ fields[i] = Uint64(key, uint64(typedVal))
+ case uint64:
+ fields[i] = Uint64(key, typedVal)
+ case uint8:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint16:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint32:
+ fields[i] = Uint32(key, typedVal)
+ case float32:
+ fields[i] = Float32(key, typedVal)
+ case float64:
+ fields[i] = Float64(key, typedVal)
+ default:
+ // When in doubt, coerce to a string
+ fields[i] = String(key, fmt.Sprint(typedVal))
+ }
+ }
+ return fields, nil
+}
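A short sketch (not part of the vendored file) of what InterleavedKVToFields does with a typical LogKV-style argument list:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func main() {
	fields, err := log.InterleavedKVToFields(
		"event", "soft error",
		"waited.millis", 1500, // int -> Int field
		"retryable", true,     // bool -> Bool field
	)
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		fmt.Println(f.String()) // e.g. "event:soft error"
	}
}
```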
diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go
new file mode 100644
index 000000000..0d32f692c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/noop.go
@@ -0,0 +1,64 @@
+package opentracing
+
+import "github.com/opentracing/opentracing-go/log"
+
+// A NoopTracer is a trivial, minimum overhead implementation of Tracer
+// for which all operations are no-ops.
+//
+// The primary use of this implementation is in libraries, such as RPC
+// frameworks, that make tracing an optional feature controlled by the
+// end user. A no-op implementation allows said libraries to use it
+// as the default Tracer and to write instrumentation that does
+// not need to keep checking if the tracer instance is nil.
+//
+// For the same reason, the NoopTracer is the default "global" tracer
+// (see GlobalTracer and SetGlobalTracer functions).
+//
+// WARNING: NoopTracer does not support baggage propagation.
+type NoopTracer struct{}
+
+type noopSpan struct{}
+type noopSpanContext struct{}
+
+var (
+ defaultNoopSpanContext = noopSpanContext{}
+ defaultNoopSpan = noopSpan{}
+ defaultNoopTracer = NoopTracer{}
+)
+
+const (
+ emptyString = ""
+)
+
+// noopSpanContext:
+func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
+
+// noopSpan:
+func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
+func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan }
+func (n noopSpan) BaggageItem(key string) string { return emptyString }
+func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
+func (n noopSpan) LogFields(fields ...log.Field) {}
+func (n noopSpan) LogKV(keyVals ...interface{}) {}
+func (n noopSpan) Finish() {}
+func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
+func (n noopSpan) SetOperationName(operationName string) Span { return n }
+func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
+func (n noopSpan) LogEvent(event string) {}
+func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
+func (n noopSpan) Log(data LogData) {}
+
+// StartSpan belongs to the Tracer interface.
+func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return defaultNoopSpan
+}
+
+// Inject belongs to the Tracer interface.
+func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
+ return nil
+}
+
+// Extract belongs to the Tracer interface.
+func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
+ return nil, ErrSpanContextNotFound
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go
new file mode 100644
index 000000000..b0c275eb0
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/propagation.go
@@ -0,0 +1,176 @@
+package opentracing
+
+import (
+ "errors"
+ "net/http"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// CORE PROPAGATION INTERFACES:
+///////////////////////////////////////////////////////////////////////////////
+
+var (
+ // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
+ // Tracer.Extract() is not recognized by the Tracer implementation.
+ ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
+
+ // ErrSpanContextNotFound occurs when the `carrier` passed to
+ // Tracer.Extract() is valid and uncorrupted but has insufficient
+ // information to extract a SpanContext.
+ ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
+
+ // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
+ // operate on a SpanContext which it is not prepared to handle (for
+ // example, since it was created by a different tracer implementation).
+ ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
+
+ // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
+ // implementations expect a different type of `carrier` than they are
+ // given.
+ ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
+
+ // ErrSpanContextCorrupted occurs when the `carrier` passed to
+ // Tracer.Extract() is of the expected type but is corrupted.
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// BUILTIN PROPAGATION FORMATS:
+///////////////////////////////////////////////////////////////////////////////
+
+// BuiltinFormat is used to demarcate the values within package `opentracing`
+// that are intended for use with the Tracer.Inject() and Tracer.Extract()
+// methods.
+type BuiltinFormat byte
+
+const (
+ // Binary represents SpanContexts as opaque binary data.
+ //
+ // For Tracer.Inject(): the carrier must be an `io.Writer`.
+ //
+ // For Tracer.Extract(): the carrier must be an `io.Reader`.
+ Binary BuiltinFormat = iota
+
+ // TextMap represents SpanContexts as key:value string pairs.
+ //
+ // Unlike HTTPHeaders, the TextMap format does not restrict the key or
+ // value character sets in any way.
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ TextMap
+
+ // HTTPHeaders represents SpanContexts as HTTP header string pairs.
+ //
+ // Unlike TextMap, the HTTPHeaders format requires that the keys and values
+ // be valid as HTTP headers as-is (i.e., character casing may be unstable
+ // and special characters are disallowed in keys, values should be
+ // URL-escaped, etc).
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ //
+ // See HTTPHeadersCarrier for an implementation of both TextMapWriter
+ // and TextMapReader that defers to an http.Header instance for storage.
+ // For example, Inject():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := span.Tracer().Inject(
+ // span.Context(), opentracing.HTTPHeaders, carrier)
+ //
+ // Or Extract():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(
+ // opentracing.HTTPHeaders, carrier)
+ //
+ HTTPHeaders
+)
+
+// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
+// it, the caller can encode a SpanContext for propagation as entries in a map
+// of unicode strings.
+type TextMapWriter interface {
+ // Set a key:value pair to the carrier. Multiple calls to Set() for the
+ // same key leads to undefined behavior.
+ //
+ // NOTE: The backing store for the TextMapWriter may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ Set(key, val string)
+}
+
+// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
+// the caller can decode a propagated SpanContext as entries in a map of
+// unicode strings.
+type TextMapReader interface {
+ // ForeachKey returns TextMap contents via repeated calls to the `handler`
+ // function. If any call to `handler` returns a non-nil error, ForeachKey
+ // terminates and returns that error.
+ //
+ // NOTE: The backing store for the TextMapReader may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ //
+ // The "foreach" callback pattern reduces unnecessary copying in some cases
+ // and also allows implementations to hold locks while the map is read.
+ ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, v := range c {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter
+func (c TextMapCarrier) Set(key, val string) {
+ c[key] = val
+}
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// err := tracer.Inject(
+// span.Context(),
+// opentracing.HTTPHeaders,
+// carrier)
+//
+type HTTPHeadersCarrier http.Header
+
+// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+ h := http.Header(c)
+ h.Set(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, vals := range c {
+ for _, v := range vals {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 000000000..0d3fb5341
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,189 @@
+package opentracing
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
+type SpanContext interface {
+ // ForeachBaggageItem grants access to all baggage items stored in the
+ // SpanContext.
+ // The handler function will be called for each baggage key/value pair.
+ // The ordering of items is not guaranteed.
+ //
+ // The bool return value indicates if the handler wants to continue iterating
+ // through the rest of the baggage items; for example if the handler is trying to
+ // find some baggage item by pattern matching the name, it can return false
+ // as soon as the item is found to stop further iterations.
+ ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+ // Sets the end timestamp and finalizes Span state.
+ //
+ // With the exception of calls to Context() (which are always allowed),
+ // Finish() must be the last call made to any span instance, and to do
+ // otherwise leads to undefined behavior.
+ Finish()
+ // FinishWithOptions is like Finish() but with explicit control over
+ // timestamps and log data.
+ FinishWithOptions(opts FinishOptions)
+
+ // Context() yields the SpanContext for this Span. Note that the return
+ // value of Context() is still valid after a call to Span.Finish(), as is
+ // a call to Span.Context() after a call to Span.Finish().
+ Context() SpanContext
+
+ // Sets or changes the operation name.
+ //
+ // Returns a reference to this Span for chaining.
+ SetOperationName(operationName string) Span
+
+ // Adds a tag to the span.
+ //
+ // If there is a pre-existing tag set for `key`, it is overwritten.
+ //
+ // Tag values can be numeric types, strings, or bools. The behavior of
+ // other tag value types is undefined at the OpenTracing level. If a
+ // tracing system does not know how to handle a particular value type, it
+ // may ignore the tag, but shall not panic.
+ //
+ // Returns a reference to this Span for chaining.
+ SetTag(key string, value interface{}) Span
+
+ // LogFields is an efficient and type-checked way to record key:value
+ // logging data about a Span, though the programming interface is a little
+ // more verbose than LogKV(). Here's an example:
+ //
+ // span.LogFields(
+ // log.String("event", "soft error"),
+ // log.String("type", "cache timeout"),
+ // log.Int("waited.millis", 1500))
+ //
+ // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
+ LogFields(fields ...log.Field)
+
+ // LogKV is a concise, readable way to record key:value logging data about
+ // a Span, though unfortunately this also makes it less efficient and less
+ // type-safe than LogFields(). Here's an example:
+ //
+ // span.LogKV(
+ // "event", "soft error",
+ // "type", "cache timeout",
+ // "waited.millis", 1500)
+ //
+ // For LogKV (as opposed to LogFields()), the parameters must appear as
+ // key-value pairs, like
+ //
+ // span.LogKV(key1, val1, key2, val2, key3, val3, ...)
+ //
+ // The keys must all be strings. The values may be strings, numeric types,
+ // bools, Go error instances, or arbitrary structs.
+ //
+ // (Note to implementors: consider the log.InterleavedKVToFields() helper)
+ LogKV(alternatingKeyValues ...interface{})
+
+ // SetBaggageItem sets a key:value pair on this Span and its SpanContext
+ // that also propagates to descendants of this Span.
+ //
+ // SetBaggageItem() enables powerful functionality given a full-stack
+ // opentracing integration (e.g., arbitrary application data from a mobile
+ // app can make it, transparently, all the way into the depths of a storage
+ // system), and with it some powerful costs: use this feature with care.
+ //
+ // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
+ // *future* causal descendants of the associated Span.
+ //
+ // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
+ // value is copied into every local *and remote* child of the associated
+ // Span, and that can add up to a lot of network and cpu overhead.
+ //
+ // Returns a reference to this Span for chaining.
+ SetBaggageItem(restrictedKey, value string) Span
+
+ // Gets the value for a baggage item given its key. Returns the empty string
+ // if the value isn't found in this Span.
+ BaggageItem(restrictedKey string) string
+
+ // Provides access to the Tracer that created this Span.
+ Tracer() Tracer
+
+ // Deprecated: use LogFields or LogKV
+ LogEvent(event string)
+ // Deprecated: use LogFields or LogKV
+ LogEventWithPayload(event string, payload interface{})
+ // Deprecated: use LogFields or LogKV
+ Log(data LogData)
+}
+
+// LogRecord is data associated with a single Span log. Every LogRecord
+// instance must specify at least one Field.
+type LogRecord struct {
+ Timestamp time.Time
+ Fields []log.Field
+}
+
+// FinishOptions allows Span.FinishWithOptions callers to override the finish
+// timestamp and provide log data via a bulk interface.
+type FinishOptions struct {
+ // FinishTime overrides the Span's finish time, or implicitly becomes
+ // time.Now() if FinishTime.IsZero().
+ //
+ // FinishTime must resolve to a timestamp that's >= the Span's StartTime
+ // (per StartSpanOptions).
+ FinishTime time.Time
+
+ // LogRecords allows the caller to specify the contents of many LogFields()
+ // calls with a single slice. May be nil.
+ //
+ // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
+ // be set explicitly). Also, they must be >= the Span's start timestamp and
+ // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
+ // behavior of FinishWithOptions() is undefined.
+ //
+ // If specified, the caller hands off ownership of LogRecords at
+ // FinishWithOptions() invocation time.
+ //
+ // If specified, the (deprecated) BulkLogData must be nil or empty.
+ LogRecords []LogRecord
+
+ // BulkLogData is DEPRECATED.
+ BulkLogData []LogData
+}
+
+// LogData is DEPRECATED
+type LogData struct {
+ Timestamp time.Time
+ Event string
+ Payload interface{}
+}
+
+// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
+func (ld *LogData) ToLogRecord() LogRecord {
+ var literalTimestamp time.Time
+ if ld.Timestamp.IsZero() {
+ literalTimestamp = time.Now()
+ } else {
+ literalTimestamp = ld.Timestamp
+ }
+ rval := LogRecord{
+ Timestamp: literalTimestamp,
+ }
+ if ld.Payload == nil {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ }
+ } else {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ log.Object("payload", ld.Payload),
+ }
+ }
+ return rval
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go
new file mode 100644
index 000000000..7bca1f736
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/tracer.go
@@ -0,0 +1,305 @@
+package opentracing
+
+import "time"
+
+// Tracer is a simple, thin interface for Span creation and SpanContext
+// propagation.
+type Tracer interface {
+
+ // Create, start, and return a new Span with the given `operationName` and
+ // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
+ // from the "functional options" pattern, per
+ // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
+ //
+ // A Span with no SpanReference options (e.g., opentracing.ChildOf() or
+ // opentracing.FollowsFrom()) becomes the root of its own trace.
+ //
+ // Examples:
+ //
+ // var tracer opentracing.Tracer = ...
+ //
+ // // The root-span case:
+ // sp := tracer.StartSpan("GetFeed")
+ //
+ // // The vanilla child span case:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()))
+ //
+ // // All the bells and whistles:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()),
+ // opentracing.Tag{"user_agent", loggedReq.UserAgent},
+ // opentracing.StartTime(loggedReq.Timestamp),
+ // )
+ //
+ StartSpan(operationName string, opts ...StartSpanOption) Span
+
+ // Inject() takes the `sm` SpanContext instance and injects it for
+ // propagation within `carrier`. The actual type of `carrier` depends on
+ // the value of `format`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see
+ // https://godoc.org/golang.org/x/net/context#WithValue).
+ //
+ // Example usage (sans error handling):
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := tracer.Inject(
+ // span.Context(),
+ // opentracing.HTTPHeaders,
+ // carrier)
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Implementations may return opentracing.ErrUnsupportedFormat if `format`
+ // is not supported by (or not known by) the implementation.
+ //
+ // Implementations may return opentracing.ErrInvalidCarrier or any other
+ // implementation-specific error if the format is supported but injection
+ // fails anyway.
+ //
+ // See Tracer.Extract().
+ Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+ // Extract() returns a SpanContext instance given `format` and `carrier`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see
+ // https://godoc.org/golang.org/x/net/context#WithValue).
+ //
+ // Example usage (with StartSpan):
+ //
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+ //
+ // // ... assuming the ultimate goal here is to resume the trace with a
+ // // server-side Span:
+ // var serverSpan opentracing.Span
+ // if err == nil {
+ // span = tracer.StartSpan(
+ // rpcMethodName, ext.RPCServerOption(clientContext))
+ // } else {
+ // span = tracer.StartSpan(rpcMethodName)
+ // }
+ //
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Return values:
+ // - A successful Extract returns a SpanContext instance and a nil error
+ // - If there was simply no SpanContext to extract in `carrier`, Extract()
+ // returns (nil, opentracing.ErrSpanContextNotFound)
+ // - If `format` is unsupported or unrecognized, Extract() returns (nil,
+ // opentracing.ErrUnsupportedFormat)
+ // - If there are more fundamental problems with the `carrier` object,
+ // Extract() may return opentracing.ErrInvalidCarrier,
+ // opentracing.ErrSpanContextCorrupted, or implementation-specific
+ // errors.
+ //
+ // See Tracer.Inject().
+ Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+// sso := opentracing.StartSpanOptions{}
+// for _, o := range opts {
+// o.Apply(&sso)
+// }
+// ...
+// }
+//
+type StartSpanOptions struct {
+ // Zero or more causal references to other Spans (via their SpanContext).
+ // If empty, start a "root" Span (i.e., start a new trace).
+ References []SpanReference
+
+ // StartTime overrides the Span's start time, or implicitly becomes
+ // time.Now() if StartTime.IsZero().
+ StartTime time.Time
+
+ // Tags may have zero or more entries; the restrictions on map values are
+ // identical to those for Span.SetTag(). May be nil.
+ //
+ // If specified, the caller hands off ownership of Tags at
+ // StartSpan() invocation time.
+ Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+ Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+ // ChildOfRef refers to a parent Span that caused *and* somehow depends
+ // upon the new child Span. Often (but not always), the parent Span cannot
+ // finish until the child Span does.
+ //
+ // A timing diagram for a ChildOfRef that's blocked on the new Span:
+ //
+ // [-Parent Span---------]
+ // [-Child Span----]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.ChildOf()
+ ChildOfRef SpanReferenceType = iota
+
+ // FollowsFromRef refers to a parent Span that does not depend in any way
+ // on the result of the new child Span. For instance, one might use
+ // FollowsFromRefs to describe pipeline stages separated by queues,
+ // or a fire-and-forget cache insert at the tail end of a web request.
+ //
+ // A FollowsFromRef Span is part of the same logical trace as the new Span:
+ // i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+ //
+ // All of the following could be valid timing diagrams for children that
+ // "FollowFrom" a parent.
+ //
+ // [-Parent Span-] [-Child Span-]
+ //
+ //
+ // [-Parent Span--]
+ // [-Child Span-]
+ //
+ //
+ // [-Parent Span-]
+ // [-Child Span-]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.FollowsFrom()
+ FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships. If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+// sc, _ := tracer.Extract(someFormat, someCarrier)
+// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil; it will simply
+// not add the parent span reference to the options.
+type SpanReference struct {
+ Type SpanReferenceType
+ ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+ if r.ReferencedContext != nil {
+ o.References = append(o.References, r)
+ }
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
+//
+// See ChildOfRef, SpanReference
+func ChildOf(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: ChildOfRef,
+ ReferencedContext: sc,
+ }
+}
+
+// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
+// the child Span but does not directly depend on its result in any way.
+// If sc == nil, the option has no effect.
+//
+// See FollowsFromRef, SpanReference
+func FollowsFrom(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: FollowsFromRef,
+ ReferencedContext: sc,
+ }
+}
+
+// StartTime is a StartSpanOption that sets an explicit start timestamp for the
+// new Span.
+type StartTime time.Time
+
+// Apply satisfies the StartSpanOption interface.
+func (t StartTime) Apply(o *StartSpanOptions) {
+ o.StartTime = time.Time(t)
+}
+
+// Tags are a generic map from an arbitrary string key to an opaque value type.
+// The underlying tracing system is responsible for interpreting and
+// serializing the values.
+type Tags map[string]interface{}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tags) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ for k, v := range t {
+ o.Tags[k] = v
+ }
+}
+
+// Tag may be passed as a StartSpanOption to add a tag to new spans,
+// or its Set method may be used to apply the tag to an existing Span,
+// for example:
+//
+// tracer.StartSpan("opName", Tag{"Key", value})
+//
+// or
+//
+// Tag{"key", value}.Set(span)
+type Tag struct {
+ Key string
+ Value interface{}
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tag) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ o.Tags[t.Key] = t.Value
+}
+
+// Set applies the tag to an existing Span.
+func (t Tag) Set(s Span) {
+ s.SetTag(t.Key, t.Value)
+}
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 000000000..851fcc087
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,736 @@
+![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+
+Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
+
+Many of the most widely used Go projects are built using Cobra including:
+
+* [Kubernetes](http://kubernetes.io/)
+* [Hugo](http://gohugo.io)
+* [rkt](https://github.com/coreos/rkt)
+* [etcd](https://github.com/coreos/etcd)
+* [Moby (formerly Docker)](https://github.com/moby/moby)
+* [Docker (distribution)](https://github.com/docker/distribution)
+* [OpenShift](https://www.openshift.com/)
+* [Delve](https://github.com/derekparker/delve)
+* [GopherJS](http://www.gopherjs.org/)
+* [CockroachDB](http://www.cockroachlabs.com/)
+* [Bleve](http://www.blevesearch.com/)
+* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [rclone](http://rclone.org/)
+* [nehm](https://github.com/bogem/nehm)
+* [Pouch](https://github.com/alibaba/pouch)
+
+[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
+[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
+[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+ * [Commands](#commands)
+ * [Flags](#flags)
+- [Installing](#installing)
+- [Getting Started](#getting-started)
+ * [Using the Cobra Generator](#using-the-cobra-generator)
+ * [Using the Cobra Library](#using-the-cobra-library)
+ * [Working with Flags](#working-with-flags)
+ * [Positional and Custom Arguments](#positional-and-custom-arguments)
+ * [Example](#example)
+ * [Help Command](#help-command)
+ * [Usage Message](#usage-message)
+ * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
+ * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
+ * [Generating documentation for your command](#generating-documentation-for-your-command)
+ * [Generating bash completions](#generating-bash-completions)
+- [Contributing](#contributing)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra is also a program that generates application scaffolding so you can rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated bash autocomplete for your application
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things, and **Flags** are modifiers for those actions.
+
+The best applications read like sentences when used, and users will intuitively
+know how to use them.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+or
+`APPNAME COMMAND ARG --FLAG`
+
+A few good real-world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command we are telling Git to clone the URL as a bare repository.
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+ go get -u github.com/spf13/cobra/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Getting Started
+
+While you are welcome to provide your own organization, a Cobra-based
+application typically follows this organizational structure:
+
+```
+ ▾ appName/
+ ▾ cmd/
+ add.go
+ your.go
+ commands.go
+ here.go
+ main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To implement Cobra manually, you need to create a bare main.go file and a rootCmd file.
+You can optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
+ love by spf13 and friends in Go.
+ Complete documentation is available at http://hugo.spf13.com`,
+ Run: func(cmd *cobra.Command, args []string) {
+ // Do Stuff Here
+ },
+}
+
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+import (
+ "fmt"
+ "os"
+
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+func init() {
+ cobra.OnInitialize(initConfig)
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
+ rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
+ viper.SetDefault("license", "apache")
+}
+
+func initConfig() {
+ // Don't forget to read config either from cfgFile or from home directory!
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".cobra")
+ }
+
+ if err := viper.ReadInConfig(); err != nil {
+ fmt.Println("Can't read config:", err)
+ os.Exit(1)
+ }
+}
+```
+
+### Create your main.go
+
+With the root command in place, you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and are typically each given their own file
+inside the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+ Use: "version",
+ Short: "Print the version number of Hugo",
+ Long: `All software has versions. This is Hugo's`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+ },
+}
+```
+
+## Working with Flags
+
+Flags provide modifiers to control how a command operates.
+
+### Assign flags to a command
+
+Since flags are defined and used in different locations, we need to
+define a variable with the appropriate scope outside the command to hold
+the flag's value.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assigning a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent', meaning that it will be available both to the
+command it's assigned to and to every command under that command. For
+global flags, assign the flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally, in which case it applies only to that specific command.
+
+```go
+rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command; any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+ Use: "print [OPTIONS] [COMMANDS]",
+ TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+ rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example the persistent flag `author` is bound with `viper`.
+**Note** that the variable `author` will not be set to the value from the config file
+when the `--author` flag is not provided by the user.
+
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
+
+An example of setting the custom validator:
+
+```go
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: func(cmd *cobra.Command, args []string) error {
+ if len(args) < 1 {
+ return errors.New("requires at least one arg")
+ }
+ if myapp.IsValidColor(args[0]) {
+ return nil
+ }
+ return fmt.Errorf("invalid color specified: %s", args[0])
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+ var echoTimes int
+
+ var cmdPrint = &cobra.Command{
+ Use: "print [string to print]",
+ Short: "Print anything to the screen",
+ Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdEcho = &cobra.Command{
+ Use: "echo [string to echo]",
+ Short: "Echo anything to the screen",
+ Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdTimes = &cobra.Command{
+ Use: "times [# times] [string to echo]",
+ Short: "Echo anything to the screen more times",
+ Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ for i := 0; i < echoTimes; i++ {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ }
+ },
+ }
+
+ cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+ var rootCmd = &cobra.Command{Use: "app"}
+ rootCmd.AddCommand(cmdPrint, cmdEcho)
+ cmdEcho.AddCommand(cmdTimes)
+ rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will also
+support all other commands as input. Say, for instance, you have a command called
+'create'; without any additional configuration, 'app help create' will work.
+Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+ $ cobra help
+
+ Cobra is a CLI library for Go that empowers applications.
+ This application is a tool to generate the needed files
+ to quickly create a Cobra application.
+
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with the following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any child commands.
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+ $ cobra --invalid
+ Error: unknown flag: --invalid
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
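+
+A minimal sketch (the version string and template below are illustrative):
+
+```go
+var rootCmd = &cobra.Command{
+  Use:     "app",
+  Version: "0.1.0", // enables the top-level --version flag
+}
+
+func init() {
+  // Optional: customize how the version is printed.
+  rootCmd.SetVersionTemplate("{{.Name}} version {{.Version}}\n")
+}
+```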
+
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+
+ var rootCmd = &cobra.Command{
+ Use: "root [sub]",
+ Short: "My root command",
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+ },
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ var subCmd = &cobra.Command{
+ Use: "sub [no options!]",
+ Short: "My subcommand",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ rootCmd.AddCommand(subCmd)
+
+ rootCmd.SetArgs([]string{""})
+ rootCmd.Execute()
+ fmt.Println()
+ rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+ rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+ server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are generated automatically based on every registered subcommand, using an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command within a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but make sense in your set of commands, and for which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+ delete
+
+Run 'kubectl help' for usage.
+```
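+
+A minimal sketch of wiring this up with `SuggestFor` (command names are illustrative):
+
+```go
+var deleteCmd = &cobra.Command{
+  Use:        "delete",
+  Short:      "Delete resources",
+  SuggestFor: []string{"remove", "rm"}, // suggest 'delete' when these are typed
+  Run:        func(cmd *cobra.Command, args []string) {},
+}
+```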
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. in the following formats:
+
+- [Markdown](doc/md_docs.md)
+- [ReStructured Text](doc/rest_docs.md)
+- [Man Page](doc/man_docs.md)
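+
+As a sketch, generating Markdown docs for a command tree might look like this (the output directory is illustrative):
+
+```go
+import "github.com/spf13/cobra/doc"
+
+// genDocs writes one Markdown file per command into ./docs.
+func genDocs() error {
+  return doc.GenMarkdownTree(rootCmd, "./docs")
+}
+```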
+
+## Generating bash completions
+
+Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
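+
+A minimal sketch of writing a completion script to a file (the output path is illustrative):
+
+```go
+// genCompletion writes a bash completion script for rootCmd and all its subcommands.
+func genCompletion() error {
+  return rootCmd.GenBashCompletionFile("/etc/bash_completion.d/app")
+}
+```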
+
+# Contributing
+
+1. Fork it
+2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Make changes and add them (`git add .`)
+5. Commit your changes (`git commit -m 'Add some feature'`)
+6. Push to the branch (`git push origin my-new-feature`)
+7. Create new pull request
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 000000000..a5d8a9273
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,89 @@
+package cobra
+
+import (
+ "fmt"
+)
+
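+// PositionalArgs is the type of a function that validates a command's positional arguments.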
+type PositionalArgs func(cmd *Command, args []string) error
+
+// Legacy arg validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ for _, v := range args {
+ if !stringInSlice(v, cmd.ValidArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there is not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 000000000..8fa8f486f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,584 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf *bytes.Buffer, name string) {
+ buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ buf.WriteString(fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs has version 1.3 of bash-completion, which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+ return 0;
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions=("${must_have_one_noun[@]}")
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name))
+}
+
+func writePostscript(buf *bytes.Buffer, name string) {
+ name = strings.Replace(name, ":", "__", -1)
+ buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
+ buf.WriteString(fmt.Sprintf(`{
+ local cur prev words cword
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local last_command
+ local nouns=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c == cmd.helpCommand {
+ continue
+ }
+ buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ buf.WriteString("\n")
+}
+
+func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ buf.WriteString(" flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+}
+
+func writeFlags(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ if localNonPersistentFlags.Lookup(flag.Name) != nil {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ buf.WriteString("\n")
+}
+
+func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ for key := range flag.Annotations {
+ switch key {
+ case BashCompOneRequiredFlag:
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
+ }
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" must_have_one_noun=()\n")
+ sort.Sort(sort.StringSlice(cmd.ValidArgs))
+ for _, value := range cmd.ValidArgs {
+ buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+}
+
+func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Sort(sort.StringSlice(cmd.Aliases))
+
+ buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ buf.WriteString(` fi`)
+ buf.WriteString("\n")
+}
+func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" noun_aliases=()\n")
+ sort.Sort(sort.StringSlice(cmd.ArgAliases))
+ for _, value := range cmd.ArgAliases {
+ buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf *bytes.Buffer, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c == cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.Replace(commandName, " ", "_", -1)
+ commandName = strings.Replace(commandName, ":", "__", -1)
+
+ if cmd.Root() == cmd {
+ buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
+ buf.WriteString("\n")
+ buf.WriteString(" command_aliases=()\n")
+ buf.WriteString("\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ buf.WriteString("}\n\n")
+}
+
+// GenBashCompletion generates a bash completion file and writes it to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates a bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
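For reference, a minimal sketch (not part of this change) of how an application could use the completion helpers vendored above; the `app` command name, the `--config` flag, and the output filename are illustrative assumptions only:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical root command; names are illustrative only.
	rootCmd := &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {},
	}
	rootCmd.Flags().String("config", "", "path to a config file")

	// MarkFlagRequired adds the BashCompOneRequiredFlag annotation;
	// MarkFlagFilename adds BashCompFilenameExt with the given extensions.
	if err := rootCmd.MarkFlagRequired("config"); err != nil {
		log.Fatal(err)
	}
	if err := rootCmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
		log.Fatal(err)
	}

	// Writes the script assembled by writePreamble, gen, and writePostscript.
	if err := rootCmd.GenBashCompletionFile("app-completion.bash"); err != nil {
		log.Fatal(err)
	}
}
```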
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 000000000..7010fd15b
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,200 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to enable automatically in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText string = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ template := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(template, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ }
+ for i := range d {
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
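As a quick orientation to the package-level helpers in the file above (OnInitialize, AddTemplateFunc), here is a minimal sketch; the `demo` command and the `upper` template function are hypothetical and not taken from this change:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	// OnInitialize registers functions that run when any command's Execute is called.
	cobra.OnInitialize(func() { fmt.Println("loading configuration...") })

	// AddTemplateFunc exposes a function to the usage/help templates,
	// next to the built-ins (trim, rpad, gt, eq, ...).
	cobra.AddTemplateFunc("upper", strings.ToUpper)

	demo := &cobra.Command{
		Use:   "demo",
		Short: "demo command",
		Run:   func(cmd *cobra.Command, args []string) { fmt.Println("hello") },
	}
	if err := demo.Execute(); err != nil {
		fmt.Println(err)
	}
}
```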
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 000000000..34d1bf367
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1517 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+// FParseErrWhitelist configures Flag parse errors to be ignored
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example holds examples of how to use the command.
+ Example string
+
+ // ValidArgs is a list of all valid non-flag arguments that are accepted in bash completions.
+ ValidArgs []string
+
+ // Expected arguments
+ Args PositionalArgs
+
+ // ArgAliases is a list of aliases for ValidArgs.
+ // These are not suggested to the user in the bash completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+ // BashCompletionFunction is a custom function used by the bash autocompletion generator.
+ BashCompletionFunction string
+
+ // Deprecated, if non-empty, marks this command as deprecated; this string is printed when the command is used.
+ Deprecated string
+
+ // Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print content of the "Version" variable.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // SilenceErrors is an option to quiet errors down stream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+ // will be printed when generating docs for this command
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+ // SuggestionsMinimumDistance defines the minimum Levenshtein distance at which suggestions are displayed.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // FParseErrWhitelist contains flag parse errors to be ignored
+ FParseErrWhitelist FParseErrWhitelist
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Max lengths of commands' string lengths for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+ // commandsAreSorted defines whether the command slice is sorted or not.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and children commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // output is an output writer defined by user.
+ output io.Writer
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate string
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate string
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // versionTemplate is the version template defined by user.
+ versionTemplate string
+}
+
+// SetArgs sets arguments for the command. It is set to os.Args[1:] by default; if desired, it can be overridden,
+// which is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (c *Command) SetOutput(output io.Writer) {
+ c.output = output
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ c.versionTemplate = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.output != nil {
+ return c.output
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ }
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString returns the usage string.
+func (c *Command) UsageString() string {
+ tmpOutput := c.output
+ bb := new(bytes.Buffer)
+ c.SetOutput(bb)
+ c.Usage()
+ c.output = tmpOutput
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns padding for the usage.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns padding for the command path.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != "" {
+ return c.usageTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate returns the help template for the command.
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != "" {
+ return c.helpTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != "" {
+ return c.versionTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
+
+// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user lose the admin argument (arg[4]).
+func argsMinusFirstX(args []string, x string) []string {
+ for i, y := range args {
+ if x == y {
+ ret := []string{}
+ ret = append(ret, args[:i]...)
+ ret = append(ret, args[i+1:]...)
+ return ret
+ }
+ }
+ return args
+}
+
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[1] == '-') ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree
+// Meant to be run on the highest node. Only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ suggestionsString := ""
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ suggestionsString += "\n\nDid you mean this?\n"
+ for _, s := range suggestions {
+ suggestionsString += fmt.Sprintf("\t%v\n", s)
+ }
+ }
+ return suggestionsString
+}
+
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if cmd.Name() == next || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ if len(matches) == 1 {
+ return matches[0]
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("Called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is called, regardless of other flags, return we want help.
+ // Also say we need help if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool("help")
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ break
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.validateRequiredFlags(); err != nil {
+ return err
+ }
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ break
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ // Regardless of what command execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help as the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpCmd()
+
+ var args []string
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ } else {
+ args = c.args
+ }
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If found parse to a subcommand and then failed, talk about the subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.Println("Error:", err.Error())
+ c.Printf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if err == flag.ErrHelp {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If root command has SilenceErrors flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.Println("Error:", err.Error())
+ }
+
+ // If root command has SilenceUsage flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return nil
+ }
+ return c.Args(c, args)
+}
+
+func (c *Command) validateRequiredFlags() error {
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
+
+// InitDefaultHelpFlag adds a default help flag to c.
+// It is called automatically by executing c or by calling help and usage.
+// If c already has help flag, it will do nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("help") == nil {
+ usage := "help for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().BoolP("help", "h", false, usage)
+ }
+}
+
+// InitDefaultVersionFlag adds a default version flag to c.
+// It is called automatically by executing c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().Bool("version", false, usage)
+ }
+}
+
+// InitDefaultHelpCmd adds a default help command to c.
+// It is called automatically by executing c or by calling help and usage.
+// If c already has a help command or c has no subcommands, it will do nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ c.Root().Usage()
+ } else {
+ cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+ cmd.Help()
+ }
+ },
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands, and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if they are already sorted or sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + c.Use
+ } else {
+ useline = c.Use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags is used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if a == s {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of the aliases starts
+// with prefix.
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns a comma-separated list of the command name and all aliases.
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has an example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ // do it here after merging all flags and just before parsing
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it makes a new one.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
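To show how the Command type vendored above is typically wired together (AddCommand, persistent flags, ExecuteC), here is a minimal sketch; the `tool` and `list` command names and the `--verbose` flag are hypothetical, not taken from this change:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use:   "tool",
		Short: "example root command",
	}
	// Persistent flags reach subcommands through parentsPflags/mergePersistentFlags.
	root.PersistentFlags().Bool("verbose", false, "enable verbose output")

	list := &cobra.Command{
		Use:     "list",
		Aliases: []string{"ls"},
		Short:   "list items",
		RunE: func(cmd *cobra.Command, args []string) error {
			verbose, _ := cmd.Flags().GetBool("verbose")
			fmt.Println("listing, verbose =", verbose)
			return nil
		},
	}
	root.AddCommand(list)

	// ExecuteC locates the target command (Find or Traverse), parses flags,
	// and runs the PersistentPreRun/PreRun/Run/PostRun chain.
	if _, err := root.ExecuteC(); err != nil {
		os.Exit(1)
	}
}
```

Returning errors from RunE (rather than handling them inside Run) lets ExecuteC apply SilenceErrors/SilenceUsage consistently across the command tree.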
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 000000000..6159c1cc1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 000000000..edec728e4
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,20 @@
+// +build windows
+
+package cobra
+
+import (
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ time.Sleep(5 * time.Second)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 000000000..889c22e27
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,126 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// GenZshCompletionFile generates a zsh completion file.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenZshCompletion(outFile)
+}
+
+// GenZshCompletion generates a zsh completion file and writes to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+
+ writeHeader(buf, c)
+ maxDepth := maxDepth(c)
+ writeLevelMapping(buf, maxDepth)
+ writeLevelCases(buf, maxDepth, c)
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func writeHeader(w io.Writer, cmd *Command) {
+ fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name())
+}
+
+func maxDepth(c *Command) int {
+ if len(c.Commands()) == 0 {
+ return 0
+ }
+ maxDepthSub := 0
+ for _, s := range c.Commands() {
+ subDepth := maxDepth(s)
+ if subDepth > maxDepthSub {
+ maxDepthSub = subDepth
+ }
+ }
+ return 1 + maxDepthSub
+}
+
+func writeLevelMapping(w io.Writer, numLevels int) {
+ fmt.Fprintln(w, `_arguments \`)
+ for i := 1; i <= numLevels; i++ {
+ fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i)
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files")
+ fmt.Fprintln(w)
+}
+
+func writeLevelCases(w io.Writer, maxDepth int, root *Command) {
+ fmt.Fprintln(w, "case $state in")
+ defer fmt.Fprintln(w, "esac")
+
+ for i := 1; i <= maxDepth; i++ {
+ fmt.Fprintf(w, " level%d)\n", i)
+ writeLevel(w, root, i)
+ fmt.Fprintln(w, " ;;")
+ }
+ fmt.Fprintln(w, " *)")
+ fmt.Fprintln(w, " _arguments '*: :_files'")
+ fmt.Fprintln(w, " ;;")
+}
+
+func writeLevel(w io.Writer, root *Command, i int) {
+ fmt.Fprintf(w, " case $words[%d] in\n", i)
+ defer fmt.Fprintln(w, " esac")
+
+ commands := filterByLevel(root, i)
+ byParent := groupByParent(commands)
+
+ for p, c := range byParent {
+ names := names(c)
+ fmt.Fprintf(w, " %s)\n", p)
+ fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " "))
+ fmt.Fprintln(w, " ;;")
+ }
+ fmt.Fprintln(w, " *)")
+ fmt.Fprintln(w, " _arguments '*: :_files'")
+ fmt.Fprintln(w, " ;;")
+
+}
+
+func filterByLevel(c *Command, l int) []*Command {
+ cs := make([]*Command, 0)
+ if l == 0 {
+ cs = append(cs, c)
+ return cs
+ }
+ for _, s := range c.Commands() {
+ cs = append(cs, filterByLevel(s, l-1)...)
+ }
+ return cs
+}
+
+func groupByParent(commands []*Command) map[string][]*Command {
+ m := make(map[string][]*Command)
+ for _, c := range commands {
+ parent := c.Parent()
+ if parent == nil {
+ continue
+ }
+ m[parent.Name()] = append(m[parent.Name()], c)
+ }
+ return m
+}
+
+func names(commands []*Command) []string {
+ ns := make([]string, len(commands))
+ for i, c := range commands {
+ ns[i] = c.Name()
+ }
+ return ns
+}
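+
+// Example (hypothetical usage; "rootCmd" and the output path are placeholders,
+// not part of this package):
+//
+//	if err := rootCmd.GenZshCompletionFile("/usr/share/zsh/site-functions/_myapp"); err != nil {
+//		log.Fatal(err)
+//	}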
diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/uber/jaeger-client-go/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
new file mode 100644
index 000000000..6d9546e5b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -0,0 +1,270 @@
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
+
+# Jaeger Bindings for Go OpenTracing API
+
+Instrumentation library that implements an
+[OpenTracing](http://opentracing.io) Tracer for Jaeger (https://jaegertracing.io).
+
+**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release.
+ * :white_check_mark: `import "github.com/uber/jaeger-client-go"`
+ * :x: `import "github.com/jaegertracing/jaeger-client-go"`
+
+## How to Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## Installation
+
+We recommend using a dependency manager like [glide](https://github.com/Masterminds/glide)
+and [semantic versioning](http://semver.org/) when including this library into an application.
+For example, Jaeger backend imports this library like this:
+
+```yaml
+- package: github.com/uber/jaeger-client-go
+ version: ^2.7.0
+```
+
+If you instead want to use the latest version in `master`, you can pull it via `go get`.
+Note that during `go get` you may see build errors due to incompatible dependencies, which is why
+we recommend using semantic versions for dependencies. The error may be fixed by running
+`make install` (it will install `glide` if you don't have it):
+
+```shell
+go get -u github.com/uber/jaeger-client-go/
+cd $GOPATH/src/github.com/uber/jaeger-client-go/
+git submodule update --init --recursive
+make install
+```
+
+## Initialization
+
+See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
+and [config/example_test.go](./config/example_test.go).
+
+### Environment variables
+
+The tracer can be initialized with values coming from environment variables. None of the env vars are required,
+and all of them can be overridden by setting the corresponding property directly on the configuration object.
+
+Property| Description
+--- | ---
+JAEGER_SERVICE_NAME | The service name
+JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP
+JAEGER_AGENT_PORT | The port for communicating with agent via UDP
+JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, e.g. http://jaeger-collector:14268/api/traces
+JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint
+JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint
+JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans
+JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size
+JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. "500ms" or "2s" ([valid units][timeunits])
+JAEGER_SAMPLER_TYPE | The sampler type
+JAEGER_SAMPLER_PARAM | The sampler parameter (number)
+JAEGER_SAMPLER_MANAGER_HOST_PORT | The HTTP endpoint when using the remote sampler, e.g. http://jaeger-agent:5778/sampling
+JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of
+JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy, with units, e.g. "1m" or "30s" ([valid units][timeunits])
+JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found
+JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used.
+JAEGER_RPC_METRICS | Whether to store RPC metrics
+
+By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
+`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
+to the endpoint via `HTTP`, in which case `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` are ignored. If `JAEGER_ENDPOINT` is
+secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment
+variables.
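+
+For example, a minimal sketch that builds a tracer purely from these environment
+variables (it assumes `JAEGER_SERVICE_NAME` is set; the helper name is illustrative):
+
+```go
+import (
+    "io"
+
+    "github.com/opentracing/opentracing-go"
+    jaegercfg "github.com/uber/jaeger-client-go/config"
+)
+
+// initTracerFromEnv reads the JAEGER_* environment variables, builds a tracer,
+// and installs it as the global OpenTracing tracer.
+func initTracerFromEnv() (io.Closer, error) {
+    cfg, err := jaegercfg.FromEnv()
+    if err != nil {
+        return nil, err
+    }
+    tracer, closer, err := cfg.NewTracer()
+    if err != nil {
+        return nil, err
+    }
+    opentracing.SetGlobalTracer(tracer)
+    return closer, nil
+}
+```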
+
+### Closing the tracer via `io.Closer`
+
+The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
+It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
+before exiting, e.g.
+
+```go
+tracer, closer, err := cfg.NewTracer(...)
+defer closer.Close()
+```
+
+This is especially useful for command-line tools that enable tracing, as well as
+for long-running apps that support graceful shutdown. For example, if your deployment
+system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
+exit, then having `defer closer.Close()` ensures that all buffered spans are flushed.
+
+### Metrics & Monitoring
+
+The tracer emits a number of different metrics, defined in
+[metrics.go](metrics.go). The monitoring backend is expected to support
+tag-based metric names, e.g. instead of `statsd`-style string names
+like `counters.my-service.jaeger.spans.started.sampled`, the metrics
+are defined by a short name and a collection of key/value tags, for
+example: `name:jaeger.traces, state:started, sampled:y`. See the [metrics.go](./metrics.go)
+file for the full list and descriptions of emitted metrics.
+
+The monitoring backend is represented by the `metrics.Factory` interface from package
+[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation
+of that interface can be passed as an option to either the Configuration object or the Tracer
+constructor, for example:
+
+```go
+import (
+ "github.com/uber/jaeger-client-go/config"
+ "github.com/uber/jaeger-lib/metrics/prometheus"
+)
+
+ metricsFactory := prometheus.New()
+ tracer, closer, err := config.Configuration{
+ ServiceName: "your-service-name",
+ }.NewTracer(
+ config.Metrics(metricsFactory),
+ )
+```
+
+By default, a no-op `metrics.NullFactory` is used.
+
+### Logging
+
+The tracer can be configured with an optional logger, which will be
+used to log communication errors, or log spans if a logging reporter
+option is specified in the configuration. The logging API is abstracted
+by the [Logger](logger.go) interface. A logger instance implementing
+this interface can be set on the `Config` object before calling the
+`New` method.
+
+Besides the [zap](https://github.com/uber-go/zap) implementation
+bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
+one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
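+
+A minimal sketch (assuming the `jaeger.StdLogger` instance exported by the main
+package) that enables logging through the `config.Logger` option:
+
+```go
+import (
+    "github.com/uber/jaeger-client-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+    cfg := config.Configuration{ServiceName: "my-service"}
+    tracer, closer, err := cfg.NewTracer(
+        config.Logger(jaeger.StdLogger), // reporter errors (and spans, if LogSpans is on) are logged
+    )
+```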
+
+## Instrumentation for Tracing
+
+Since this tracer is fully compliant with OpenTracing API 1.0,
+all code instrumentation should only use the API itself, as described
+in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
+
+## Features
+
+### Reporters
+
+A "reporter" is a component that receives the finished spans and reports
+them to somewhere. Under normal circumstances, the Tracer
+should use the default `RemoteReporter`, which sends the spans out of
+process via configurable "transport". For testing purposes, one can
+use an `InMemoryReporter` that accumulates spans in a buffer and
+allows to retrieve them for later verification. Also available are
+`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`
+which logs all finished spans using their `String()` method, and a
+`CompositeReporter` that can be used to combine more than one reporter
+into one, e.g. to attach a logging reporter to the main remote reporter.
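+
+For tests, a sketch (assuming the `InMemoryReporter` constructor and its
+`GetSpans()` accessor exported by the main package) that injects the reporter
+via the `config.Reporter` option:
+
+```go
+import (
+    "github.com/uber/jaeger-client-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+    reporter := jaeger.NewInMemoryReporter()
+    tracer, closer, err := config.Configuration{
+        ServiceName: "test-service",
+    }.NewTracer(
+        config.Reporter(reporter),
+    )
+    // ... exercise the code under test with `tracer` ...
+    finished := reporter.GetSpans() // finished spans, available for assertions
+```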
+
+### Span Reporting Transports
+
+The remote reporter uses "transports" to actually send the spans out
+of process. Currently the supported transports include:
+ * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
+ * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
+
+### Sampling
+
+The tracer does not record all spans, but only those that have the
+sampling bit set in the `flags`. When a new trace is started and a new
+unique ID is generated, a sampling decision is made whether this trace
+should be sampled. The sampling decision is propagated to all downstream
+calls via the `flags` field of the trace context. The following samplers
+are available:
+ 1. `RemotelyControlledSampler` uses one of the other simpler samplers
+ and periodically updates it by polling an external server. This
+ allows dynamic control of the sampling strategies.
+ 1. `ConstSampler` always makes the same sampling decision for all
+ trace IDs. It can be configured to either sample all traces, or
+ to sample none.
+ 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
+ for a given trace to be sampled. The actual decision is made by
+ comparing the trace ID with a random number multiplied by the
+ sampling rate.
+ 1. `RateLimitingSampler` can be used to allow only a certain fixed
+ number of traces to be sampled per second.
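+
+For example, a sketch that picks a probabilistic sampler sampling roughly 1% of
+traces, using the `SamplerConfig` from the `config` package:
+
+```go
+import "github.com/uber/jaeger-client-go/config"
+
+    cfg := config.Configuration{
+        ServiceName: "my-service",
+        Sampler: &config.SamplerConfig{
+            Type:  "probabilistic",
+            Param: 0.01, // sample ~1% of traces
+        },
+    }
+    tracer, closer, err := cfg.NewTracer()
+```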
+
+### Baggage Injection
+
+The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added
+to the span context and propagated throughout the trace. An external process can inject baggage
+by setting the special HTTP Header `jaeger-baggage` on a request:
+
+```sh
+curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
+```
+
+Baggage can also be programmatically set inside your service:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+ span.SetBaggageItem("key", "value")
+}
+```
+
+Another service downstream of that can retrieve the baggage in a similar way:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+ val := span.BaggageItem("key")
+ println(val)
+}
+```
+
+### Debug Traces (Forced Sampling)
+
+#### Programmatically
+
+The OpenTracing API defines a `sampling.priority` standard tag that
+can be used to affect the sampling of a span and its children:
+
+```go
+import (
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+)
+
+span := opentracing.SpanFromContext(ctx)
+ext.SamplingPriority.Set(span, 1)
+```
+
+#### Via HTTP Headers
+
+Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`,
+which can be set in the incoming request, e.g.
+
+```sh
+curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
+```
+
+When Jaeger sees this header in a request that otherwise has no
+tracing context, it ensures that the new trace started for this
+request will be sampled in the "debug" mode (meaning it should survive
+all downsampling that might happen in the collection pipeline), and the
+root span will have a tag as if this statement was executed:
+
+```go
+span.SetTag("jaeger-debug-id", "some-correlation-id")
+```
+
+This allows using Jaeger UI to find the trace by this tag.
+
+### Zipkin HTTP B3 compatible header propagation
+
+Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
+by many Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin).
+
+However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up.
+
+## License
+
+[Apache 2.0 License](LICENSE).
+
+
+[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
+[doc]: https://godoc.org/github.com/uber/jaeger-client-go
+[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
+[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
+[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
+[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
+[ot-url]: http://opentracing.io
+[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
+[timeunits]: https://golang.org/pkg/time/#ParseDuration
diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
new file mode 100644
index 000000000..1037ca0e8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go/log"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+)
+
+// baggageSetter is an actor that can set a baggage value on a Span given certain
+// restrictions (e.g. maxValueLength).
+type baggageSetter struct {
+ restrictionManager baggage.RestrictionManager
+ metrics *Metrics
+}
+
+func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
+ return &baggageSetter{
+ restrictionManager: restrictionManager,
+ metrics: metrics,
+ }
+}
+
+// (NB) span should hold the lock before making this call
+func (s *baggageSetter) setBaggage(span *Span, key, value string) {
+ var truncated bool
+ var prevItem string
+ restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
+ if !restriction.KeyAllowed() {
+ s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+ s.metrics.BaggageUpdateFailure.Inc(1)
+ return
+ }
+ if len(value) > restriction.MaxValueLength() {
+ truncated = true
+ value = value[:restriction.MaxValueLength()]
+ s.metrics.BaggageTruncate.Inc(1)
+ }
+ prevItem = span.context.baggage[key]
+ s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+ span.context = span.context.WithBaggageItem(key, value)
+ s.metrics.BaggageUpdateSuccess.Inc(1)
+}
+
+func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
+ if !span.context.IsSampled() {
+ return
+ }
+ fields := []log.Field{
+ log.String("event", "baggage"),
+ log.String("key", key),
+ log.String("value", value),
+ }
+ if prevItem != "" {
+ fields = append(fields, log.String("override", "true"))
+ }
+ if truncated {
+ fields = append(fields, log.String("truncated", "true"))
+ }
+ if !valid {
+ fields = append(fields, log.String("invalid", "true"))
+ }
+ span.logFieldsNoLocking(fields...)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
new file mode 100644
index 000000000..821333ddb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -0,0 +1,395 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/uber/jaeger-client-go"
+ "github.com/uber/jaeger-client-go/internal/baggage/remote"
+ throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
+ "github.com/uber/jaeger-client-go/rpcmetrics"
+ "github.com/uber/jaeger-client-go/transport"
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+const defaultSamplingProbability = 0.001
+
+// Configuration configures and creates Jaeger Tracer
+type Configuration struct {
+ // ServiceName specifies the service name to use on the tracer.
+ // Can be provided via environment variable named JAEGER_SERVICE_NAME
+ ServiceName string `yaml:"serviceName"`
+
+ // Disabled can be provided via environment variable named JAEGER_DISABLED
+ Disabled bool `yaml:"disabled"`
+
+ // RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS
+ RPCMetrics bool `yaml:"rpc_metrics"`
+
+ // Tags can be provided via environment variable named JAEGER_TAGS
+ Tags []opentracing.Tag `yaml:"tags"`
+
+ Sampler *SamplerConfig `yaml:"sampler"`
+ Reporter *ReporterConfig `yaml:"reporter"`
+ Headers *jaeger.HeadersConfig `yaml:"headers"`
+ BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
+ Throttler *ThrottlerConfig `yaml:"throttler"`
+}
+
+// SamplerConfig allows initializing a non-default sampler. All fields are optional.
+type SamplerConfig struct {
+ // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE
+ Type string `yaml:"type"`
+
+ // Param is a value passed to the sampler.
+ // Valid values for Param field are:
+ // - for "const" sampler, 0 or 1 for always false/true respectively
+ // - for "probabilistic" sampler, a probability between 0 and 1
+ // - for "rateLimiting" sampler, the number of spans per second
+ // - for "remote" sampler, param is the same as for "probabilistic"
+ // and indicates the initial sampling rate before the actual one
+ // is received from the mothership.
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM
+ Param float64 `yaml:"param"`
+
+ // SamplingServerURL is the address of jaeger-agent's HTTP sampling server
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT
+ SamplingServerURL string `yaml:"samplingServerURL"`
+
+ // MaxOperations is the maximum number of operations that the sampler
+ // will keep track of. If an operation is not tracked, a default probabilistic
+ // sampler will be used rather than the per operation specific sampler.
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS
+ MaxOperations int `yaml:"maxOperations"`
+
+ // SamplingRefreshInterval controls how often the remotely controlled sampler will poll
+ // jaeger-agent for the appropriate sampling strategy.
+ // Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
+ SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
+}
+
+// ReporterConfig configures the reporter. All fields are optional.
+type ReporterConfig struct {
+ // QueueSize controls how many spans the reporter can keep in memory before it starts dropping
+ // new spans. The queue is continuously drained by a background go-routine, as fast as spans
+ // can be sent out of process.
+ // Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
+ QueueSize int `yaml:"queueSize"`
+
+ // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
+ // It is generally not useful, as it only matters for very low traffic services.
+ // Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
+ BufferFlushInterval time.Duration
+
+ // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
+ // and logs all submitted spans. Main Configuration.Logger must be initialized in the code
+ // for this option to have any effect.
+ // Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS
+ LogSpans bool `yaml:"logSpans"`
+
+ // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
+ // Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
+ LocalAgentHostPort string `yaml:"localAgentHostPort"`
+
+ // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL
+ // Can be set by exporting an environment variable named JAEGER_ENDPOINT
+ CollectorEndpoint string `yaml:"collectorEndpoint"`
+
+ // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
+ // Can be set by exporting an environment variable named JAEGER_USER
+ User string `yaml:"user"`
+
+ // Password instructs reporter to include a password for basic http authentication when sending spans to
+ // jaeger-collector. Can be set by exporting an environment variable named JAEGER_PASSWORD
+ Password string `yaml:"password"`
+}
+
+// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
+// certain baggage keys. All fields are optional.
+type BaggageRestrictionsConfig struct {
+ // DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
+ // manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
+ // been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
+ // restrictions have been retrieved from jaeger-agent.
+ DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
+
+ // HostPort is the hostPort of jaeger-agent's baggage restrictions server
+ HostPort string `yaml:"hostPort"`
+
+ // RefreshInterval controls how often the baggage restriction manager will poll
+ // jaeger-agent for the most recent baggage restrictions.
+ RefreshInterval time.Duration `yaml:"refreshInterval"`
+}
+
+// ThrottlerConfig configures the throttler which can be used to throttle the
+// rate at which the client may send debug requests.
+type ThrottlerConfig struct {
+ // HostPort of jaeger-agent's credit server.
+ HostPort string `yaml:"hostPort"`
+
+ // RefreshInterval controls how often the throttler will poll jaeger-agent
+ // for more throttling credits.
+ RefreshInterval time.Duration `yaml:"refreshInterval"`
+
+ // SynchronousInitialization determines whether or not the throttler should
+ // synchronously fetch credits from the agent when an operation is seen for
+ // the first time. This should be set to true if the client will be used by
+ // a short-lived service that needs to ensure that credits are fetched
+ // upfront such that sampling or throttling occurs.
+ SynchronousInitialization bool `yaml:"synchronousInitialization"`
+}
+
+type nullCloser struct{}
+
+func (*nullCloser) Close() error { return nil }
+
+// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
+// before shutdown.
+//
+// Deprecated: use NewTracer() function
+func (c Configuration) New(
+ serviceName string,
+ options ...Option,
+) (opentracing.Tracer, io.Closer, error) {
+ if serviceName != "" {
+ c.ServiceName = serviceName
+ }
+
+ return c.NewTracer(options...)
+}
+
+// NewTracer returns a new tracer based on the current configuration, using the given options,
+// and a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
+ if c.ServiceName == "" {
+ return nil, nil, errors.New("no service name provided")
+ }
+
+ if c.Disabled {
+ return &opentracing.NoopTracer{}, &nullCloser{}, nil
+ }
+ opts := applyOptions(options...)
+ tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
+ if c.RPCMetrics {
+ Observer(
+ rpcmetrics.NewObserver(
+ opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}),
+ rpcmetrics.DefaultNameNormalizer,
+ ),
+ )(&opts) // adds to c.observers
+ }
+ if c.Sampler == nil {
+ c.Sampler = &SamplerConfig{
+ Type: jaeger.SamplerTypeRemote,
+ Param: defaultSamplingProbability,
+ }
+ }
+ if c.Reporter == nil {
+ c.Reporter = &ReporterConfig{}
+ }
+
+ sampler := opts.sampler
+ if sampler == nil {
+ s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
+ if err != nil {
+ return nil, nil, err
+ }
+ sampler = s
+ }
+
+ reporter := opts.reporter
+ if reporter == nil {
+ r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
+ if err != nil {
+ return nil, nil, err
+ }
+ reporter = r
+ }
+
+ tracerOptions := []jaeger.TracerOption{
+ jaeger.TracerOptions.Metrics(tracerMetrics),
+ jaeger.TracerOptions.Logger(opts.logger),
+ jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
+ jaeger.TracerOptions.Gen128Bit(opts.gen128Bit),
+ jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
+ jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
+ }
+
+ for _, tag := range opts.tags {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
+ }
+
+ for _, tag := range c.Tags {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
+ }
+
+ for _, obs := range opts.observers {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
+ }
+
+ for _, cobs := range opts.contribObservers {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
+ }
+
+ for format, injector := range opts.injectors {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
+ }
+
+ for format, extractor := range opts.extractors {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
+ }
+
+ if c.BaggageRestrictions != nil {
+ mgr := remote.NewRestrictionManager(
+ c.ServiceName,
+ remote.Options.Metrics(tracerMetrics),
+ remote.Options.Logger(opts.logger),
+ remote.Options.HostPort(c.BaggageRestrictions.HostPort),
+ remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
+ remote.Options.DenyBaggageOnInitializationFailure(
+ c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
+ ),
+ )
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
+ }
+
+ if c.Throttler != nil {
+ debugThrottler := throttler.NewThrottler(
+ c.ServiceName,
+ throttler.Options.Metrics(tracerMetrics),
+ throttler.Options.Logger(opts.logger),
+ throttler.Options.HostPort(c.Throttler.HostPort),
+ throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
+ throttler.Options.SynchronousInitialization(
+ c.Throttler.SynchronousInitialization,
+ ),
+ )
+
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
+ }
+
+ tracer, closer := jaeger.NewTracer(
+ c.ServiceName,
+ sampler,
+ reporter,
+ tracerOptions...,
+ )
+
+ return tracer, closer, nil
+}
+
+// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
+// It returns a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) InitGlobalTracer(
+ serviceName string,
+ options ...Option,
+) (io.Closer, error) {
+ if c.Disabled {
+ return &nullCloser{}, nil
+ }
+ tracer, closer, err := c.New(serviceName, options...)
+ if err != nil {
+ return nil, err
+ }
+ opentracing.SetGlobalTracer(tracer)
+ return closer, nil
+}
+
+// NewSampler creates a new sampler based on the configuration
+func (sc *SamplerConfig) NewSampler(
+ serviceName string,
+ metrics *jaeger.Metrics,
+) (jaeger.Sampler, error) {
+ samplerType := strings.ToLower(sc.Type)
+ if samplerType == jaeger.SamplerTypeConst {
+ return jaeger.NewConstSampler(sc.Param != 0), nil
+ }
+ if samplerType == jaeger.SamplerTypeProbabilistic {
+ if sc.Param >= 0 && sc.Param <= 1.0 {
+ return jaeger.NewProbabilisticSampler(sc.Param)
+ }
+ return nil, fmt.Errorf(
+ "Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1",
+ sc.Param,
+ )
+ }
+ if samplerType == jaeger.SamplerTypeRateLimiting {
+ return jaeger.NewRateLimitingSampler(sc.Param), nil
+ }
+ if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
+ sc2 := *sc
+ sc2.Type = jaeger.SamplerTypeProbabilistic
+ initSampler, err := sc2.NewSampler(serviceName, nil)
+ if err != nil {
+ return nil, err
+ }
+ options := []jaeger.SamplerOption{
+ jaeger.SamplerOptions.Metrics(metrics),
+ jaeger.SamplerOptions.InitialSampler(initSampler),
+ jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
+ }
+ if sc.MaxOperations != 0 {
+ options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations))
+ }
+ if sc.SamplingRefreshInterval != 0 {
+ options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval))
+ }
+ return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
+ }
+ return nil, fmt.Errorf("Unknown sampler type %v", sc.Type)
+}
+
+// NewReporter instantiates a new reporter that submits spans to the collector
+func (rc *ReporterConfig) NewReporter(
+ serviceName string,
+ metrics *jaeger.Metrics,
+ logger jaeger.Logger,
+) (jaeger.Reporter, error) {
+ sender, err := rc.newTransport()
+ if err != nil {
+ return nil, err
+ }
+ reporter := jaeger.NewRemoteReporter(
+ sender,
+ jaeger.ReporterOptions.QueueSize(rc.QueueSize),
+ jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
+ jaeger.ReporterOptions.Logger(logger),
+ jaeger.ReporterOptions.Metrics(metrics))
+ if rc.LogSpans && logger != nil {
+ logger.Infof("Initializing logging reporter\n")
+ reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
+ }
+ return reporter, err
+}
+
+func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) {
+ switch {
+ case rc.CollectorEndpoint != "" && rc.User != "" && rc.Password != "":
+ return transport.NewHTTPTransport(rc.CollectorEndpoint, transport.HTTPBatchSize(1),
+ transport.HTTPBasicAuth(rc.User, rc.Password)), nil
+ case rc.CollectorEndpoint != "":
+ return transport.NewHTTPTransport(rc.CollectorEndpoint, transport.HTTPBatchSize(1)), nil
+ default:
+ return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0)
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
new file mode 100644
index 000000000..ff70ae12c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
@@ -0,0 +1,221 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/pkg/errors"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ // environment variable names
+ envServiceName = "JAEGER_SERVICE_NAME"
+ envDisabled = "JAEGER_DISABLED"
+ envRPCMetrics = "JAEGER_RPC_METRICS"
+ envTags = "JAEGER_TAGS"
+ envSamplerType = "JAEGER_SAMPLER_TYPE"
+ envSamplerParam = "JAEGER_SAMPLER_PARAM"
+ envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT"
+ envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
+ envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
+ envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
+ envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
+ envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
+ envEndpoint = "JAEGER_ENDPOINT"
+ envUser = "JAEGER_USER"
+ envPassword = "JAEGER_PASSWORD"
+ envAgentHost = "JAEGER_AGENT_HOST"
+ envAgentPort = "JAEGER_AGENT_PORT"
+)
+
+// FromEnv uses environment variables to set the tracer's Configuration
+func FromEnv() (*Configuration, error) {
+ c := &Configuration{}
+
+ if e := os.Getenv(envServiceName); e != "" {
+ c.ServiceName = e
+ }
+
+ if e := os.Getenv(envRPCMetrics); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ c.RPCMetrics = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
+ }
+ }
+
+ if e := os.Getenv(envDisabled); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ c.Disabled = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
+ }
+ }
+
+ if e := os.Getenv(envTags); e != "" {
+ c.Tags = parseTags(e)
+ }
+
+ if s, err := samplerConfigFromEnv(); err == nil {
+ c.Sampler = s
+ } else {
+ return nil, errors.Wrap(err, "cannot obtain sampler config from env")
+ }
+
+ if r, err := reporterConfigFromEnv(); err == nil {
+ c.Reporter = r
+ } else {
+ return nil, errors.Wrap(err, "cannot obtain reporter config from env")
+ }
+
+ return c, nil
+}
+
+// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
+func samplerConfigFromEnv() (*SamplerConfig, error) {
+ sc := &SamplerConfig{}
+
+ if e := os.Getenv(envSamplerType); e != "" {
+ sc.Type = e
+ }
+
+ if e := os.Getenv(envSamplerParam); e != "" {
+ if value, err := strconv.ParseFloat(e, 64); err == nil {
+ sc.Param = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
+ }
+ }
+
+ if e := os.Getenv(envSamplerManagerHostPort); e != "" {
+ sc.SamplingServerURL = e
+ }
+
+ if e := os.Getenv(envSamplerMaxOperations); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ sc.MaxOperations = int(value)
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
+ }
+ }
+
+ if e := os.Getenv(envSamplerRefreshInterval); e != "" {
+ if value, err := time.ParseDuration(e); err == nil {
+ sc.SamplingRefreshInterval = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
+ }
+ }
+
+ return sc, nil
+}
+
+// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
+func reporterConfigFromEnv() (*ReporterConfig, error) {
+ rc := &ReporterConfig{}
+
+ if e := os.Getenv(envReporterMaxQueueSize); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ rc.QueueSize = int(value)
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
+ }
+ }
+
+ if e := os.Getenv(envReporterFlushInterval); e != "" {
+ if value, err := time.ParseDuration(e); err == nil {
+ rc.BufferFlushInterval = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
+ }
+ }
+
+ if e := os.Getenv(envReporterLogSpans); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ rc.LogSpans = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
+ }
+ }
+
+ if e := os.Getenv(envEndpoint); e != "" {
+ u, err := url.ParseRequestURI(e)
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e)
+ }
+ rc.CollectorEndpoint = u.String()
+ user := os.Getenv(envUser)
+ pswd := os.Getenv(envPassword)
+ if user != "" && pswd == "" || user == "" && pswd != "" {
+ return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword)
+ }
+ rc.User = user
+ rc.Password = pswd
+ } else {
+ host := jaeger.DefaultUDPSpanServerHost
+ if e := os.Getenv(envAgentHost); e != "" {
+ host = e
+ }
+
+ port := jaeger.DefaultUDPSpanServerPort
+ if e := os.Getenv(envAgentPort); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ port = int(value)
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
+ }
+ }
+ rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
+ }
+
+ return rc, nil
+}
+
+// parseTags parses the given string into a collection of Tags.
+// Spec for this value:
+// - comma separated list of key=value
+// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
+// is an environment variable and `defaultValue` is the value to use in case the env var is not set
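+// For example (hypothetical values), "az=us-east-1,hostname=${HOSTNAME:localhost}"
+// yields tags az=us-east-1 and hostname set to $HOSTNAME, or "localhost" if unset.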
+func parseTags(sTags string) []opentracing.Tag {
+ pairs := strings.Split(sTags, ",")
+ tags := make([]opentracing.Tag, 0)
+ for _, p := range pairs {
+ kv := strings.SplitN(p, "=", 2)
+ k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
+
+ if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
+ ed := strings.SplitN(v[2:len(v)-1], ":", 2)
+ e, d := ed[0], ed[1]
+ v = os.Getenv(e)
+ if v == "" && d != "" {
+ v = d
+ }
+ }
+
+ tag := opentracing.Tag{Key: k, Value: v}
+ tags = append(tags, tag)
+ }
+
+ return tags
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go
new file mode 100644
index 000000000..9eed0ec75
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/options.go
@@ -0,0 +1,148 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ opentracing "github.com/opentracing/opentracing-go"
+ "github.com/uber/jaeger-lib/metrics"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+// Option is a function that sets some option on the client.
+type Option func(c *Options)
+
+// Options control behavior of the client.
+type Options struct {
+ metrics metrics.Factory
+ logger jaeger.Logger
+ reporter jaeger.Reporter
+ sampler jaeger.Sampler
+ contribObservers []jaeger.ContribObserver
+ observers []jaeger.Observer
+ gen128Bit bool
+ zipkinSharedRPCSpan bool
+ maxTagValueLength int
+ tags []opentracing.Tag
+ injectors map[interface{}]jaeger.Injector
+ extractors map[interface{}]jaeger.Extractor
+}
+
+// Metrics creates an Option that initializes Metrics in the tracer,
+// which is used to emit statistics about spans.
+func Metrics(factory metrics.Factory) Option {
+ return func(c *Options) {
+ c.metrics = factory
+ }
+}
+
+// Logger can be provided to log Reporter errors, as well as to log spans
+// if Reporter.LogSpans is set to true.
+func Logger(logger jaeger.Logger) Option {
+ return func(c *Options) {
+ c.logger = logger
+ }
+}
+
+// Reporter can be provided explicitly to override the configuration.
+// Useful for testing, e.g. by passing InMemoryReporter.
+func Reporter(reporter jaeger.Reporter) Option {
+ return func(c *Options) {
+ c.reporter = reporter
+ }
+}
+
+// Sampler can be provided explicitly to override the configuration.
+func Sampler(sampler jaeger.Sampler) Option {
+ return func(c *Options) {
+ c.sampler = sampler
+ }
+}
+
+// Observer can be registered with the Tracer to receive notifications about new Spans.
+func Observer(observer jaeger.Observer) Option {
+ return func(c *Options) {
+ c.observers = append(c.observers, observer)
+ }
+}
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new spans.
+func ContribObserver(observer jaeger.ContribObserver) Option {
+ return func(c *Options) {
+ c.contribObservers = append(c.contribObservers, observer)
+ }
+}
+
+// Gen128Bit specifies whether to generate 128bit trace IDs.
+func Gen128Bit(gen128Bit bool) Option {
+ return func(c *Options) {
+ c.gen128Bit = gen128Bit
+ }
+}
+
+// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
+// and server spans a la zipkin. If false, client and server spans will be assigned
+// different IDs.
+func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
+ return func(c *Options) {
+ c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+ }
+}
+
+// MaxTagValueLength can be provided to override the default max tag value length.
+func MaxTagValueLength(maxTagValueLength int) Option {
+ return func(c *Options) {
+ c.maxTagValueLength = maxTagValueLength
+ }
+}
+
+// Tag creates an option that adds a tracer-level tag.
+func Tag(key string, value interface{}) Option {
+ return func(c *Options) {
+ c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
+ }
+}
+
+// Injector registers an Injector with the given format.
+func Injector(format interface{}, injector jaeger.Injector) Option {
+ return func(c *Options) {
+ c.injectors[format] = injector
+ }
+}
+
+// Extractor registers an Extractor with the given format.
+func Extractor(format interface{}, extractor jaeger.Extractor) Option {
+ return func(c *Options) {
+ c.extractors[format] = extractor
+ }
+}
+
+func applyOptions(options ...Option) Options {
+ opts := Options{
+ injectors: make(map[interface{}]jaeger.Injector),
+ extractors: make(map[interface{}]jaeger.Extractor),
+ }
+ for _, option := range options {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = metrics.NullFactory
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
new file mode 100644
index 000000000..5a4e18752
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+const (
+ // JaegerClientVersion is the version of the client library reported as Span tag.
+ JaegerClientVersion = "Go-2.15.1dev"
+
+ // JaegerClientVersionTagKey is the name of the tag used to report client version.
+ JaegerClientVersionTagKey = "jaeger.version"
+
+ // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
+ // if found in the carrier, forces the trace to be sampled as "debug" trace.
+ // The value of the header is recorded as the tag on the root span, so that the
+ // trace can be found in the UI using this value as a correlation ID.
+ JaegerDebugHeader = "jaeger-debug-id"
+
+ // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+ // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+ // a root span does not exist.
+ JaegerBaggageHeader = "jaeger-baggage"
+
+ // TracerHostnameTagKey used to report host name of the process.
+ TracerHostnameTagKey = "hostname"
+
+ // TracerIPTagKey used to report ip of the process.
+ TracerIPTagKey = "ip"
+
+ // TracerUUIDTagKey used to report UUID of the client process.
+ TracerUUIDTagKey = "client-uuid"
+
+ // SamplerTypeTagKey reports which sampler was used on the root span.
+ SamplerTypeTagKey = "sampler.type"
+
+ // SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
+ SamplerParamTagKey = "sampler.param"
+
+ // TraceContextHeaderName is the http header name used to propagate tracing context.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceContextHeaderName = "uber-trace-id"
+
+ // TracerStateHeaderName is deprecated.
+ // Deprecated: use TraceContextHeaderName
+ TracerStateHeaderName = TraceContextHeaderName
+
+ // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceBaggageHeaderPrefix = "uberctx-"
+
+ // SamplerTypeConst is the type of sampler that always makes the same decision.
+ SamplerTypeConst = "const"
+
+ // SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
+ SamplerTypeRemote = "remote"
+
+ // SamplerTypeProbabilistic is the type of sampler that samples traces
+ // with a certain fixed probability.
+ SamplerTypeProbabilistic = "probabilistic"
+
+ // SamplerTypeRateLimiting is the type of sampler that samples
+ // only up to a fixed number of traces per second.
+ SamplerTypeRateLimiting = "ratelimiting"
+
+ // SamplerTypeLowerBound is the type of sampler that samples
+ // at least a fixed number of traces per second.
+ SamplerTypeLowerBound = "lowerbound"
+
+ // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
+ DefaultUDPSpanServerHost = "localhost"
+
+ // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
+ DefaultUDPSpanServerPort = 6831
+
+ // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
+ DefaultMaxTagValueLength = 256
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/context.go
new file mode 100644
index 000000000..8b06173d9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/context.go
@@ -0,0 +1,258 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ flagSampled = byte(1)
+ flagDebug = byte(2)
+)
+
+var (
+ errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
+ errMalformedTracerStateString = errors.New("String does not match tracer state format")
+
+ emptyContext = SpanContext{}
+)
+
+// TraceID represents unique 128bit identifier of a trace
+type TraceID struct {
+ High, Low uint64
+}
+
+// SpanID represents unique 64bit identifier of a span
+type SpanID uint64
+
+// SpanContext represents propagated span identity and state
+type SpanContext struct {
+ // traceID represents globally unique ID of the trace.
+ // Usually generated as a random number.
+ traceID TraceID
+
+ // spanID represents span ID that must be unique within its trace,
+ // but does not have to be globally unique.
+ spanID SpanID
+
+ // parentID refers to the ID of the parent span.
+ // Should be 0 if the current span is a root span.
+ parentID SpanID
+
+ // flags is a bitmap containing such bits as 'sampled' and 'debug'.
+ flags byte
+
+ // Distributed Context baggage. This is a snapshot in time.
+ baggage map[string]string
+
+ // debugID can be set to some correlation ID when the context is being
+ // extracted from a TextMap carrier.
+ //
+ // See JaegerDebugHeader in constants.go
+ debugID string
+}
+
+// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
+func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+ for k, v := range c.baggage {
+ if !handler(k, v) {
+ break
+ }
+ }
+}
+
+// IsSampled returns whether this trace was chosen for permanent storage
+// by the sampling mechanism of the tracer.
+func (c SpanContext) IsSampled() bool {
+ return (c.flags & flagSampled) == flagSampled
+}
+
+// IsDebug indicates whether sampling was explicitly requested by the service.
+func (c SpanContext) IsDebug() bool {
+ return (c.flags & flagDebug) == flagDebug
+}
+
+// IsValid indicates whether this context actually represents a valid trace.
+func (c SpanContext) IsValid() bool {
+ return c.traceID.IsValid() && c.spanID != 0
+}
+
+func (c SpanContext) String() string {
+ if c.traceID.High == 0 {
+ return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
+ }
+ return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
+}
+
+// ContextFromString reconstructs the Context encoded in a string
+func ContextFromString(value string) (SpanContext, error) {
+ var context SpanContext
+ if value == "" {
+ return emptyContext, errEmptyTracerStateString
+ }
+ parts := strings.Split(value, ":")
+ if len(parts) != 4 {
+ return emptyContext, errMalformedTracerStateString
+ }
+ var err error
+ if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
+ return emptyContext, err
+ }
+ if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
+ return emptyContext, err
+ }
+ if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
+ return emptyContext, err
+ }
+ flags, err := strconv.ParseUint(parts[3], 10, 8)
+ if err != nil {
+ return emptyContext, err
+ }
+ context.flags = byte(flags)
+ return context, nil
+}
+
+// TraceID returns the trace ID of this span context
+func (c SpanContext) TraceID() TraceID {
+ return c.traceID
+}
+
+// SpanID returns the span ID of this span context
+func (c SpanContext) SpanID() SpanID {
+ return c.spanID
+}
+
+// ParentID returns the parent span ID of this span context
+func (c SpanContext) ParentID() SpanID {
+ return c.parentID
+}
+
+// NewSpanContext creates a new instance of SpanContext
+func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
+ flags := byte(0)
+ if sampled {
+ flags = flagSampled
+ }
+ return SpanContext{
+ traceID: traceID,
+ spanID: spanID,
+ parentID: parentID,
+ flags: flags,
+ baggage: baggage}
+}
+
+// CopyFrom copies data from ctx into this context, including span identity and baggage.
+// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
+func (c *SpanContext) CopyFrom(ctx *SpanContext) {
+ c.traceID = ctx.traceID
+ c.spanID = ctx.spanID
+ c.parentID = ctx.parentID
+ c.flags = ctx.flags
+ if l := len(ctx.baggage); l > 0 {
+ c.baggage = make(map[string]string, l)
+ for k, v := range ctx.baggage {
+ c.baggage[k] = v
+ }
+ } else {
+ c.baggage = nil
+ }
+}
+
+// WithBaggageItem creates a new context with an extra baggage item.
+func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
+ var newBaggage map[string]string
+ if c.baggage == nil {
+ newBaggage = map[string]string{key: value}
+ } else {
+ newBaggage = make(map[string]string, len(c.baggage)+1)
+ for k, v := range c.baggage {
+ newBaggage[k] = v
+ }
+ newBaggage[key] = value
+ }
+ // Use positional parameters so the compiler will help catch new fields.
+ return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""}
+}
+
+// isDebugIDContainerOnly returns true when the instance of the context is only
+// used to return the debug/correlation ID from extract() method. This happens
+// in the situation when "jaeger-debug-id" header is passed in the carrier to
+// the extract() method, but the request otherwise has no span context in it.
+// Previously this would've returned opentracing.ErrSpanContextNotFound from the
+// extract method, but now it returns a dummy context with only debugID filled in.
+//
+// See JaegerDebugHeader in constants.go
+// See textMapPropagator#Extract
+func (c *SpanContext) isDebugIDContainerOnly() bool {
+ return !c.traceID.IsValid() && c.debugID != ""
+}
+
+// ------- TraceID -------
+
+func (t TraceID) String() string {
+ if t.High == 0 {
+ return fmt.Sprintf("%x", t.Low)
+ }
+ return fmt.Sprintf("%x%016x", t.High, t.Low)
+}
+
+// TraceIDFromString creates a TraceID from a hexadecimal string
+func TraceIDFromString(s string) (TraceID, error) {
+ var hi, lo uint64
+ var err error
+ if len(s) > 32 {
+ return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
+ } else if len(s) > 16 {
+ hiLen := len(s) - 16
+ if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ } else {
+ if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ }
+ return TraceID{High: hi, Low: lo}, nil
+}
+
+// IsValid checks if the trace ID is valid, i.e. not zero.
+func (t TraceID) IsValid() bool {
+ return t.High != 0 || t.Low != 0
+}
+
+// ------- SpanID -------
+
+func (s SpanID) String() string {
+ return fmt.Sprintf("%x", uint64(s))
+}
+
+// SpanIDFromString creates a SpanID from a hexadecimal string
+func SpanIDFromString(s string) (SpanID, error) {
+ if len(s) > 16 {
+ return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
+ }
+ id, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return SpanID(0), err
+ }
+ return SpanID(id), nil
+}
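The string form parsed by ContextFromString above is "traceID:spanID:parentID:flags", with the three IDs in hexadecimal and the flags byte in decimal. A minimal round-trip sketch (not part of the vendored code; the IDs are invented):

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// "deadbeef" is a hypothetical 64-bit trace ID; flags=1 sets the sampled bit.
	ctx, err := jaeger.ContextFromString("deadbeef:1:0:1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ctx.TraceID(), ctx.SpanID(), ctx.IsSampled()) // deadbeef 1 true
	fmt.Println(ctx.String())                                 // deadbeef:1:0:1
}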
diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
new file mode 100644
index 000000000..4ce1881f3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
+type ContribObserver interface {
+ // Create and return a span observer. Called when a span starts.
+ // If the Observer is not interested in the given span, it must return (nil, false).
+ // E.g.:
+ // func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+ // var sp opentracing.Span
+ // sso := opentracing.StartSpanOptions{}
+ // if spanObserver, ok := Observer.OnStartSpan(sp, opName, sso); ok {
+ // // we have a valid SpanObserver
+ // }
+ // ...
+ // }
+ OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
+}
+
+// ContribSpanObserver is created by the Observer and receives notifications
+// about other Span events. This interface is meant to match
+// github.com/opentracing-contrib/go-observer, via duck typing, without
+// directly importing the go-observer package.
+type ContribSpanObserver interface {
+ OnSetOperationName(operationName string)
+ OnSetTag(key string, value interface{})
+ OnFinish(options opentracing.FinishOptions)
+}
+
+// wrapper observer for the old observers (see observer.go)
+type oldObserver struct {
+ obs Observer
+}
+
+func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
+ spanObserver := o.obs.OnStartSpan(operationName, options)
+ return spanObserver, spanObserver != nil
+}
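A hedged sketch of what a ContribObserver implementation could look like; the loggingObserver and loggingSpanObserver names are purely illustrative, and registering the observer with the tracer happens through a tracer option defined elsewhere in the library:

package tracing

import (
	"log"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

type loggingObserver struct{}

type loggingSpanObserver struct{ operation string }

// OnStartSpan accepts every span and returns a span observer for it.
func (loggingObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (jaeger.ContribSpanObserver, bool) {
	return &loggingSpanObserver{operation: operationName}, true
}

func (o *loggingSpanObserver) OnSetOperationName(operationName string) { o.operation = operationName }

func (o *loggingSpanObserver) OnSetTag(key string, value interface{}) {
	log.Printf("span %q: tag %s=%v", o.operation, key, value)
}

func (o *loggingSpanObserver) OnFinish(options opentracing.FinishOptions) {
	log.Printf("span %q finished at %v", o.operation, options.FinishTime)
}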
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
new file mode 100644
index 000000000..4f5549033
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
+It currently uses a Zipkin-compatible data model and can be directly
+integrated with a Zipkin backend (http://zipkin.io).
+
+For integration instructions please refer to the README:
+
+https://github.com/uber/jaeger-client-go/blob/master/README.md
+*/
+package jaeger
diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go
new file mode 100644
index 000000000..19c2c055b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/header.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// HeadersConfig contains the values for the header keys that Jaeger will use.
+// These values may be either custom or default depending on whether custom
+// values were provided via a configuration.
+type HeadersConfig struct {
+ // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
+ // if found in the carrier, forces the trace to be sampled as "debug" trace.
+ // The value of the header is recorded as the tag on the root span, so that the
+ // trace can be found in the UI using this value as a correlation ID.
+ JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
+
+ // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+ // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+ // a root span does not exist.
+ JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
+
+ // TraceContextHeaderName is the http header name used to propagate tracing context.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
+
+ // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
+}
+
+func (c *HeadersConfig) applyDefaults() *HeadersConfig {
+ if c.JaegerBaggageHeader == "" {
+ c.JaegerBaggageHeader = JaegerBaggageHeader
+ }
+ if c.JaegerDebugHeader == "" {
+ c.JaegerDebugHeader = JaegerDebugHeader
+ }
+ if c.TraceBaggageHeaderPrefix == "" {
+ c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
+ }
+ if c.TraceContextHeaderName == "" {
+ c.TraceContextHeaderName = TraceContextHeaderName
+ }
+ return c
+}
+
+func getDefaultHeadersConfig() *HeadersConfig {
+ return &HeadersConfig{
+ JaegerDebugHeader: JaegerDebugHeader,
+ JaegerBaggageHeader: JaegerBaggageHeader,
+ TraceContextHeaderName: TraceContextHeaderName,
+ TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
+ }
+}
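A small sketch of overriding a single header name; the zero-valued fields are filled in with the defaults from constants.go by applyDefaults above (the "x-trace-id" value is an invented example):

package tracing

import jaeger "github.com/uber/jaeger-client-go"

// customHeaders changes only the propagation header; JaegerDebugHeader,
// JaegerBaggageHeader and TraceBaggageHeaderPrefix keep their defaults.
var customHeaders = &jaeger.HeadersConfig{
	TraceContextHeaderName: "x-trace-id",
}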
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
new file mode 100644
index 000000000..745729319
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "time"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ defaultMaxValueLength = 2048
+ defaultRefreshInterval = time.Minute
+ defaultHostPort = "localhost:5778"
+)
+
+// Option is a function that sets some option on the RestrictionManager
+type Option func(options *options)
+
+// Options is a factory for all available options
+var Options options
+
+type options struct {
+ denyBaggageOnInitializationFailure bool
+ metrics *jaeger.Metrics
+ logger jaeger.Logger
+ hostPort string
+ refreshInterval time.Duration
+}
+
+// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
+// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage
+// restrictions have been retrieved from agent.
+// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage
+// restrictions have been retrieved from agent.
+func (options) DenyBaggageOnInitializationFailure(b bool) Option {
+ return func(o *options) {
+ o.denyBaggageOnInitializationFailure = b
+ }
+}
+
+// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
+func (options) Metrics(m *jaeger.Metrics) Option {
+ return func(o *options) {
+ o.metrics = m
+ }
+}
+
+// Logger creates an Option that sets the logger used by the RestrictionManager.
+func (options) Logger(logger jaeger.Logger) Option {
+ return func(o *options) {
+ o.logger = logger
+ }
+}
+
+// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
+func (options) HostPort(hostPort string) Option {
+ return func(o *options) {
+ o.hostPort = hostPort
+ }
+}
+
+// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
+// the baggage restrictions.
+func (options) RefreshInterval(refreshInterval time.Duration) Option {
+ return func(o *options) {
+ o.refreshInterval = refreshInterval
+ }
+}
+
+func applyOptions(o ...Option) options {
+ opts := options{}
+ for _, option := range o {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = jaeger.NewNullMetrics()
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ if opts.hostPort == "" {
+ opts.hostPort = defaultHostPort
+ }
+ if opts.refreshInterval == 0 {
+ opts.refreshInterval = defaultRefreshInterval
+ }
+ return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
new file mode 100644
index 000000000..a56515aca
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "fmt"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+type httpBaggageRestrictionManagerProxy struct {
+ url string
+}
+
+func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
+ v := url.Values{}
+ v.Set("service", serviceName)
+ return &httpBaggageRestrictionManagerProxy{
+ url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
+ }
+}
+
+func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) {
+ var out []*thrift.BaggageRestriction
+ if err := utils.GetJSON(s.url, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent
+type RestrictionManager struct {
+ options
+
+ mux sync.RWMutex
+ serviceName string
+ restrictions map[string]*baggage.Restriction
+ thriftProxy thrift.BaggageRestrictionManager
+ pollStopped sync.WaitGroup
+ stopPoll chan struct{}
+ invalidRestriction *baggage.Restriction
+ validRestriction *baggage.Restriction
+
+ // Determines if the manager has successfully retrieved baggage restrictions from agent
+ initialized bool
+}
+
+// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest
+// baggage restrictions.
+func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
+ // TODO there is a developing use case where a single tracer can generate traces on behalf of many services.
+ // restrictionsMap will need to exist per service
+ opts := applyOptions(options...)
+ m := &RestrictionManager{
+ serviceName: serviceName,
+ options: opts,
+ restrictions: make(map[string]*baggage.Restriction),
+ thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
+ stopPoll: make(chan struct{}),
+ invalidRestriction: baggage.NewRestriction(false, 0),
+ validRestriction: baggage.NewRestriction(true, defaultMaxValueLength),
+ }
+ m.pollStopped.Add(1)
+ go m.pollManager()
+ return m
+}
+
+// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
+func (m *RestrictionManager) isReady() bool {
+ m.mux.RLock()
+ defer m.mux.RUnlock()
+ return m.initialized
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
+ m.mux.RLock()
+ defer m.mux.RUnlock()
+ if !m.initialized {
+ if m.denyBaggageOnInitializationFailure {
+ return m.invalidRestriction
+ }
+ return m.validRestriction
+ }
+ if restriction, ok := m.restrictions[key]; ok {
+ return restriction
+ }
+ return m.invalidRestriction
+}
+
+// Close stops remote polling and closes the RemoteRestrictionManager.
+func (m *RestrictionManager) Close() error {
+ close(m.stopPoll)
+ m.pollStopped.Wait()
+ return nil
+}
+
+func (m *RestrictionManager) pollManager() {
+ defer m.pollStopped.Done()
+ // attempt to initialize baggage restrictions
+ if err := m.updateRestrictions(); err != nil {
+ m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
+ }
+ ticker := time.NewTicker(m.refreshInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := m.updateRestrictions(); err != nil {
+ m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
+ }
+ case <-m.stopPoll:
+ return
+ }
+ }
+}
+
+func (m *RestrictionManager) updateRestrictions() error {
+ restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName)
+ if err != nil {
+ m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
+ return err
+ }
+ newRestrictions := m.parseRestrictions(restrictions)
+ m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
+ m.mux.Lock()
+ defer m.mux.Unlock()
+ m.initialized = true
+ m.restrictions = newRestrictions
+ return nil
+}
+
+func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
+ setters := make(map[string]*baggage.Restriction, len(restrictions))
+ for _, restriction := range restrictions {
+ setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
+ }
+ return setters
+}
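A sketch of constructing the manager with the functional options from options.go. Because this package lives under internal/, only code inside jaeger-client-go can import it, so the sketch is written as if it sat in the same package; the service name and host:port are invented:

package remote

import "time"

func exampleRestrictionManager() *RestrictionManager {
	return NewRestrictionManager(
		"my-service",
		Options.HostPort("127.0.0.1:5778"),
		Options.RefreshInterval(30*time.Second),
		Options.DenyBaggageOnInitializationFailure(false),
	)
}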
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
new file mode 100644
index 000000000..c16a5c566
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage
+
+const (
+ defaultMaxValueLength = 2048
+)
+
+// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
+type Restriction struct {
+ keyAllowed bool
+ maxValueLength int
+}
+
+// NewRestriction returns a new Restriction.
+func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
+ return &Restriction{
+ keyAllowed: keyAllowed,
+ maxValueLength: maxValueLength,
+ }
+}
+
+// KeyAllowed returns whether the baggage key for this restriction is allowed.
+func (r *Restriction) KeyAllowed() bool {
+ return r.keyAllowed
+}
+
+// MaxValueLength returns the max length for the baggage value.
+func (r *Restriction) MaxValueLength() int {
+ return r.maxValueLength
+}
+
+// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
+// will return a Restriction for a specific baggage key which will determine whether the baggage
+// key is allowed for the current service and any other applicable restrictions on the baggage
+// value.
+type RestrictionManager interface {
+ GetRestriction(service, key string) *Restriction
+}
+
+// DefaultRestrictionManager allows any baggage key.
+type DefaultRestrictionManager struct {
+ defaultRestriction *Restriction
+}
+
+// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
+func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
+ if maxValueLength == 0 {
+ maxValueLength = defaultMaxValueLength
+ }
+ return &DefaultRestrictionManager{
+ defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
+ }
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
+ return m.defaultRestriction
+}
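For contrast with the remote manager, a sketch of the default manager written as if in the same package; passing 0 falls back to defaultMaxValueLength:

package baggage

func exampleDefaultManager() {
	m := NewDefaultRestrictionManager(0) // 0 means "use defaultMaxValueLength" (2048)
	r := m.GetRestriction("any-service", "any-key")
	_ = r.KeyAllowed()     // always true for the default manager
	_ = r.MaxValueLength() // 2048
}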
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
new file mode 100644
index 000000000..0e10b8a5a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spanlog
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/opentracing/opentracing-go/log"
+)
+
+type fieldsAsMap map[string]string
+
+// MaterializeWithJSON converts log Fields into JSON string
+// TODO refactor into pluggable materializer
+func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
+ fields := fieldsAsMap(make(map[string]string, len(logFields)))
+ for _, field := range logFields {
+ field.Marshal(fields)
+ }
+ if event, ok := fields["event"]; ok && len(fields) == 1 {
+ return []byte(event), nil
+ }
+ return json.Marshal(fields)
+}
+
+func (ml fieldsAsMap) EmitString(key, value string) {
+ ml[key] = value
+}
+
+func (ml fieldsAsMap) EmitBool(key string, value bool) {
+ ml[key] = fmt.Sprintf("%t", value)
+}
+
+func (ml fieldsAsMap) EmitInt(key string, value int) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt32(key string, value int32) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt64(key string, value int64) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
+ ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
+ ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
+ ml[key] = fmt.Sprintf("%+v", value)
+}
+
+func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
+ value(ml)
+}
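A sketch of the serialization, written as if in the same package; the field names and values are invented. Note the special case above: when "event" is the only field, the raw event string is returned instead of a JSON object.

package spanlog

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func exampleMaterialize() {
	data, err := MaterializeWithJSON([]log.Field{
		log.String("event", "cache-miss"),
		log.Int("attempt", 2),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"attempt":"2","event":"cache-miss"}
}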
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
new file mode 100644
index 000000000..f52c322fb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "time"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ defaultHostPort = "localhost:5778"
+ defaultRefreshInterval = time.Second * 5
+)
+
+// Option is a function that sets some option on the Throttler
+type Option func(options *options)
+
+// Options is a factory for all available options
+var Options options
+
+type options struct {
+ metrics *jaeger.Metrics
+ logger jaeger.Logger
+ hostPort string
+ refreshInterval time.Duration
+ synchronousInitialization bool
+}
+
+// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
+func (options) Metrics(m *jaeger.Metrics) Option {
+ return func(o *options) {
+ o.metrics = m
+ }
+}
+
+// Logger creates an Option that sets the logger used by the Throttler.
+func (options) Logger(logger jaeger.Logger) Option {
+ return func(o *options) {
+ o.logger = logger
+ }
+}
+
+// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
+func (options) HostPort(hostPort string) Option {
+ return func(o *options) {
+ o.hostPort = hostPort
+ }
+}
+
+// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
+// credits.
+func (options) RefreshInterval(refreshInterval time.Duration) Option {
+ return func(o *options) {
+ o.refreshInterval = refreshInterval
+ }
+}
+
+// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
+// fetch credits from the agent when an operation is seen for the first time. This should be set to true
+// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront
+// such that sampling or throttling occurs.
+func (options) SynchronousInitialization(b bool) Option {
+ return func(o *options) {
+ o.synchronousInitialization = b
+ }
+}
+
+func applyOptions(o ...Option) options {
+ opts := options{}
+ for _, option := range o {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = jaeger.NewNullMetrics()
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ if opts.hostPort == "" {
+ opts.hostPort = defaultHostPort
+ }
+ if opts.refreshInterval == 0 {
+ opts.refreshInterval = defaultRefreshInterval
+ }
+ return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
new file mode 100644
index 000000000..20f434fe4
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "fmt"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/uber/jaeger-client-go"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+ // minimumCredits is the minimum amount of credits necessary to not be throttled.
+ // i.e. if currentCredits > minimumCredits, then the operation will not be throttled.
+ minimumCredits = 1.0
+)
+
+var (
+ errorUUIDNotSet = errors.New("Throttler UUID must be set")
+)
+
+type operationBalance struct {
+ Operation string `json:"operation"`
+ Balance float64 `json:"balance"`
+}
+
+type creditResponse struct {
+ Balances []operationBalance `json:"balances"`
+}
+
+type httpCreditManagerProxy struct {
+ hostPort string
+}
+
+func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
+ return &httpCreditManagerProxy{
+ hostPort: hostPort,
+ }
+}
+
+// N.B. Operations list must not be empty.
+func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
+ params := url.Values{}
+ params.Set("service", serviceName)
+ params.Set("uuid", uuid)
+ for _, op := range operations {
+ params.Add("operations", op)
+ }
+ var resp creditResponse
+ if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
+ return nil, errors.Wrap(err, "Failed to receive credits from agent")
+ }
+ return &resp, nil
+}
+
+// Throttler retrieves credits from the agent and uses them to throttle operations.
+type Throttler struct {
+ options
+
+ mux sync.RWMutex
+ service string
+ uuid atomic.Value
+ creditManager *httpCreditManagerProxy
+ credits map[string]float64 // map of operation->credits
+ close chan struct{}
+ stopped sync.WaitGroup
+}
+
+// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
+// the service.
+func NewThrottler(service string, options ...Option) *Throttler {
+ opts := applyOptions(options...)
+ creditManager := newHTTPCreditManagerProxy(opts.hostPort)
+ t := &Throttler{
+ options: opts,
+ creditManager: creditManager,
+ service: service,
+ credits: make(map[string]float64),
+ close: make(chan struct{}),
+ }
+ t.stopped.Add(1)
+ go t.pollManager()
+ return t
+}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t *Throttler) IsAllowed(operation string) bool {
+ t.mux.Lock()
+ defer t.mux.Unlock()
+ value, ok := t.credits[operation]
+ if !ok || value == 0 {
+ if !ok {
+ // NOTE: This appears to be a no-op at first glance, but it stores
+ // the operation key in the map. Necessary for functionality of
+ // Throttler#operations method.
+ t.credits[operation] = 0
+ }
+ if !t.synchronousInitialization {
+ t.metrics.ThrottledDebugSpans.Inc(1)
+ return false
+ }
+ // If it is the first time this operation is being checked, synchronously fetch
+ // the credits.
+ credits, err := t.fetchCredits([]string{operation})
+ if err != nil {
+ // Failed to receive credits from agent, try again next time
+ t.logger.Error("Failed to fetch credits: " + err.Error())
+ return false
+ }
+ if len(credits.Balances) == 0 {
+ // This shouldn't happen but just in case
+ return false
+ }
+ for _, opBalance := range credits.Balances {
+ t.credits[opBalance.Operation] += opBalance.Balance
+ }
+ }
+ return t.isAllowed(operation)
+}
+
+// Close stops the throttler from fetching credits from remote.
+func (t *Throttler) Close() error {
+ close(t.close)
+ t.stopped.Wait()
+ return nil
+}
+
+// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
+// requests are made.
+func (t *Throttler) SetProcess(process jaeger.Process) {
+ if process.UUID != "" {
+ t.uuid.Store(process.UUID)
+ }
+}
+
+// N.B. This function must be called with the Write Lock
+func (t *Throttler) isAllowed(operation string) bool {
+ credits := t.credits[operation]
+ if credits < minimumCredits {
+ t.metrics.ThrottledDebugSpans.Inc(1)
+ return false
+ }
+ t.credits[operation] = credits - minimumCredits
+ return true
+}
+
+func (t *Throttler) pollManager() {
+ defer t.stopped.Done()
+ ticker := time.NewTicker(t.refreshInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ t.refreshCredits()
+ case <-t.close:
+ return
+ }
+ }
+}
+
+func (t *Throttler) operations() []string {
+ t.mux.RLock()
+ defer t.mux.RUnlock()
+ operations := make([]string, 0, len(t.credits))
+ for op := range t.credits {
+ operations = append(operations, op)
+ }
+ return operations
+}
+
+func (t *Throttler) refreshCredits() {
+ operations := t.operations()
+ if len(operations) == 0 {
+ return
+ }
+ newCredits, err := t.fetchCredits(operations)
+ if err != nil {
+ t.metrics.ThrottlerUpdateFailure.Inc(1)
+ t.logger.Error("Failed to fetch credits: " + err.Error())
+ return
+ }
+ t.metrics.ThrottlerUpdateSuccess.Inc(1)
+
+ t.mux.Lock()
+ defer t.mux.Unlock()
+ for _, opBalance := range newCredits.Balances {
+ t.credits[opBalance.Operation] += opBalance.Balance
+ }
+}
+
+func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
+ uuid := t.uuid.Load()
+ uuidStr, _ := uuid.(string)
+ if uuid == nil || uuidStr == "" {
+ return nil, errorUUIDNotSet
+ }
+ return t.creditManager.FetchCredits(uuidStr, t.service, operations)
+}
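A sketch of driving the throttler, again written as if in the same package because it is internal; the service name, UUID, and operation are invented. SetProcess has to run before the first IsAllowed call so that fetchCredits has a UUID to send:

package remote

import "github.com/uber/jaeger-client-go"

func exampleThrottler() bool {
	t := NewThrottler("my-service", Options.SynchronousInitialization(true))
	defer t.Close()
	t.SetProcess(jaeger.Process{UUID: "6ba7b810-9dad-11d1-80b4-00c04fd430c8"})
	return t.IsAllowed("GET /users")
}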
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
new file mode 100644
index 000000000..196ed69ca
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package throttler
+
+// Throttler is used to rate limit operations. For example, given how debug spans
+// are always sampled, a throttler can be enabled per client to rate limit the number
+// of debug spans a client can start.
+type Throttler interface {
+ // IsAllowed determines whether the operation should be allowed and not be
+ // throttled.
+ IsAllowed(operation string) bool
+}
+
+// DefaultThrottler doesn't throttle at all.
+type DefaultThrottler struct{}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t DefaultThrottler) IsAllowed(operation string) bool {
+ return true
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go
new file mode 100644
index 000000000..8402d087c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/interop.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go"
+)
+
+// TODO this file should not be needed after TChannel PR.
+
+type formatKey int
+
+// SpanContextFormat is a constant used as OpenTracing Format.
+// Requires *SpanContext as carrier.
+// This format is intended for interop with TChannel or other Zipkin-like tracers.
+const SpanContextFormat formatKey = iota
+
+type jaegerTraceContextPropagator struct {
+ tracer *Tracer
+}
+
+func (p *jaegerTraceContextPropagator) Inject(
+ ctx SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(*SpanContext)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ carrier.CopyFrom(&ctx)
+ return nil
+}
+
+func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(*SpanContext)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ ctx := new(SpanContext)
+ ctx.CopyFrom(carrier)
+ return *ctx, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
new file mode 100644
index 000000000..868b2a5b5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+
+ "github.com/opentracing/opentracing-go/log"
+
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+type tags []*j.Tag
+
+// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
+func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
+ fields := tags(make([]*j.Tag, 0, len(logFields)))
+ for _, field := range logFields {
+ field.Marshal(&fields)
+ }
+ return fields
+}
+
+func (t *tags) EmitString(key, value string) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
+}
+
+func (t *tags) EmitBool(key string, value bool) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
+}
+
+func (t *tags) EmitInt(key string, value int) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt32(key string, value int32) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt64(key string, value int64) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
+}
+
+func (t *tags) EmitUint32(key string, value uint32) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitUint64(key string, value uint64) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitFloat32(key string, value float32) {
+ vDouble := float64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
+}
+
+func (t *tags) EmitFloat64(key string, value float64) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
+}
+
+func (t *tags) EmitObject(key string, value interface{}) {
+ vStr := fmt.Sprintf("%+v", value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
+}
+
+func (t *tags) EmitLazyLogger(value log.LazyLogger) {
+ value(t)
+}
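A sketch of the conversion, written as if inside the jaeger package itself; the field names and values are invented:

package jaeger

import "github.com/opentracing/opentracing-go/log"

func exampleConvertLogs() {
	jTags := ConvertLogsToJaegerTags([]log.Field{
		log.String("event", "retry"),
		log.Int64("backoff_ms", 250),
	})
	// jTags[0] is a STRING tag {Key: "event", VStr: "retry"};
	// jTags[1] is a LONG tag {Key: "backoff_ms", VLong: 250}.
	_ = jTags
}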
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
new file mode 100644
index 000000000..6ce1caf87
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// BuildJaegerThrift builds jaeger span based on internal span.
+func BuildJaegerThrift(span *Span) *j.Span {
+ span.Lock()
+ defer span.Unlock()
+ startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+ duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+ jaegerSpan := &j.Span{
+ TraceIdLow: int64(span.context.traceID.Low),
+ TraceIdHigh: int64(span.context.traceID.High),
+ SpanId: int64(span.context.spanID),
+ ParentSpanId: int64(span.context.parentID),
+ OperationName: span.operationName,
+ Flags: int32(span.context.flags),
+ StartTime: startTime,
+ Duration: duration,
+ Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
+ Logs: buildLogs(span.logs),
+ References: buildReferences(span.references),
+ }
+ return jaegerSpan
+}
+
+// BuildJaegerProcessThrift creates a thrift Process type.
+func BuildJaegerProcessThrift(span *Span) *j.Process {
+ span.Lock()
+ defer span.Unlock()
+ return buildJaegerProcessThrift(span.tracer)
+}
+
+func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
+ process := &j.Process{
+ ServiceName: tracer.serviceName,
+ Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength),
+ }
+ if tracer.process.UUID != "" {
+ process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
+ }
+ return process
+}
+
+func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
+ jTags := make([]*j.Tag, 0, len(tags))
+ for _, tag := range tags {
+ jTag := buildTag(&tag, maxTagValueLength)
+ jTags = append(jTags, jTag)
+ }
+ return jTags
+}
+
+func buildLogs(logs []opentracing.LogRecord) []*j.Log {
+ jLogs := make([]*j.Log, 0, len(logs))
+ for _, log := range logs {
+ jLog := &j.Log{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+ Fields: ConvertLogsToJaegerTags(log.Fields),
+ }
+ jLogs = append(jLogs, jLog)
+ }
+ return jLogs
+}
+
+func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
+ jTag := &j.Tag{Key: tag.key}
+ switch value := tag.value.(type) {
+ case string:
+ vStr := truncateString(value, maxTagValueLength)
+ jTag.VStr = &vStr
+ jTag.VType = j.TagType_STRING
+ case []byte:
+ if len(value) > maxTagValueLength {
+ value = value[:maxTagValueLength]
+ }
+ jTag.VBinary = value
+ jTag.VType = j.TagType_BINARY
+ case int:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int8:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint8:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int16:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint16:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int32:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint32:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int64:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint64:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case float32:
+ vDouble := float64(value)
+ jTag.VDouble = &vDouble
+ jTag.VType = j.TagType_DOUBLE
+ case float64:
+ vDouble := float64(value)
+ jTag.VDouble = &vDouble
+ jTag.VType = j.TagType_DOUBLE
+ case bool:
+ vBool := value
+ jTag.VBool = &vBool
+ jTag.VType = j.TagType_BOOL
+ default:
+ vStr := truncateString(stringify(value), maxTagValueLength)
+ jTag.VStr = &vStr
+ jTag.VType = j.TagType_STRING
+ }
+ return jTag
+}
+
+func buildReferences(references []Reference) []*j.SpanRef {
+ retMe := make([]*j.SpanRef, 0, len(references))
+ for _, ref := range references {
+ if ref.Type == opentracing.ChildOfRef {
+ retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
+ } else if ref.Type == opentracing.FollowsFromRef {
+ retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
+ }
+ }
+ return retMe
+}
+
+func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
+ return &j.SpanRef{
+ RefType: refType,
+ TraceIdLow: int64(ctx.traceID.Low),
+ TraceIdHigh: int64(ctx.traceID.High),
+ SpanId: int64(ctx.spanID),
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go
new file mode 100644
index 000000000..894bb3dbf
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/log/logger.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "sync"
+)
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// reporters logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+ // Error logs a message at error priority
+ Error(msg string)
+
+ // Infof logs a message at info priority
+ Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+ log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+ log.Printf(msg, args...)
+}
+
+// NullLogger is an implementation of the Logger interface that is a no-op
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string) {}
+func (l *nullLogger) Infof(msg string, args ...interface{}) {}
+
+// BytesBufferLogger implements Logger backed by a bytes.Buffer.
+type BytesBufferLogger struct {
+ mux sync.Mutex
+ buf bytes.Buffer
+}
+
+// Error implements Logger.
+func (l *BytesBufferLogger) Error(msg string) {
+ l.mux.Lock()
+ l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
+ l.mux.Unlock()
+}
+
+// Infof implements Logger.
+func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
+ l.mux.Lock()
+ l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
+ l.mux.Unlock()
+}
+
+// String returns string representation of the underlying buffer.
+func (l *BytesBufferLogger) String() string {
+ l.mux.Lock()
+ defer l.mux.Unlock()
+ return l.buf.String()
+}
+
+// Flush empties the underlying buffer.
+func (l *BytesBufferLogger) Flush() {
+ l.mux.Lock()
+ defer l.mux.Unlock()
+ l.buf.Reset()
+}
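A sketch of exercising BytesBufferLogger, written as if in the same package; it is mostly useful in tests because the output can be read back:

package log

import "fmt"

func exampleBufferLogger() {
	var l BytesBufferLogger
	l.Infof("connected to %s", "agent")
	l.Error("send failed")
	fmt.Print(l.String()) // INFO: connected to agent\nERROR: send failed\n
	l.Flush()
}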
diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go
new file mode 100644
index 000000000..d4f0b5019
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/logger.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "log"
+
+// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead.
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// reporters logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+ // Error logs a message at error priority
+ Error(msg string)
+
+ // Infof logs a message at info priority
+ Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+ log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+ log.Printf(msg, args...)
+}
+
+// NullLogger is an implementation of the Logger interface that is a no-op
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string) {}
+func (l *nullLogger) Infof(msg string, args ...interface{}) {}
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
new file mode 100644
index 000000000..e56db9b73
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/metrics.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+// Metrics is a container of all stats emitted by Jaeger tracer.
+type Metrics struct {
+ // Number of traces started by this tracer as sampled
+ TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"`
+
+ // Number of traces started by this tracer as not sampled
+ TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
+
+ // Number of externally started sampled traces this tracer joined
+ TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
+
+ // Number of externally started not-sampled traces this tracer joined
+ TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
+
+ // Number of sampled spans started by this tracer
+ SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of sampled spans started by this tracer"`
+
+ // Number of unsampled spans started by this tracer
+ SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of unsampled spans started by this tracer"`
+
+ // Number of spans finished by this tracer
+ SpansFinished metrics.Counter `metric:"finished_spans" help:"Number of spans finished by this tracer"`
+
+ // Number of errors decoding tracing context
+ DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
+
+ // Number of spans successfully reported
+ ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"`
+
+ // Number of spans not reported due to a Sender failure
+ ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"`
+
+ // Number of spans dropped due to internal queue overflow
+ ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"`
+
+ // Current number of spans in the reporter queue
+ ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"`
+
+ // Number of times the Sampler succeeded to retrieve sampling strategy
+ SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"`
+
+ // Number of times the Sampler failed to retrieve sampling strategy
+ SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"`
+
+ // Number of times the Sampler succeeded to retrieve and update sampling strategy
+ SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"`
+
+ // Number of times the Sampler failed to update sampling strategy
+ SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"`
+
+ // Number of times baggage was successfully written or updated on spans.
+ BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"`
+
+ // Number of times baggage failed to write or update on spans.
+ BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"`
+
+ // Number of times baggage was truncated as per baggage restrictions.
+ BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"`
+
+ // Number of times baggage restrictions were successfully updated.
+ BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"`
+
+ // Number of times baggage restrictions failed to update.
+ BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"`
+
+ // Number of times debug spans were throttled.
+ ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"`
+
+ // Number of times throttler successfully updated.
+ ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"`
+
+ // Number of times throttler failed to update.
+ ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"`
+}
+
+// NewMetrics creates a new Metrics struct and initializes it.
+func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
+ m := &Metrics{}
+ // TODO the namespace "jaeger" should be configurable
+ metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags)
+ return m
+}
+
+// NewNullMetrics creates a new Metrics struct that won't report any metrics.
+func NewNullMetrics() *Metrics {
+ return NewMetrics(metrics.NullFactory, nil)
+}
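
A short, hedged sketch of wiring these metrics up: metrics.NullFactory is the no-op factory from jaeger-lib used by NewNullMetrics above, and a real deployment would substitute a Prometheus, expvar, or similar factory.

package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// Tag every emitted metric with a common label set.
	m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"service": "podman"})

	// The tracer and reporter increment these internally; shown here only to
	// illustrate the counter/gauge types coming from jaeger-lib.
	m.SpansFinished.Inc(1)
	m.ReporterQueueLength.Update(0)

	// When metrics are not wanted at all:
	_ = jaeger.NewNullMetrics()
}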
diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go
new file mode 100644
index 000000000..7bbd02889
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/observer.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// Observer can be registered with the Tracer to receive notifications about
+// new Spans.
+//
+// Deprecated: use jaeger.ContribObserver instead.
+type Observer interface {
+ OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
+}
+
+// SpanObserver is created by the Observer and receives notifications about
+// other Span events.
+//
+// Deprecated: use jaeger.ContribSpanObserver instead.
+type SpanObserver interface {
+ OnSetOperationName(operationName string)
+ OnSetTag(key string, value interface{})
+ OnFinish(options opentracing.FinishOptions)
+}
+
+// compositeObserver is a dispatcher to other observers
+type compositeObserver struct {
+ observers []ContribObserver
+}
+
+// compositeSpanObserver is a dispatcher to other span observers
+type compositeSpanObserver struct {
+ observers []ContribSpanObserver
+}
+
+// noopSpanObserver is used when there are no observers registered
+// on the Tracer or none of them returns span observers from OnStartSpan.
+var noopSpanObserver = &compositeSpanObserver{}
+
+func (o *compositeObserver) append(contribObserver ContribObserver) {
+ o.observers = append(o.observers, contribObserver)
+}
+
+func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
+ var spanObservers []ContribSpanObserver
+ for _, obs := range o.observers {
+ spanObs, ok := obs.OnStartSpan(sp, operationName, options)
+ if ok {
+ if spanObservers == nil {
+ spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
+ }
+ spanObservers = append(spanObservers, spanObs)
+ }
+ }
+ if len(spanObservers) == 0 {
+ return noopSpanObserver
+ }
+ return &compositeSpanObserver{observers: spanObservers}
+}
+
+func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
+ for _, obs := range o.observers {
+ obs.OnSetOperationName(operationName)
+ }
+}
+
+func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
+ for _, obs := range o.observers {
+ obs.OnSetTag(key, value)
+ }
+}
+
+func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
+ for _, obs := range o.observers {
+ obs.OnFinish(options)
+ }
+}
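
To make the deprecated Observer/SpanObserver contract above concrete, here is a hypothetical bare-bones implementation; the interfaces and method sets are exactly the ones declared in this file:

package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// printObserver is a hypothetical jaeger.Observer that prints span lifecycle events.
type printObserver struct{}

func (printObserver) OnStartSpan(operationName string, options opentracing.StartSpanOptions) jaeger.SpanObserver {
	fmt.Println("started:", operationName)
	return printSpanObserver{}
}

type printSpanObserver struct{}

func (printSpanObserver) OnSetOperationName(operationName string)    { fmt.Println("renamed:", operationName) }
func (printSpanObserver) OnSetTag(key string, value interface{})     { fmt.Println("tag:", key, "=", value) }
func (printSpanObserver) OnFinish(options opentracing.FinishOptions) { fmt.Println("finished") }

func main() {
	var _ jaeger.Observer = printObserver{} // compile-time interface check
}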
diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go
new file mode 100644
index 000000000..30cbf9962
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/process.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// Process holds process specific metadata that's relevant to this client.
+type Process struct {
+ Service string
+ UUID string
+ Tags []Tag
+}
+
+// ProcessSetter sets a process. This can be used by any type that requires
+// the process to be set as part of initialization.
+// See internal/throttler/remote/throttler.go for an example.
+type ProcessSetter interface {
+ SetProcess(process Process)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
new file mode 100644
index 000000000..abca67a3c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -0,0 +1,300 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "log"
+ "net/url"
+ "strings"
+ "sync"
+
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// Injector is responsible for injecting SpanContext instances in a manner suitable
+// for propagation via a format-specific "carrier" object. Typically the
+// injection will take place across an RPC boundary, but message queues and
+// other IPC mechanisms are also reasonable places to use an Injector.
+type Injector interface {
+ // Inject takes `SpanContext` and injects it into `carrier`. The actual type
+ // of `carrier` depends on the `format` passed to `Tracer.Inject()`.
+ //
+ // Implementations may return opentracing.ErrInvalidCarrier or any other
+ // implementation-specific error if injection fails.
+ Inject(ctx SpanContext, carrier interface{}) error
+}
+
+// Extractor is responsible for extracting SpanContext instances from a
+// format-specific "carrier" object. Typically the extraction will take place
+// on the server side of an RPC boundary, but message queues and other IPC
+// mechanisms are also reasonable places to use an Extractor.
+type Extractor interface {
+ // Extract decodes a SpanContext instance from the given `carrier`,
+ // or (nil, opentracing.ErrSpanContextNotFound) if no context could
+ // be found in the `carrier`.
+ Extract(carrier interface{}) (SpanContext, error)
+}
+
+type textMapPropagator struct {
+ headerKeys *HeadersConfig
+ metrics Metrics
+ encodeValue func(string) string
+ decodeValue func(string) string
+}
+
+func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
+ return &textMapPropagator{
+ headerKeys: headerKeys,
+ metrics: metrics,
+ encodeValue: func(val string) string {
+ return val
+ },
+ decodeValue: func(val string) string {
+ return val
+ },
+ }
+}
+
+func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
+ return &textMapPropagator{
+ headerKeys: headerKeys,
+ metrics: metrics,
+ encodeValue: func(val string) string {
+ return url.QueryEscape(val)
+ },
+ decodeValue: func(val string) string {
+ // ignore decoding errors, cannot do anything about them
+ if v, err := url.QueryUnescape(val); err == nil {
+ return v
+ }
+ return val
+ },
+ }
+}
+
+type binaryPropagator struct {
+ tracer *Tracer
+ buffers sync.Pool
+}
+
+func newBinaryPropagator(tracer *Tracer) *binaryPropagator {
+ return &binaryPropagator{
+ tracer: tracer,
+ buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
+ }
+}
+
+func (p *textMapPropagator) Inject(
+ sc SpanContext,
+ abstractCarrier interface{},
+) error {
+ textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ // Do not encode the string with trace context to avoid accidental double-encoding
+ // if people are using opentracing < 0.10.0. Our colon-separated representation
+ // of the trace context is already safe for HTTP headers.
+ textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
+ for k, v := range sc.baggage {
+ safeKey := p.addBaggageKeyPrefix(k)
+ safeVal := p.encodeValue(v)
+ textMapWriter.Set(safeKey, safeVal)
+ }
+ return nil
+}
+
+func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ var ctx SpanContext
+ var baggage map[string]string
+ err := textMapReader.ForeachKey(func(rawKey, value string) error {
+ key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
+ if key == p.headerKeys.TraceContextHeaderName {
+ var err error
+ safeVal := p.decodeValue(value)
+ if ctx, err = ContextFromString(safeVal); err != nil {
+ return err
+ }
+ } else if key == p.headerKeys.JaegerDebugHeader {
+ ctx.debugID = p.decodeValue(value)
+ } else if key == p.headerKeys.JaegerBaggageHeader {
+ if baggage == nil {
+ baggage = make(map[string]string)
+ }
+ for k, v := range p.parseCommaSeparatedMap(value) {
+ baggage[k] = v
+ }
+ } else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
+ if baggage == nil {
+ baggage = make(map[string]string)
+ }
+ safeKey := p.removeBaggageKeyPrefix(key)
+ safeVal := p.decodeValue(value)
+ baggage[safeKey] = safeVal
+ }
+ return nil
+ })
+ if err != nil {
+ p.metrics.DecodingErrors.Inc(1)
+ return emptyContext, err
+ }
+ if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
+ return emptyContext, opentracing.ErrSpanContextNotFound
+ }
+ ctx.baggage = baggage
+ return ctx, nil
+}
+
+func (p *binaryPropagator) Inject(
+ sc SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(io.Writer)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ // Handle the tracer context
+ if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil {
+ return err
+ }
+
+ // Handle the baggage items
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
+ return err
+ }
+ for k, v := range sc.baggage {
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
+ return err
+ }
+ io.WriteString(carrier, k)
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
+ return err
+ }
+ io.WriteString(carrier, v)
+ }
+
+ return nil
+}
+
+func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(io.Reader)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ var ctx SpanContext
+
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+
+ // Handle the baggage items
+ var numBaggage int32
+ if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
+ ctx.baggage = make(map[string]string, iNumBaggage)
+ buf := p.buffers.Get().(*bytes.Buffer)
+ defer p.buffers.Put(buf)
+
+ var keyLen, valLen int32
+ for i := 0; i < iNumBaggage; i++ {
+ if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ buf.Reset()
+ buf.Grow(int(keyLen))
+ if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ key := buf.String()
+
+ if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ buf.Reset()
+ buf.Grow(int(valLen))
+ if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ ctx.baggage[key] = buf.String()
+ }
+ }
+
+ return ctx, nil
+}
+
+// Converts a comma separated key value pair list into a map
+// e.g. key1=value1, key2=value2, key3 = value3
+// is converted to map[string]string { "key1" : "value1",
+// "key2" : "value2",
+// "key3" : "value3" }
+func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
+ baggage := make(map[string]string)
+ value, err := url.QueryUnescape(value)
+ if err != nil {
+ log.Printf("Unable to unescape %s, %v", value, err)
+ return baggage
+ }
+ for _, kvpair := range strings.Split(value, ",") {
+ kv := strings.Split(strings.TrimSpace(kvpair), "=")
+ if len(kv) == 2 {
+ baggage[kv[0]] = kv[1]
+ } else {
+ log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
+ }
+ }
+ return baggage
+}
+
+// Converts a baggage item key into an http header format,
+// by prepending TraceBaggageHeaderPrefix and encoding the key string
+func (p *textMapPropagator) addBaggageKeyPrefix(key string) string {
+ // TODO encodeBaggageKeyAsHeader add caching and escaping
+ return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
+}
+
+func (p *textMapPropagator) removeBaggageKeyPrefix(key string) string {
+ // TODO decodeBaggageHeaderKey add caching and escaping
+ return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
+}
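
These propagators are not used directly; callers go through the opentracing Inject/Extract API on a jaeger tracer constructed elsewhere. The sketch below assumes such a tracer and substitutes opentracing's no-op tracer only so that it compiles and runs stand-alone:

package main

import (
	"log"
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
)

func propagate(tracer opentracing.Tracer) {
	clientSpan := tracer.StartSpan("client-call")
	defer clientSpan.Finish()

	// Inject the span context into outgoing HTTP headers.
	req, _ := http.NewRequest("GET", "http://example.com/api", nil)
	carrier := opentracing.HTTPHeadersCarrier(req.Header)
	if err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, carrier); err != nil {
		log.Printf("inject failed: %v", err)
	}

	// The receiving side extracts the same context and continues the trace.
	if ctx, err := tracer.Extract(opentracing.HTTPHeaders, carrier); err == nil {
		serverSpan := tracer.StartSpan("server-handle", opentracing.ChildOf(ctx))
		serverSpan.Finish()
	}
}

func main() {
	propagate(opentracing.NoopTracer{}) // stand-in; a real jaeger tracer would be used here
}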
diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go
new file mode 100644
index 000000000..5646e78bb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reference.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "github.com/opentracing/opentracing-go"
+
+// Reference represents a causal reference to other Spans (via their SpanContext).
+type Reference struct {
+ Type opentracing.SpanReferenceType
+ Context SpanContext
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
new file mode 100644
index 000000000..fe6288c4b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter.go
@@ -0,0 +1,289 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/uber/jaeger-client-go/log"
+)
+
+// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
+type Reporter interface {
+ // Report submits a new span to collectors, possibly asynchronously and/or with buffering.
+ Report(span *Span)
+
+ // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
+ Close()
+}
+
+// ------------------------------
+
+type nullReporter struct{}
+
+// NewNullReporter creates a no-op reporter that ignores all reported spans.
+func NewNullReporter() Reporter {
+ return &nullReporter{}
+}
+
+// Report implements Report() method of Reporter by doing nothing.
+func (r *nullReporter) Report(span *Span) {
+ // no-op
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *nullReporter) Close() {
+ // no-op
+}
+
+// ------------------------------
+
+type loggingReporter struct {
+ logger Logger
+}
+
+// NewLoggingReporter creates a reporter that logs all reported spans to provided logger.
+func NewLoggingReporter(logger Logger) Reporter {
+ return &loggingReporter{logger}
+}
+
+// Report implements Report() method of Reporter by logging the span to the logger.
+func (r *loggingReporter) Report(span *Span) {
+ r.logger.Infof("Reporting span %+v", span)
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *loggingReporter) Close() {
+ // no-op
+}
+
+// ------------------------------
+
+// InMemoryReporter is used for testing, and simply collects spans in memory.
+type InMemoryReporter struct {
+ spans []opentracing.Span
+ lock sync.Mutex
+}
+
+// NewInMemoryReporter creates a reporter that stores spans in memory.
+// NOTE: the Tracer should be created with options.PoolSpans = false.
+func NewInMemoryReporter() *InMemoryReporter {
+ return &InMemoryReporter{
+ spans: make([]opentracing.Span, 0, 10),
+ }
+}
+
+// Report implements Report() method of Reporter by storing the span in the buffer.
+func (r *InMemoryReporter) Report(span *Span) {
+ r.lock.Lock()
+ r.spans = append(r.spans, span)
+ r.lock.Unlock()
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *InMemoryReporter) Close() {
+ // no-op
+}
+
+// SpansSubmitted returns the number of spans accumulated in the buffer.
+func (r *InMemoryReporter) SpansSubmitted() int {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return len(r.spans)
+}
+
+// GetSpans returns accumulated spans as a copy of the buffer.
+func (r *InMemoryReporter) GetSpans() []opentracing.Span {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ copied := make([]opentracing.Span, len(r.spans))
+ copy(copied, r.spans)
+ return copied
+}
+
+// Reset clears all accumulated spans.
+func (r *InMemoryReporter) Reset() {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ r.spans = nil
+}
+
+// ------------------------------
+
+type compositeReporter struct {
+ reporters []Reporter
+}
+
+// NewCompositeReporter creates a reporter that delegates to the given reporters, calling each of them for every span.
+func NewCompositeReporter(reporters ...Reporter) Reporter {
+ return &compositeReporter{reporters: reporters}
+}
+
+// Report implements Report() method of Reporter by delegating to each underlying reporter.
+func (r *compositeReporter) Report(span *Span) {
+ for _, reporter := range r.reporters {
+ reporter.Report(span)
+ }
+}
+
+// Close implements Close() method of Reporter by closing each underlying reporter.
+func (r *compositeReporter) Close() {
+ for _, reporter := range r.reporters {
+ reporter.Close()
+ }
+}
+
+// ------------- REMOTE REPORTER -----------------
+
+type reporterQueueItemType int
+
+const (
+ defaultQueueSize = 100
+ defaultBufferFlushInterval = 1 * time.Second
+
+ reporterQueueItemSpan reporterQueueItemType = iota
+ reporterQueueItemClose
+)
+
+type reporterQueueItem struct {
+ itemType reporterQueueItemType
+ span *Span
+ close *sync.WaitGroup
+}
+
+type remoteReporter struct {
+ // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+ // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+ queueLength int64
+ closed int64 // 0 - not closed, 1 - closed
+
+ reporterOptions
+
+ sender Transport
+ queue chan reporterQueueItem
+}
+
+// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
+// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
+// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
+// Calls to Close() block until all spans reported prior to the call to Close are flushed.
+func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
+ options := reporterOptions{}
+ for _, option := range opts {
+ option(&options)
+ }
+ if options.bufferFlushInterval <= 0 {
+ options.bufferFlushInterval = defaultBufferFlushInterval
+ }
+ if options.logger == nil {
+ options.logger = log.NullLogger
+ }
+ if options.metrics == nil {
+ options.metrics = NewNullMetrics()
+ }
+ if options.queueSize <= 0 {
+ options.queueSize = defaultQueueSize
+ }
+ reporter := &remoteReporter{
+ reporterOptions: options,
+ sender: sender,
+ queue: make(chan reporterQueueItem, options.queueSize),
+ }
+ go reporter.processQueue()
+ return reporter
+}
+
+// Report implements Report() method of Reporter.
+// It passes the span to a background go-routine for submission to Jaeger backend.
+// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
+// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
+// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
+// because some of them may still be successfully added to the queue.
+func (r *remoteReporter) Report(span *Span) {
+ select {
+ case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span}:
+ atomic.AddInt64(&r.queueLength, 1)
+ default:
+ r.metrics.ReporterDropped.Inc(1)
+ }
+}
+
+// Close implements Close() method of Reporter by waiting for the queue to be drained.
+func (r *remoteReporter) Close() {
+ if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
+ r.logger.Error("Repeated attempt to close the reporter is ignored")
+ return
+ }
+ r.sendCloseEvent()
+ r.sender.Close()
+}
+
+func (r *remoteReporter) sendCloseEvent() {
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
+
+ r.queue <- item // if the queue is full we will block until there is space
+ atomic.AddInt64(&r.queueLength, 1)
+ wg.Wait()
+}
+
+// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer.
+// When the buffer reaches the sender's batch size, it is flushed by submitting the accumulated spans to Jaeger.
+// The buffer is also flushed automatically every bufferFlushInterval, in case the tracer stopped
+// reporting new spans.
+func (r *remoteReporter) processQueue() {
+ // flush causes the Sender to flush its accumulated spans and clear the buffer
+ flush := func() {
+ if flushed, err := r.sender.Flush(); err != nil {
+ r.metrics.ReporterFailure.Inc(int64(flushed))
+ r.logger.Error(fmt.Sprintf("error when flushing the buffer: %s", err.Error()))
+ } else if flushed > 0 {
+ r.metrics.ReporterSuccess.Inc(int64(flushed))
+ }
+ }
+
+ timer := time.NewTicker(r.bufferFlushInterval)
+ for {
+ select {
+ case <-timer.C:
+ flush()
+ case item := <-r.queue:
+ atomic.AddInt64(&r.queueLength, -1)
+ switch item.itemType {
+ case reporterQueueItemSpan:
+ span := item.span
+ if flushed, err := r.sender.Append(span); err != nil {
+ r.metrics.ReporterFailure.Inc(int64(flushed))
+ r.logger.Error(fmt.Sprintf("error reporting span %q: %s", span.OperationName(), err.Error()))
+ } else if flushed > 0 {
+ r.metrics.ReporterSuccess.Inc(int64(flushed))
+ // to reduce the number of gauge stats, we only emit queue length on flush
+ r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
+ }
+ case reporterQueueItemClose:
+ timer.Stop()
+ flush()
+ item.close.Done()
+ return
+ }
+ }
+ }
+}
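
The concrete reporters above compose cleanly. A minimal sketch, using only constructors from this file, of combining an in-memory reporter (handy in tests) with a logging reporter:

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	inMemory := jaeger.NewInMemoryReporter()
	reporter := jaeger.NewCompositeReporter(
		inMemory,
		jaeger.NewLoggingReporter(jaeger.StdLogger),
	)
	defer reporter.Close()

	// In real use the tracer calls Report for every finished span; spans are
	// not constructed by hand here, so only the bookkeeping side is shown.
	fmt.Println("spans buffered so far:", inMemory.SpansSubmitted())
}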
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
new file mode 100644
index 000000000..65012d701
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
@@ -0,0 +1,69 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+)
+
+// ReporterOption is a function that sets some option on the reporter.
+type ReporterOption func(c *reporterOptions)
+
+// ReporterOptions is a factory for all available ReporterOption's
+var ReporterOptions reporterOptions
+
+// reporterOptions control behavior of the reporter.
+type reporterOptions struct {
+ // queueSize is the size of internal queue where reported spans are stored before they are processed in the background
+ queueSize int
+ // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
+ bufferFlushInterval time.Duration
+ // logger is used to log errors of span submissions
+ logger Logger
+ // metrics is used to record runtime stats
+ metrics *Metrics
+}
+
+// QueueSize creates a ReporterOption that sets the size of the internal queue where
+// spans are stored before they are processed.
+func (reporterOptions) QueueSize(queueSize int) ReporterOption {
+ return func(r *reporterOptions) {
+ r.queueSize = queueSize
+ }
+}
+
+// Metrics creates a ReporterOption that initializes Metrics in the reporter,
+// which is used to record runtime statistics.
+func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
+ return func(r *reporterOptions) {
+ r.metrics = metrics
+ }
+}
+
+// BufferFlushInterval creates a ReporterOption that sets how often the queue
+// is force-flushed.
+func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
+ return func(r *reporterOptions) {
+ r.bufferFlushInterval = bufferFlushInterval
+ }
+}
+
+// Logger creates a ReporterOption that initializes the logger used to log
+// errors of span submissions.
+func (reporterOptions) Logger(logger Logger) ReporterOption {
+ return func(r *reporterOptions) {
+ r.logger = logger
+ }
+}
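
These options are passed to NewRemoteReporter as a variadic list. The sketch below assumes the UDP transport constructor (NewUDPTransport) that lives in another file of this vendored library; the agent address and packet size are illustrative, not required values:

package main

import (
	"log"
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// Assumed helper from elsewhere in this library: sends spans to a local agent over UDP.
	sender, err := jaeger.NewUDPTransport("127.0.0.1:6831", 0)
	if err != nil {
		log.Fatal(err)
	}

	reporter := jaeger.NewRemoteReporter(
		sender,
		jaeger.ReporterOptions.QueueSize(500),
		jaeger.ReporterOptions.BufferFlushInterval(2*time.Second),
		jaeger.ReporterOptions.Logger(jaeger.StdLogger),
	)
	defer reporter.Close() // flushes anything still buffered
}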
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
new file mode 100644
index 000000000..879948e9c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
@@ -0,0 +1,5 @@
+An Observer that can be used to emit RPC metrics
+================================================
+
+It can be attached to the tracer during tracer construction.
+See `ExampleObserver` function in [observer_test.go](./observer_test.go).
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
new file mode 100644
index 000000000..51aa11b35
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
+package rpcmetrics
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
new file mode 100644
index 000000000..30555243d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import "sync"
+
+// normalizedEndpoints is a cache for endpointName -> safeName mappings.
+type normalizedEndpoints struct {
+ names map[string]string
+ maxSize int
+ defaultName string
+ normalizer NameNormalizer
+ mux sync.RWMutex
+}
+
+func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
+ return &normalizedEndpoints{
+ maxSize: maxSize,
+ normalizer: normalizer,
+ names: make(map[string]string, maxSize),
+ }
+}
+
+// normalize looks up the name in the cache; if not found, it uses the normalizer
+// to convert the name to a safe name. If called with more than maxSize unique
+// names it returns "" for all other names beyond those already cached.
+func (n *normalizedEndpoints) normalize(name string) string {
+ n.mux.RLock()
+ norm, ok := n.names[name]
+ l := len(n.names)
+ n.mux.RUnlock()
+ if ok {
+ return norm
+ }
+ if l >= n.maxSize {
+ return ""
+ }
+ return n.normalizeWithLock(name)
+}
+
+func (n *normalizedEndpoints) normalizeWithLock(name string) string {
+ norm := n.normalizer.Normalize(name)
+ n.mux.Lock()
+ defer n.mux.Unlock()
+ // cache may have grown while we were not holding the lock
+ if len(n.names) >= n.maxSize {
+ return ""
+ }
+ n.names[name] = norm
+ return norm
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
new file mode 100644
index 000000000..a8cec2fa6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import (
+ "sync"
+
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+const (
+ otherEndpointsPlaceholder = "other"
+ endpointNameMetricTag = "endpoint"
+)
+
+// Metrics is a collection of metrics for an endpoint describing
+// throughput, success, errors, and performance.
+type Metrics struct {
+ // RequestCountSuccess is a counter of the total number of successes.
+ RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
+
+ // RequestCountFailures is a counter of the number of times any failure has been observed.
+ RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
+
+ // RequestLatencySuccess is a latency histogram of successful requests.
+ RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
+
+ // RequestLatencyFailures is a latency histogram of failed requests.
+ RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
+
+ // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
+ HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
+
+ // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
+ HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
+
+ // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
+ HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
+
+ // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
+ HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
+}
+
+func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
+ if statusCode >= 200 && statusCode < 300 {
+ m.HTTPStatusCode2xx.Inc(1)
+ } else if statusCode >= 300 && statusCode < 400 {
+ m.HTTPStatusCode3xx.Inc(1)
+ } else if statusCode >= 400 && statusCode < 500 {
+ m.HTTPStatusCode4xx.Inc(1)
+ } else if statusCode >= 500 && statusCode < 600 {
+ m.HTTPStatusCode5xx.Inc(1)
+ }
+}
+
+// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
+// Only maxNumberOfEndpoints Metrics are stored; all other endpoint names are mapped
+// to a generic endpoint name "other".
+type MetricsByEndpoint struct {
+ metricsFactory metrics.Factory
+ endpoints *normalizedEndpoints
+ metricsByEndpoint map[string]*Metrics
+ mux sync.RWMutex
+}
+
+func newMetricsByEndpoint(
+ metricsFactory metrics.Factory,
+ normalizer NameNormalizer,
+ maxNumberOfEndpoints int,
+) *MetricsByEndpoint {
+ return &MetricsByEndpoint{
+ metricsFactory: metricsFactory,
+ endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
+ metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
+ }
+}
+
+func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
+ safeName := m.endpoints.normalize(endpoint)
+ if safeName == "" {
+ safeName = otherEndpointsPlaceholder
+ }
+ m.mux.RLock()
+ met := m.metricsByEndpoint[safeName]
+ m.mux.RUnlock()
+ if met != nil {
+ return met
+ }
+
+ return m.getWithWriteLock(safeName)
+}
+
+// split out to make it easier to test
+func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ // it is possible that the name has been already registered after we released
+ // the read lock and before we grabbed the write lock, so check for that.
+ if met, ok := m.metricsByEndpoint[safeName]; ok {
+ return met
+ }
+
+ // it would be nice to create the struct before locking, since Init() is somewhat
+ // expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics.
+ met := &Metrics{}
+ tags := map[string]string{endpointNameMetricTag: safeName}
+ metrics.Init(met, m.metricsFactory, tags)
+
+ m.metricsByEndpoint[safeName] = met
+ return met
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
new file mode 100644
index 000000000..148d84b3a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+// NameNormalizer is used to convert the endpoint names to strings
+// that can be safely used as tags in the metrics.
+type NameNormalizer interface {
+ Normalize(name string) string
+}
+
+// DefaultNameNormalizer converts endpoint names so that they contain only characters
+// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
+var DefaultNameNormalizer = &SimpleNameNormalizer{
+ SafeSets: []SafeCharacterSet{
+ &Range{From: 'a', To: 'z'},
+ &Range{From: 'A', To: 'Z'},
+ &Range{From: '0', To: '9'},
+ &Char{'-'},
+ &Char{'_'},
+ &Char{'/'},
+ &Char{'.'},
+ },
+ Replacement: '-',
+}
+
+// SimpleNameNormalizer uses a set of safe character sets.
+type SimpleNameNormalizer struct {
+ SafeSets []SafeCharacterSet
+ Replacement byte
+}
+
+// SafeCharacterSet determines if the given character is "safe"
+type SafeCharacterSet interface {
+ IsSafe(c byte) bool
+}
+
+// Range implements SafeCharacterSet
+type Range struct {
+ From, To byte
+}
+
+// IsSafe implements SafeCharacterSet
+func (r *Range) IsSafe(c byte) bool {
+ return c >= r.From && c <= r.To
+}
+
+// Char implements SafeCharacterSet
+type Char struct {
+ Val byte
+}
+
+// IsSafe implements SafeCharacterSet
+func (ch *Char) IsSafe(c byte) bool {
+ return c == ch.Val
+}
+
+// Normalize checks each character in the string against SafeSets,
+// and if it's not safe substitutes it with Replacement.
+func (n *SimpleNameNormalizer) Normalize(name string) string {
+ var retMe []byte
+ nameBytes := []byte(name)
+ for i, b := range nameBytes {
+ if n.safeByte(b) {
+ if retMe != nil {
+ retMe[i] = b
+ }
+ } else {
+ if retMe == nil {
+ retMe = make([]byte, len(nameBytes))
+ copy(retMe[0:i], nameBytes[0:i])
+ }
+ retMe[i] = n.Replacement
+ }
+ }
+ if retMe == nil {
+ return name
+ }
+ return string(retMe)
+}
+
+// safeByte checks b against all safe charsets.
+func (n *SimpleNameNormalizer) safeByte(b byte) bool {
+ for i := range n.SafeSets {
+ if n.SafeSets[i].IsSafe(b) {
+ return true
+ }
+ }
+ return false
+}
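
A quick illustration of the normalizer, runnable as-is against the exported DefaultNameNormalizer defined above; the second, stricter normalizer is a hypothetical one built from the same primitives:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/rpcmetrics"
)

func main() {
	// Unsafe characters (space, braces, '?', '=') are replaced with '-'.
	fmt.Println(rpcmetrics.DefaultNameNormalizer.Normalize("GET /users/{id}?q=1"))
	// -> GET-/users/-id--q-1

	lowerOnly := &rpcmetrics.SimpleNameNormalizer{
		SafeSets: []rpcmetrics.SafeCharacterSet{
			&rpcmetrics.Range{From: 'a', To: 'z'},
			&rpcmetrics.Char{Val: '_'},
		},
		Replacement: '_',
	}
	fmt.Println(lowerOnly.Normalize("get_user-42")) // -> get_user___
}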
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
new file mode 100644
index 000000000..eca5ff6f3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import (
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/uber/jaeger-lib/metrics"
+
+ jaeger "github.com/uber/jaeger-client-go"
+)
+
+const defaultMaxNumberOfEndpoints = 200
+
+// Observer is an observer that can emit RPC metrics.
+type Observer struct {
+ metricsByEndpoint *MetricsByEndpoint
+}
+
+// NewObserver creates a new observer that can emit RPC metrics.
+func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
+ return &Observer{
+ metricsByEndpoint: newMetricsByEndpoint(
+ metricsFactory,
+ normalizer,
+ defaultMaxNumberOfEndpoints,
+ ),
+ }
+}
+
+// OnStartSpan creates a new Observer for the span.
+func (o *Observer) OnStartSpan(
+ operationName string,
+ options opentracing.StartSpanOptions,
+) jaeger.SpanObserver {
+ return NewSpanObserver(o.metricsByEndpoint, operationName, options)
+}
+
+// SpanKind identifies the span as inbound, outbound, or internal
+type SpanKind int
+
+const (
+ // Local span kind
+ Local SpanKind = iota
+ // Inbound span kind
+ Inbound
+ // Outbound span kind
+ Outbound
+)
+
+// SpanObserver collects RPC metrics
+type SpanObserver struct {
+ metricsByEndpoint *MetricsByEndpoint
+ operationName string
+ startTime time.Time
+ mux sync.Mutex
+ kind SpanKind
+ httpStatusCode uint16
+ err bool
+}
+
+// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
+func NewSpanObserver(
+ metricsByEndpoint *MetricsByEndpoint,
+ operationName string,
+ options opentracing.StartSpanOptions,
+) *SpanObserver {
+ so := &SpanObserver{
+ metricsByEndpoint: metricsByEndpoint,
+ operationName: operationName,
+ startTime: options.StartTime,
+ }
+ for k, v := range options.Tags {
+ so.handleTagInLock(k, v)
+ }
+ return so
+}
+
+// handleTagInLock watches for special tags:
+// - SpanKind
+// - HttpStatusCode
+// - Error
+func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
+ if key == string(ext.SpanKind) {
+ if v, ok := value.(ext.SpanKindEnum); ok {
+ value = string(v)
+ }
+ if v, ok := value.(string); ok {
+ if v == string(ext.SpanKindRPCClientEnum) {
+ so.kind = Outbound
+ } else if v == string(ext.SpanKindRPCServerEnum) {
+ so.kind = Inbound
+ }
+ }
+ return
+ }
+ if key == string(ext.HTTPStatusCode) {
+ if v, ok := value.(uint16); ok {
+ so.httpStatusCode = v
+ } else if v, ok := value.(int); ok {
+ so.httpStatusCode = uint16(v)
+ } else if v, ok := value.(string); ok {
+ if vv, err := strconv.Atoi(v); err == nil {
+ so.httpStatusCode = uint16(vv)
+ }
+ }
+ return
+ }
+ if key == string(ext.Error) {
+ if v, ok := value.(bool); ok {
+ so.err = v
+ } else if v, ok := value.(string); ok {
+ if vv, err := strconv.ParseBool(v); err == nil {
+ so.err = vv
+ }
+ }
+ return
+ }
+}
+
+// OnFinish emits the RPC metrics. It only has an effect when operation name
+// is not blank, and the span kind is an RPC server.
+func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
+ so.mux.Lock()
+ defer so.mux.Unlock()
+
+ if so.operationName == "" || so.kind != Inbound {
+ return
+ }
+
+ mets := so.metricsByEndpoint.get(so.operationName)
+ latency := options.FinishTime.Sub(so.startTime)
+ if so.err {
+ mets.RequestCountFailures.Inc(1)
+ mets.RequestLatencyFailures.Record(latency)
+ } else {
+ mets.RequestCountSuccess.Inc(1)
+ mets.RequestLatencySuccess.Record(latency)
+ }
+ mets.recordHTTPStatusCode(so.httpStatusCode)
+}
+
+// OnSetOperationName records new operation name.
+func (so *SpanObserver) OnSetOperationName(operationName string) {
+ so.mux.Lock()
+ so.operationName = operationName
+ so.mux.Unlock()
+}
+
+// OnSetTag implements SpanObserver
+func (so *SpanObserver) OnSetTag(key string, value interface{}) {
+ so.mux.Lock()
+ so.handleTagInLock(key, value)
+ so.mux.Unlock()
+}
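
Per the README above, the observer is attached when the tracer is constructed. The sketch below assumes jaeger.NewTracer and the TracerOptions.Observer hook, both of which live in other files of this vendored library, and uses the const sampler and null reporter added earlier in this diff:

package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/rpcmetrics"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	obs := rpcmetrics.NewObserver(metrics.NullFactory, rpcmetrics.DefaultNameNormalizer)

	// Assumed constructor and option from elsewhere in this library.
	tracer, closer := jaeger.NewTracer(
		"podman",
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.Observer(obs),
	)
	defer closer.Close()

	// Inbound RPC spans finished on this tracer now emit request/latency metrics.
	span := tracer.StartSpan("container.create")
	span.Finish()
}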
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
new file mode 100644
index 000000000..3e1630953
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler.go
@@ -0,0 +1,557 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "math"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/uber/jaeger-client-go/log"
+ "github.com/uber/jaeger-client-go/thrift-gen/sampling"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+ defaultSamplingServerURL = "http://localhost:5778/sampling"
+ defaultSamplingRefreshInterval = time.Minute
+ defaultMaxOperations = 2000
+)
+
+// Sampler decides whether a new trace should be sampled or not.
+type Sampler interface {
+ // IsSampled decides whether a trace with given `id` and `operation`
+ // should be sampled. This function will also return the tags that
+ // can be used to identify the type of sampling that was applied to
+ // the root span. Most simple samplers would return two tags,
+ // sampler.type and sampler.param, similar to those used in the Configuration
+ IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
+
+ // Close does a clean shutdown of the sampler, stopping any background
+ // go-routines it may have started.
+ Close()
+
+ // Equal checks if the `other` sampler is functionally equivalent
+ // to this sampler.
+ // TODO remove this function. This function is used to determine if 2 samplers are equivalent
+ // which does not bode well with the adaptive sampler which has to create all the composite samplers
+ // for the comparison to occur. This is expensive to do if only one sampler has changed.
+ Equal(other Sampler) bool
+}
+
+// -----------------------
+
+// ConstSampler is a sampler that always makes the same decision.
+type ConstSampler struct {
+ Decision bool
+ tags []Tag
+}
+
+// NewConstSampler creates a ConstSampler.
+func NewConstSampler(sample bool) Sampler {
+ tags := []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeConst},
+ {key: SamplerParamTagKey, value: sample},
+ }
+ return &ConstSampler{Decision: sample, tags: tags}
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.Decision, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ConstSampler) Close() {
+ // nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ConstSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*ConstSampler); ok {
+ return s.Decision == o.Decision
+ }
+ return false
+}
+
+// -----------------------
+
+// ProbabilisticSampler is a sampler that randomly samples a certain percentage
+// of traces.
+type ProbabilisticSampler struct {
+ samplingRate float64
+ samplingBoundary uint64
+ tags []Tag
+}
+
+const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
+
+// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
+// samplingRate, in the range between 0.0 and 1.0.
+//
+// It relies on the fact that new trace IDs are 63bit random numbers themselves, thus making the sampling decision
+// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63).
+// TODO remove the error from this function for next major release
+func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
+ if samplingRate < 0.0 || samplingRate > 1.0 {
+ return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+ }
+ return newProbabilisticSampler(samplingRate), nil
+}
+
+func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
+ samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
+ tags := []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
+ {key: SamplerParamTagKey, value: samplingRate},
+ }
+ return &ProbabilisticSampler{
+ samplingRate: samplingRate,
+ samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate),
+ tags: tags,
+ }
+}
+
+// SamplingRate returns the sampling probability this sampler was constructed with.
+func (s *ProbabilisticSampler) SamplingRate() float64 {
+ return s.samplingRate
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.samplingBoundary >= id.Low, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ProbabilisticSampler) Close() {
+ // nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ProbabilisticSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*ProbabilisticSampler); ok {
+ return s.samplingBoundary == o.samplingBoundary
+ }
+ return false
+}
+
+// -----------------------
+
+type rateLimitingSampler struct {
+ maxTracesPerSecond float64
+ rateLimiter utils.RateLimiter
+ tags []Tag
+}
+
+// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled
+// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those
+// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of
+// sequential requests can be sampled each second.
+func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler {
+ tags := []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
+ {key: SamplerParamTagKey, value: maxTracesPerSecond},
+ }
+ return &rateLimitingSampler{
+ maxTracesPerSecond: maxTracesPerSecond,
+ rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)),
+ tags: tags,
+ }
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.rateLimiter.CheckCredit(1.0), s.tags
+}
+
+func (s *rateLimitingSampler) Close() {
+ // nothing to do
+}
+
+func (s *rateLimitingSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*rateLimitingSampler); ok {
+ return s.maxTracesPerSecond == o.maxTracesPerSecond
+ }
+ return false
+}
+
+// -----------------------
+
+// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and
+// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that
+// every operation is sampled at least once in a time interval defined by the lowerBound, i.e. a lowerBound
+// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
+//
+// The probabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() for both
+// samplers return true, the tags for probabilisticSampler will be used.
+type GuaranteedThroughputProbabilisticSampler struct {
+ probabilisticSampler *ProbabilisticSampler
+ lowerBoundSampler Sampler
+ tags []Tag
+ samplingRate float64
+ lowerBound float64
+}
+
+// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
+// probabilisticSampler and rateLimitingSampler.
+func NewGuaranteedThroughputProbabilisticSampler(
+ lowerBound, samplingRate float64,
+) (*GuaranteedThroughputProbabilisticSampler, error) {
+ return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
+}
+
+func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
+ s := &GuaranteedThroughputProbabilisticSampler{
+ lowerBoundSampler: NewRateLimitingSampler(lowerBound),
+ lowerBound: lowerBound,
+ }
+ s.setProbabilisticSampler(samplingRate)
+ return s
+}
+
+func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
+ if s.probabilisticSampler == nil || s.samplingRate != samplingRate {
+ s.probabilisticSampler = newProbabilisticSampler(samplingRate)
+ s.samplingRate = s.probabilisticSampler.SamplingRate()
+ s.tags = []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
+ {key: SamplerParamTagKey, value: s.samplingRate},
+ }
+ }
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
+ s.lowerBoundSampler.IsSampled(id, operation)
+ return true, tags
+ }
+ sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
+ return sampled, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Close() {
+ s.probabilisticSampler.Close()
+ s.lowerBoundSampler.Close()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
+ // more information.
+ return false
+}
+
+// this function should only be called while holding a Write lock
+func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
+ s.setProbabilisticSampler(samplingRate)
+ if s.lowerBound != lowerBound {
+ s.lowerBoundSampler = NewRateLimitingSampler(lowerBound)
+ s.lowerBound = lowerBound
+ }
+}
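A sketch combining the two behaviors described above: roughly 0.1% probabilistic sampling with a guarantee of at least one sampled trace per operation every 10 minutes (values illustrative):

    package main

    import (
        "fmt"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        sampler, err := jaeger.NewGuaranteedThroughputProbabilisticSampler(1.0/(60*10), 0.001)
        if err != nil {
            panic(err)
        }
        defer sampler.Close()

        sampled, tags := sampler.IsSampled(jaeger.TraceID{Low: 7}, "db.query")
        fmt.Println(sampled, tags)
    }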
+
+// -----------------------
+
+type adaptiveSampler struct {
+ sync.RWMutex
+
+ samplers map[string]*GuaranteedThroughputProbabilisticSampler
+ defaultSampler *ProbabilisticSampler
+ lowerBound float64
+ maxOperations int
+}
+
+// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and
+// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all
+// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler.
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) {
+ return newAdaptiveSampler(strategies, maxOperations), nil
+}
+
+func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler {
+ samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
+ for _, strategy := range strategies.PerOperationStrategies {
+ sampler := newGuaranteedThroughputProbabilisticSampler(
+ strategies.DefaultLowerBoundTracesPerSecond,
+ strategy.ProbabilisticSampling.SamplingRate,
+ )
+ samplers[strategy.Operation] = sampler
+ }
+ return &adaptiveSampler{
+ samplers: samplers,
+ defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability),
+ lowerBound: strategies.DefaultLowerBoundTracesPerSecond,
+ maxOperations: maxOperations,
+ }
+}
+
+func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ s.RLock()
+ sampler, ok := s.samplers[operation]
+ if ok {
+ defer s.RUnlock()
+ return sampler.IsSampled(id, operation)
+ }
+ s.RUnlock()
+ s.Lock()
+ defer s.Unlock()
+
+ // Check if sampler has already been created
+ sampler, ok = s.samplers[operation]
+ if ok {
+ return sampler.IsSampled(id, operation)
+ }
+ // Store only up to maxOperations unique operations.
+ if len(s.samplers) >= s.maxOperations {
+ return s.defaultSampler.IsSampled(id, operation)
+ }
+ newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
+ s.samplers[operation] = newSampler
+ return newSampler.IsSampled(id, operation)
+}
+
+func (s *adaptiveSampler) Close() {
+ s.Lock()
+ defer s.Unlock()
+ for _, sampler := range s.samplers {
+ sampler.Close()
+ }
+ s.defaultSampler.Close()
+}
+
+func (s *adaptiveSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple
+ // samplers which all need to be initialized before this function can be called for a comparison.
+ // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need
+ // changing. Hence this function always returns false so that the update function can be called.
+ // Once the Equal() function is removed from the Sampler API, this will no longer be needed.
+ return false
+}
+
+func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
+ s.Lock()
+ defer s.Unlock()
+ for _, strategy := range strategies.PerOperationStrategies {
+ operation := strategy.Operation
+ samplingRate := strategy.ProbabilisticSampling.SamplingRate
+ lowerBound := strategies.DefaultLowerBoundTracesPerSecond
+ if sampler, ok := s.samplers[operation]; ok {
+ sampler.update(lowerBound, samplingRate)
+ } else {
+ sampler := newGuaranteedThroughputProbabilisticSampler(
+ lowerBound,
+ samplingRate,
+ )
+ s.samplers[operation] = sampler
+ }
+ }
+ s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
+ if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
+ s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
+ }
+}
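A sketch of building the adaptive sampler from a per-operation strategy payload. The OperationSamplingStrategy and ProbabilisticSamplingStrategy struct literals are assumed from the generated thrift-gen/sampling package; the rates and the 200-operation cap are illustrative:

    package main

    import (
        jaeger "github.com/uber/jaeger-client-go"
        "github.com/uber/jaeger-client-go/thrift-gen/sampling"
    )

    func main() {
        strategies := &sampling.PerOperationSamplingStrategies{
            DefaultSamplingProbability:       0.001,
            DefaultLowerBoundTracesPerSecond: 1.0 / 60, // at least once per minute per operation
            PerOperationStrategies: []*sampling.OperationSamplingStrategy{
                {
                    Operation:             "GET /users",
                    ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: 0.01},
                },
            },
        }

        // Once maxOperations distinct operation names are being tracked, additional
        // operations fall back to the default probabilistic sampler.
        sampler, err := jaeger.NewAdaptiveSampler(strategies, 200)
        if err != nil {
            panic(err)
        }
        defer sampler.Close()
    }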
+
+// -----------------------
+
+// RemotelyControlledSampler is a delegating sampler that polls a remote server
+// for the appropriate sampling strategy, constructs a corresponding sampler and
+// delegates to it for sampling decisions.
+type RemotelyControlledSampler struct {
+ // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+ // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+ closed int64 // 0 - not closed, 1 - closed
+
+ sync.RWMutex
+ samplerOptions
+
+ serviceName string
+ manager sampling.SamplingManager
+ doneChan chan *sync.WaitGroup
+}
+
+type httpSamplingManager struct {
+ serverURL string
+}
+
+func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
+ var out sampling.SamplingStrategyResponse
+ v := url.Values{}
+ v.Set("service", serviceName)
+ if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+// NewRemotelyControlledSampler creates a sampler that periodically pulls
+// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
+func NewRemotelyControlledSampler(
+ serviceName string,
+ opts ...SamplerOption,
+) *RemotelyControlledSampler {
+ options := applySamplerOptions(opts...)
+ sampler := &RemotelyControlledSampler{
+ samplerOptions: options,
+ serviceName: serviceName,
+ manager: &httpSamplingManager{serverURL: options.samplingServerURL},
+ doneChan: make(chan *sync.WaitGroup),
+ }
+ go sampler.pollController()
+ return sampler
+}
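A sketch of constructing the remotely controlled sampler with the options defined in sampler_options.go below; the agent URL, refresh interval, and initial sampler are illustrative:

    package main

    import (
        "time"

        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        sampler := jaeger.NewRemotelyControlledSampler(
            "my-service",
            jaeger.SamplerOptions.SamplingServerURL("http://localhost:5778/sampling"),
            jaeger.SamplerOptions.SamplingRefreshInterval(time.Minute),
            jaeger.SamplerOptions.InitialSampler(jaeger.NewRateLimitingSampler(10)),
        )
        // Close stops the background polling goroutine.
        defer sampler.Close()

        sampled, _ := sampler.IsSampled(jaeger.TraceID{Low: 1}, "GET /health")
        _ = sampled
    }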
+
+func applySamplerOptions(opts ...SamplerOption) samplerOptions {
+ options := samplerOptions{}
+ for _, option := range opts {
+ option(&options)
+ }
+ if options.sampler == nil {
+ options.sampler = newProbabilisticSampler(0.001)
+ }
+ if options.logger == nil {
+ options.logger = log.NullLogger
+ }
+ if options.maxOperations <= 0 {
+ options.maxOperations = defaultMaxOperations
+ }
+ if options.samplingServerURL == "" {
+ options.samplingServerURL = defaultSamplingServerURL
+ }
+ if options.metrics == nil {
+ options.metrics = NewNullMetrics()
+ }
+ if options.samplingRefreshInterval <= 0 {
+ options.samplingRefreshInterval = defaultSamplingRefreshInterval
+ }
+ return options
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ s.RLock()
+ defer s.RUnlock()
+ return s.sampler.IsSampled(id, operation)
+}
+
+// Close implements Close() of Sampler.
+func (s *RemotelyControlledSampler) Close() {
+ if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
+ s.logger.Error("Repeated attempt to close the sampler is ignored")
+ return
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ s.doneChan <- &wg
+ wg.Wait()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
+ // more information.
+ if o, ok := other.(*RemotelyControlledSampler); ok {
+ s.RLock()
+ o.RLock()
+ defer s.RUnlock()
+ defer o.RUnlock()
+ return s.sampler.Equal(o.sampler)
+ }
+ return false
+}
+
+func (s *RemotelyControlledSampler) pollController() {
+ ticker := time.NewTicker(s.samplingRefreshInterval)
+ defer ticker.Stop()
+ s.pollControllerWithTicker(ticker)
+}
+
+func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
+ for {
+ select {
+ case <-ticker.C:
+ s.updateSampler()
+ case wg := <-s.doneChan:
+ wg.Done()
+ return
+ }
+ }
+}
+
+func (s *RemotelyControlledSampler) getSampler() Sampler {
+ s.Lock()
+ defer s.Unlock()
+ return s.sampler
+}
+
+func (s *RemotelyControlledSampler) setSampler(sampler Sampler) {
+ s.Lock()
+ defer s.Unlock()
+ s.sampler = sampler
+}
+
+func (s *RemotelyControlledSampler) updateSampler() {
+ res, err := s.manager.GetSamplingStrategy(s.serviceName)
+ if err != nil {
+ s.metrics.SamplerQueryFailure.Inc(1)
+ s.logger.Infof("Unable to query sampling strategy: %v", err)
+ return
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ s.metrics.SamplerRetrieved.Inc(1)
+ if strategies := res.GetOperationSampling(); strategies != nil {
+ s.updateAdaptiveSampler(strategies)
+ } else {
+ err = s.updateRateLimitingOrProbabilisticSampler(res)
+ }
+ if err != nil {
+ s.metrics.SamplerUpdateFailure.Inc(1)
+ s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err)
+ return
+ }
+ s.metrics.SamplerUpdated.Inc(1)
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) {
+ if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok {
+ adaptiveSampler.update(strategies)
+ } else {
+ s.sampler = newAdaptiveSampler(strategies, s.maxOperations)
+ }
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error {
+ var newSampler Sampler
+ if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil {
+ newSampler = newProbabilisticSampler(probabilistic.SamplingRate)
+ } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil {
+ newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond))
+ } else {
+ return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType())
+ }
+ if !s.sampler.Equal(newSampler) {
+ s.sampler = newSampler
+ }
+ return nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_options.go
new file mode 100644
index 000000000..75d28a561
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_options.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+)
+
+// SamplerOption is a function that sets some option on the sampler
+type SamplerOption func(options *samplerOptions)
+
+// SamplerOptions is a factory for all available SamplerOption's
+var SamplerOptions samplerOptions
+
+type samplerOptions struct {
+ metrics *Metrics
+ maxOperations int
+ sampler Sampler
+ logger Logger
+ samplingServerURL string
+ samplingRefreshInterval time.Duration
+}
+
+// Metrics creates a SamplerOption that initializes Metrics on the sampler,
+// which is used to emit statistics.
+func (samplerOptions) Metrics(m *Metrics) SamplerOption {
+ return func(o *samplerOptions) {
+ o.metrics = m
+ }
+}
+
+// MaxOperations creates a SamplerOption that sets the maximum number of
+// operations the sampler will keep track of.
+func (samplerOptions) MaxOperations(maxOperations int) SamplerOption {
+ return func(o *samplerOptions) {
+ o.maxOperations = maxOperations
+ }
+}
+
+// InitialSampler creates a SamplerOption that sets the initial sampler
+// to use before a remote sampler is created and used.
+func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption {
+ return func(o *samplerOptions) {
+ o.sampler = sampler
+ }
+}
+
+// Logger creates a SamplerOption that sets the logger used by the sampler.
+func (samplerOptions) Logger(logger Logger) SamplerOption {
+ return func(o *samplerOptions) {
+ o.logger = logger
+ }
+}
+
+// SamplingServerURL creates a SamplerOption that sets the sampling server URL
+// of the local agent that contains the sampling strategies.
+func (samplerOptions) SamplingServerURL(samplingServerURL string) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingServerURL = samplingServerURL
+ }
+}
+
+// SamplingRefreshInterval creates a SamplerOption that sets how often the
+// sampler will poll the local agent for the appropriate sampling strategy.
+func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingRefreshInterval = samplingRefreshInterval
+ }
+}
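These options follow the functional-options pattern; a sketch of composing several of them (the jaegerlog alias is illustrative, and StdLogger is assumed from this library's log subpackage):

    package main

    import (
        jaeger "github.com/uber/jaeger-client-go"
        jaegerlog "github.com/uber/jaeger-client-go/log"
    )

    func main() {
        opts := []jaeger.SamplerOption{
            jaeger.SamplerOptions.MaxOperations(500),
            jaeger.SamplerOptions.Metrics(jaeger.NewNullMetrics()),
            jaeger.SamplerOptions.Logger(jaegerlog.StdLogger),
        }
        sampler := jaeger.NewRemotelyControlledSampler("my-service", opts...)
        defer sampler.Close()
    }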
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
new file mode 100644
index 000000000..f0b497a90
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -0,0 +1,249 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "sync"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// Span implements opentracing.Span
+type Span struct {
+ sync.RWMutex
+
+ tracer *Tracer
+
+ context SpanContext
+
+ // The name of the "operation" this span is an instance of.
+ // Known as a "span name" in some implementations.
+ operationName string
+
+ // firstInProcess, if true, indicates that this span is the root of the (sub)tree
+ // of spans in the current process. In other words it's true for the root spans,
+ // and the ingress spans when the process joins another trace.
+ firstInProcess bool
+
+ // startTime is the timestamp indicating when the span began, with microseconds precision.
+ startTime time.Time
+
+ // duration is the duration of the span, with microseconds precision.
+ // A zero value means the duration is unknown.
+ duration time.Duration
+
+ // tags attached to this span
+ tags []Tag
+
+ // The span's "micro-log"
+ logs []opentracing.LogRecord
+
+ // references for this span
+ references []Reference
+
+ observer ContribSpanObserver
+}
+
+// Tag is a simple key value wrapper.
+// TODO deprecate in the next major release, use opentracing.Tag instead.
+type Tag struct {
+ key string
+ value interface{}
+}
+
+// SetOperationName sets or changes the operation name.
+func (s *Span) SetOperationName(operationName string) opentracing.Span {
+ s.Lock()
+ defer s.Unlock()
+ if s.context.IsSampled() {
+ s.operationName = operationName
+ }
+ s.observer.OnSetOperationName(operationName)
+ return s
+}
+
+// SetTag implements SetTag() of opentracing.Span
+func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
+ s.observer.OnSetTag(key, value)
+ if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
+ return s
+ }
+ s.Lock()
+ defer s.Unlock()
+ if s.context.IsSampled() {
+ s.setTagNoLocking(key, value)
+ }
+ return s
+}
+
+func (s *Span) setTagNoLocking(key string, value interface{}) {
+ s.tags = append(s.tags, Tag{key: key, value: value})
+}
+
+// LogFields implements opentracing.Span API
+func (s *Span) LogFields(fields ...log.Field) {
+ s.Lock()
+ defer s.Unlock()
+ if !s.context.IsSampled() {
+ return
+ }
+ s.logFieldsNoLocking(fields...)
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) logFieldsNoLocking(fields ...log.Field) {
+ lr := opentracing.LogRecord{
+ Fields: fields,
+ Timestamp: time.Now(),
+ }
+ s.appendLog(lr)
+}
+
+// LogKV implements opentracing.Span API
+func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
+ s.RLock()
+ sampled := s.context.IsSampled()
+ s.RUnlock()
+ if !sampled {
+ return
+ }
+ fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
+ if err != nil {
+ s.LogFields(log.Error(err), log.String("function", "LogKV"))
+ return
+ }
+ s.LogFields(fields...)
+}
+
+// LogEvent implements opentracing.Span API
+func (s *Span) LogEvent(event string) {
+ s.Log(opentracing.LogData{Event: event})
+}
+
+// LogEventWithPayload implements opentracing.Span API
+func (s *Span) LogEventWithPayload(event string, payload interface{}) {
+ s.Log(opentracing.LogData{Event: event, Payload: payload})
+}
+
+// Log implements opentracing.Span API
+func (s *Span) Log(ld opentracing.LogData) {
+ s.Lock()
+ defer s.Unlock()
+ if s.context.IsSampled() {
+ if ld.Timestamp.IsZero() {
+ ld.Timestamp = s.tracer.timeNow()
+ }
+ s.appendLog(ld.ToLogRecord())
+ }
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) appendLog(lr opentracing.LogRecord) {
+ // TODO add logic to limit number of logs per span (issue #46)
+ s.logs = append(s.logs, lr)
+}
+
+// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext
+func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
+ s.Lock()
+ defer s.Unlock()
+ s.tracer.setBaggage(s, key, value)
+ return s
+}
+
+// BaggageItem implements BaggageItem() of opentracing.SpanContext
+func (s *Span) BaggageItem(key string) string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.context.baggage[key]
+}
+
+// Finish implements opentracing.Span API
+func (s *Span) Finish() {
+ s.FinishWithOptions(opentracing.FinishOptions{})
+}
+
+// FinishWithOptions implements opentracing.Span API
+func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
+ if options.FinishTime.IsZero() {
+ options.FinishTime = s.tracer.timeNow()
+ }
+ s.observer.OnFinish(options)
+ s.Lock()
+ if s.context.IsSampled() {
+ s.duration = options.FinishTime.Sub(s.startTime)
+ // Note: bulk logs are not subject to maxLogsPerSpan limit
+ if options.LogRecords != nil {
+ s.logs = append(s.logs, options.LogRecords...)
+ }
+ for _, ld := range options.BulkLogData {
+ s.logs = append(s.logs, ld.ToLogRecord())
+ }
+ }
+ s.Unlock()
+ // call reportSpan even for non-sampled traces, to return span to the pool
+ s.tracer.reportSpan(s)
+}
+
+// Context implements opentracing.Span API
+func (s *Span) Context() opentracing.SpanContext {
+ s.Lock()
+ defer s.Unlock()
+ return s.context
+}
+
+// Tracer implements opentracing.Span API
+func (s *Span) Tracer() opentracing.Tracer {
+ return s.tracer
+}
+
+func (s *Span) String() string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.context.String()
+}
+
+// OperationName allows retrieving current operation name.
+func (s *Span) OperationName() string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.operationName
+}
+
+func (s *Span) serviceName() string {
+ return s.tracer.serviceName
+}
+
+// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
+func setSamplingPriority(s *Span, value interface{}) bool {
+ s.Lock()
+ defer s.Unlock()
+ val, ok := value.(uint16)
+ if !ok {
+ return false
+ }
+ if val == 0 {
+ s.context.flags = s.context.flags & (^flagSampled)
+ return true
+ }
+ if s.tracer.isDebugAllowed(s.operationName) {
+ s.context.flags = s.context.flags | flagDebug | flagSampled
+ return true
+ }
+ return false
+}
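A sketch of how the Span methods above are exercised through the OpenTracing API. NewTracer, NewConstSampler, and NewNullReporter are assumed from elsewhere in this library, and the tag, log, and baggage values are illustrative:

    package main

    import (
        "github.com/opentracing/opentracing-go/ext"
        jaeger "github.com/uber/jaeger-client-go"
    )

    func main() {
        tracer, closer := jaeger.NewTracer("my-service", jaeger.NewConstSampler(true), jaeger.NewNullReporter())
        defer closer.Close()

        span := tracer.StartSpan("fetch-user")
        span.SetTag("user.id", 1234) // recorded only while the span is sampled
        span.LogKV("event", "cache-miss", "key", "user:1234")
        span.SetBaggageItem("tenant", "acme") // propagated to child spans
        ext.SamplingPriority.Set(span, 1)     // routed through setSamplingPriority above
        span.Finish()
    }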
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
new file mode 100644
index 000000000..e48811c50
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
@@ -0,0 +1,411 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+type Agent interface {
+ // Parameters:
+ // - Spans
+ EmitZipkinBatch(spans []*zipkincore.Span) (err error)
+ // Parameters:
+ // - Batch
+ EmitBatch(batch *jaeger.Batch) (err error)
+}
+
+type AgentClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+ return &AgentClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+ return &AgentClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// Parameters:
+// - Spans
+func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) {
+ if err = p.sendEmitZipkinBatch(spans); err != nil {
+ return
+ }
+ return
+}
+
+func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil {
+ return
+ }
+ args := AgentEmitZipkinBatchArgs{
+ Spans: spans,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+// Parameters:
+// - Batch
+func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) {
+ if err = p.sendEmitBatch(batch); err != nil {
+ return
+ }
+ return
+}
+
+func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
+ return
+ }
+ args := AgentEmitBatchArgs{
+ Batch: batch,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+type AgentProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+
+ self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
+ self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
+ return self0
+}
+
+func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x1.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x1
+
+}
+
+type agentProcessorEmitZipkinBatch struct {
+ handler Agent
+}
+
+func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitZipkinBatchArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil {
+ return true, err2
+ }
+ return true, nil
+}
+
+type agentProcessorEmitBatch struct {
+ handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitBatchArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
+ return true, err2
+ }
+ return true, nil
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Spans
+type AgentEmitZipkinBatchArgs struct {
+ Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"`
+}
+
+func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
+ return &AgentEmitZipkinBatchArgs{}
+}
+
+func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
+ return p.Spans
+}
+func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*zipkincore.Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i++ {
+ _elem2 := &zipkincore.Span{}
+ if err := _elem2.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+ }
+ p.Spans = append(p.Spans, _elem2)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
+ }
+ return err
+}
+
+func (p *AgentEmitZipkinBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Batch
+type AgentEmitBatchArgs struct {
+ Batch *jaeger.Batch `thrift:"batch,1" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+ return &AgentEmitBatchArgs{}
+}
+
+var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
+
+func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
+ if !p.IsSetBatch() {
+ return AgentEmitBatchArgs_Batch_DEFAULT
+ }
+ return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+ return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
+ p.Batch = &jaeger.Batch{}
+ if err := p.Batch.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
+ }
+ if err := p.Batch.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
+ }
+ return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
new file mode 100644
index 000000000..aa9857bb8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
@@ -0,0 +1,23 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
new file mode 100644
index 000000000..9c28f11c1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
@@ -0,0 +1,21 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+var GoUnusedProtection__ int
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go
new file mode 100644
index 000000000..1f79c1255
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go
@@ -0,0 +1,435 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package baggage
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type BaggageRestrictionManager interface {
+ // getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+ // Usually, baggageRestrictions apply to all services; however, there may be situations
+ // where a baggageKey might only be allowed to be set by a specific service.
+ //
+ // Parameters:
+ // - ServiceName
+ GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error)
+}
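A sketch of satisfying the generated interface with a fixed in-memory implementation, e.g. for tests; the fixedRestrictionManager type and the restriction values are illustrative:

    package main

    import (
        "fmt"

        "github.com/uber/jaeger-client-go/thrift-gen/baggage"
    )

    // fixedRestrictionManager returns the same restrictions for every service.
    type fixedRestrictionManager struct {
        restrictions []*baggage.BaggageRestriction
    }

    func (m *fixedRestrictionManager) GetBaggageRestrictions(serviceName string) ([]*baggage.BaggageRestriction, error) {
        return m.restrictions, nil
    }

    func main() {
        var mgr baggage.BaggageRestrictionManager = &fixedRestrictionManager{
            restrictions: []*baggage.BaggageRestriction{
                {BaggageKey: "tenant", MaxValueLength: 256},
            },
        }
        r, _ := mgr.GetBaggageRestrictions("my-service")
        fmt.Println(len(r))
    }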
+
+type BaggageRestrictionManagerClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
+ return &BaggageRestrictionManagerClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
+ return &BaggageRestrictionManagerClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+// Usually, baggageRestrictions apply to all services; however, there may be situations
+// where a baggageKey might only be allowed to be set by a specific service.
+//
+// Parameters:
+// - ServiceName
+func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) {
+ if err = p.sendGetBaggageRestrictions(serviceName); err != nil {
+ return
+ }
+ return p.recvGetBaggageRestrictions()
+}
+
+func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{
+ ServiceName: serviceName,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
+ }
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
+ }
+ if method != "getBaggageRestrictions" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name")
+ return
+ }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response")
+ return
+ }
+ if mTypeId == thrift.EXCEPTION {
+ error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error1 error
+ error1, err = error0.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error1
+ return
+ }
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions failed: invalid message type")
+ return
+ }
+ result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+ if err = result.Read(iprot); err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ value = result.GetSuccess()
+ return
+}
+
+type BaggageRestrictionManagerProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler BaggageRestrictionManager
+}
+
+func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
+
+ self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler}
+ return self2
+}
+
+func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x3.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x3
+
+}
+
+type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
+ handler BaggageRestrictionManager
+}
+
+func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+ var retval []*BaggageRestriction
+ var err2 error
+ if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error())
+ oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - ServiceName
+type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
+ ServiceName string `thrift:"serviceName,1" json:"serviceName"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
+ return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
+ return p.ServiceName
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.ServiceName = v
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+ }
+ return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
+ Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
+ return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+}
+
+var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
+ return p.Success
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.readField0(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BaggageRestriction, 0, size)
+ p.Success = tSlice
+ for i := 0; i < size; i++ {
+ _elem4 := &BaggageRestriction{}
+ if err := _elem4.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+ }
+ p.Success = append(p.Success, _elem4)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go
new file mode 100644
index 000000000..ed35ce9ab
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go
@@ -0,0 +1,18 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package baggage
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go
new file mode 100644
index 000000000..7888892f6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go
@@ -0,0 +1,154 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package baggage
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+// Attributes:
+// - BaggageKey
+// - MaxValueLength
+type BaggageRestriction struct {
+ BaggageKey string `thrift:"baggageKey,1,required" json:"baggageKey"`
+ MaxValueLength int32 `thrift:"maxValueLength,2,required" json:"maxValueLength"`
+}
+
+func NewBaggageRestriction() *BaggageRestriction {
+ return &BaggageRestriction{}
+}
+
+func (p *BaggageRestriction) GetBaggageKey() string {
+ return p.BaggageKey
+}
+
+func (p *BaggageRestriction) GetMaxValueLength() int32 {
+ return p.MaxValueLength
+}
+func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetBaggageKey bool = false
+ var issetMaxValueLength bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetBaggageKey = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetMaxValueLength = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetBaggageKey {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"))
+ }
+ if !issetMaxValueLength {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"))
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.BaggageKey = v
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.MaxValueLength = v
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.BaggageKey)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err)
+ }
+ return err
+}
+
+func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err)
+ }
+ return err
+}
+
+func (p *BaggageRestriction) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestriction(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
new file mode 100644
index 000000000..b32c37dd2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
@@ -0,0 +1,242 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type Agent interface {
+ // Parameters:
+ // - Batch
+ EmitBatch(batch *Batch) (err error)
+}
+
+type AgentClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+ return &AgentClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+ return &AgentClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// Parameters:
+// - Batch
+func (p *AgentClient) EmitBatch(batch *Batch) (err error) {
+ if err = p.sendEmitBatch(batch); err != nil {
+ return
+ }
+ return
+}
+
+func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
+ return
+ }
+ args := AgentEmitBatchArgs{
+ Batch: batch,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+type AgentProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+
+ self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
+ return self6
+}
+
+func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x7.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x7
+
+}
+
+type agentProcessorEmitBatch struct {
+ handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitBatchArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
+ return true, err2
+ }
+ return true, nil
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Batch
+type AgentEmitBatchArgs struct {
+ Batch *Batch `thrift:"batch,1" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+ return &AgentEmitBatchArgs{}
+}
+
+var AgentEmitBatchArgs_Batch_DEFAULT *Batch
+
+func (p *AgentEmitBatchArgs) GetBatch() *Batch {
+ if !p.IsSetBatch() {
+ return AgentEmitBatchArgs_Batch_DEFAULT
+ }
+ return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+ return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
+ p.Batch = &Batch{}
+ if err := p.Batch.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
+ }
+ if err := p.Batch.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
+ }
+ return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
new file mode 100644
index 000000000..621b8b1c2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
@@ -0,0 +1,18 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
new file mode 100644
index 000000000..d23ed2fc2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
@@ -0,0 +1,1838 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+type TagType int64
+
+const (
+ TagType_STRING TagType = 0
+ TagType_DOUBLE TagType = 1
+ TagType_BOOL TagType = 2
+ TagType_LONG TagType = 3
+ TagType_BINARY TagType = 4
+)
+
+func (p TagType) String() string {
+ switch p {
+ case TagType_STRING:
+ return "STRING"
+ case TagType_DOUBLE:
+ return "DOUBLE"
+ case TagType_BOOL:
+ return "BOOL"
+ case TagType_LONG:
+ return "LONG"
+ case TagType_BINARY:
+ return "BINARY"
+ }
+ return "<UNSET>"
+}
+
+func TagTypeFromString(s string) (TagType, error) {
+ switch s {
+ case "STRING":
+ return TagType_STRING, nil
+ case "DOUBLE":
+ return TagType_DOUBLE, nil
+ case "BOOL":
+ return TagType_BOOL, nil
+ case "LONG":
+ return TagType_LONG, nil
+ case "BINARY":
+ return TagType_BINARY, nil
+ }
+ return TagType(0), fmt.Errorf("not a valid TagType string")
+}
+
+func TagTypePtr(v TagType) *TagType { return &v }
+
+func (p TagType) MarshalText() ([]byte, error) {
+ return []byte(p.String()), nil
+}
+
+func (p *TagType) UnmarshalText(text []byte) error {
+ q, err := TagTypeFromString(string(text))
+ if err != nil {
+ return err
+ }
+ *p = q
+ return nil
+}
+
+type SpanRefType int64
+
+const (
+ SpanRefType_CHILD_OF SpanRefType = 0
+ SpanRefType_FOLLOWS_FROM SpanRefType = 1
+)
+
+func (p SpanRefType) String() string {
+ switch p {
+ case SpanRefType_CHILD_OF:
+ return "CHILD_OF"
+ case SpanRefType_FOLLOWS_FROM:
+ return "FOLLOWS_FROM"
+ }
+ return "<UNSET>"
+}
+
+func SpanRefTypeFromString(s string) (SpanRefType, error) {
+ switch s {
+ case "CHILD_OF":
+ return SpanRefType_CHILD_OF, nil
+ case "FOLLOWS_FROM":
+ return SpanRefType_FOLLOWS_FROM, nil
+ }
+ return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
+}
+
+func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
+
+func (p SpanRefType) MarshalText() ([]byte, error) {
+ return []byte(p.String()), nil
+}
+
+func (p *SpanRefType) UnmarshalText(text []byte) error {
+ q, err := SpanRefTypeFromString(string(text))
+ if err != nil {
+ return err
+ }
+ *p = q
+ return nil
+}
+
+// Attributes:
+// - Key
+// - VType
+// - VStr
+// - VDouble
+// - VBool
+// - VLong
+// - VBinary
+type Tag struct {
+ Key string `thrift:"key,1,required" json:"key"`
+ VType TagType `thrift:"vType,2,required" json:"vType"`
+ VStr *string `thrift:"vStr,3" json:"vStr,omitempty"`
+ VDouble *float64 `thrift:"vDouble,4" json:"vDouble,omitempty"`
+ VBool *bool `thrift:"vBool,5" json:"vBool,omitempty"`
+ VLong *int64 `thrift:"vLong,6" json:"vLong,omitempty"`
+ VBinary []byte `thrift:"vBinary,7" json:"vBinary,omitempty"`
+}
+
+func NewTag() *Tag {
+ return &Tag{}
+}
+
+func (p *Tag) GetKey() string {
+ return p.Key
+}
+
+func (p *Tag) GetVType() TagType {
+ return p.VType
+}
+
+var Tag_VStr_DEFAULT string
+
+func (p *Tag) GetVStr() string {
+ if !p.IsSetVStr() {
+ return Tag_VStr_DEFAULT
+ }
+ return *p.VStr
+}
+
+var Tag_VDouble_DEFAULT float64
+
+func (p *Tag) GetVDouble() float64 {
+ if !p.IsSetVDouble() {
+ return Tag_VDouble_DEFAULT
+ }
+ return *p.VDouble
+}
+
+var Tag_VBool_DEFAULT bool
+
+func (p *Tag) GetVBool() bool {
+ if !p.IsSetVBool() {
+ return Tag_VBool_DEFAULT
+ }
+ return *p.VBool
+}
+
+var Tag_VLong_DEFAULT int64
+
+func (p *Tag) GetVLong() int64 {
+ if !p.IsSetVLong() {
+ return Tag_VLong_DEFAULT
+ }
+ return *p.VLong
+}
+
+var Tag_VBinary_DEFAULT []byte
+
+func (p *Tag) GetVBinary() []byte {
+ return p.VBinary
+}
+func (p *Tag) IsSetVStr() bool {
+ return p.VStr != nil
+}
+
+func (p *Tag) IsSetVDouble() bool {
+ return p.VDouble != nil
+}
+
+func (p *Tag) IsSetVBool() bool {
+ return p.VBool != nil
+}
+
+func (p *Tag) IsSetVLong() bool {
+ return p.VLong != nil
+}
+
+func (p *Tag) IsSetVBinary() bool {
+ return p.VBinary != nil
+}
+
+func (p *Tag) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetKey bool = false
+ var issetVType bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetKey = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetVType = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ case 5:
+ if err := p.readField5(iprot); err != nil {
+ return err
+ }
+ case 6:
+ if err := p.readField6(iprot); err != nil {
+ return err
+ }
+ case 7:
+ if err := p.readField7(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetKey {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"))
+ }
+ if !issetVType {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"))
+ }
+ return nil
+}
+
+func (p *Tag) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Key = v
+ }
+ return nil
+}
+
+func (p *Tag) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ temp := TagType(v)
+ p.VType = temp
+ }
+ return nil
+}
+
+func (p *Tag) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.VStr = &v
+ }
+ return nil
+}
+
+func (p *Tag) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.VDouble = &v
+ }
+ return nil
+}
+
+func (p *Tag) readField5(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+ } else {
+ p.VBool = &v
+ }
+ return nil
+}
+
+func (p *Tag) readField6(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 6: ", err)
+ } else {
+ p.VLong = &v
+ }
+ return nil
+}
+
+func (p *Tag) readField7(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+ } else {
+ p.VBinary = v
+ }
+ return nil
+}
+
+func (p *Tag) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Tag"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField5(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField6(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField7(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
+ }
+ return err
+}
+
+func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.VType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err)
+ }
+ return err
+}
+
+func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVStr() {
+ if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err)
+ }
+ if err := oprot.WriteString(string(*p.VStr)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVDouble() {
+ if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVBool() {
+ if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(*p.VBool)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVLong() {
+ if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.VLong)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVBinary() {
+ if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err)
+ }
+ if err := oprot.WriteBinary(p.VBinary); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Tag) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Tag(%+v)", *p)
+}
+
+// Attributes:
+// - Timestamp
+// - Fields
+type Log struct {
+ Timestamp int64 `thrift:"timestamp,1,required" json:"timestamp"`
+ Fields []*Tag `thrift:"fields,2,required" json:"fields"`
+}
+
+func NewLog() *Log {
+ return &Log{}
+}
+
+func (p *Log) GetTimestamp() int64 {
+ return p.Timestamp
+}
+
+func (p *Log) GetFields() []*Tag {
+ return p.Fields
+}
+func (p *Log) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetTimestamp bool = false
+ var issetFields bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetTimestamp = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetFields = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetTimestamp {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
+ }
+ if !issetFields {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
+ }
+ return nil
+}
+
+func (p *Log) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Timestamp = v
+ }
+ return nil
+}
+
+func (p *Log) readField2(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Fields = tSlice
+ for i := 0; i < size; i++ {
+ _elem0 := &Tag{}
+ if err := _elem0.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.Fields = append(p.Fields, _elem0)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Log) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Log"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Log) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
+ }
+ return err
+}
+
+func (p *Log) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Fields {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err)
+ }
+ return err
+}
+
+func (p *Log) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Log(%+v)", *p)
+}
+
+// Attributes:
+// - RefType
+// - TraceIdLow
+// - TraceIdHigh
+// - SpanId
+type SpanRef struct {
+ RefType SpanRefType `thrift:"refType,1,required" json:"refType"`
+ TraceIdLow int64 `thrift:"traceIdLow,2,required" json:"traceIdLow"`
+ TraceIdHigh int64 `thrift:"traceIdHigh,3,required" json:"traceIdHigh"`
+ SpanId int64 `thrift:"spanId,4,required" json:"spanId"`
+}
+
+func NewSpanRef() *SpanRef {
+ return &SpanRef{}
+}
+
+func (p *SpanRef) GetRefType() SpanRefType {
+ return p.RefType
+}
+
+func (p *SpanRef) GetTraceIdLow() int64 {
+ return p.TraceIdLow
+}
+
+func (p *SpanRef) GetTraceIdHigh() int64 {
+ return p.TraceIdHigh
+}
+
+func (p *SpanRef) GetSpanId() int64 {
+ return p.SpanId
+}
+func (p *SpanRef) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetRefType bool = false
+ var issetTraceIdLow bool = false
+ var issetTraceIdHigh bool = false
+ var issetSpanId bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetRefType = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetTraceIdLow = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ issetTraceIdHigh = true
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ issetSpanId = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetRefType {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
+ }
+ if !issetTraceIdLow {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+ }
+ if !issetTraceIdHigh {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+ }
+ if !issetSpanId {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+ }
+ return nil
+}
+
+func (p *SpanRef) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ temp := SpanRefType(v)
+ p.RefType = temp
+ }
+ return nil
+}
+
+func (p *SpanRef) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.TraceIdLow = v
+ }
+ return nil
+}
+
+func (p *SpanRef) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.TraceIdHigh = v
+ }
+ return nil
+}
+
+func (p *SpanRef) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.SpanId = v
+ }
+ return nil
+}
+
+func (p *SpanRef) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("SpanRef"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.RefType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err)
+ }
+ return err
+}
+
+func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err)
+ }
+ return err
+}
+
+func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err)
+ }
+ return err
+}
+
+func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err)
+ }
+ return err
+}
+
+func (p *SpanRef) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SpanRef(%+v)", *p)
+}
+
+// Attributes:
+// - TraceIdLow
+// - TraceIdHigh
+// - SpanId
+// - ParentSpanId
+// - OperationName
+// - References
+// - Flags
+// - StartTime
+// - Duration
+// - Tags
+// - Logs
+type Span struct {
+ TraceIdLow int64 `thrift:"traceIdLow,1,required" json:"traceIdLow"`
+ TraceIdHigh int64 `thrift:"traceIdHigh,2,required" json:"traceIdHigh"`
+ SpanId int64 `thrift:"spanId,3,required" json:"spanId"`
+ ParentSpanId int64 `thrift:"parentSpanId,4,required" json:"parentSpanId"`
+ OperationName string `thrift:"operationName,5,required" json:"operationName"`
+ References []*SpanRef `thrift:"references,6" json:"references,omitempty"`
+ Flags int32 `thrift:"flags,7,required" json:"flags"`
+ StartTime int64 `thrift:"startTime,8,required" json:"startTime"`
+ Duration int64 `thrift:"duration,9,required" json:"duration"`
+ Tags []*Tag `thrift:"tags,10" json:"tags,omitempty"`
+ Logs []*Log `thrift:"logs,11" json:"logs,omitempty"`
+}
+
+func NewSpan() *Span {
+ return &Span{}
+}
+
+func (p *Span) GetTraceIdLow() int64 {
+ return p.TraceIdLow
+}
+
+func (p *Span) GetTraceIdHigh() int64 {
+ return p.TraceIdHigh
+}
+
+func (p *Span) GetSpanId() int64 {
+ return p.SpanId
+}
+
+func (p *Span) GetParentSpanId() int64 {
+ return p.ParentSpanId
+}
+
+func (p *Span) GetOperationName() string {
+ return p.OperationName
+}
+
+var Span_References_DEFAULT []*SpanRef
+
+func (p *Span) GetReferences() []*SpanRef {
+ return p.References
+}
+
+func (p *Span) GetFlags() int32 {
+ return p.Flags
+}
+
+func (p *Span) GetStartTime() int64 {
+ return p.StartTime
+}
+
+func (p *Span) GetDuration() int64 {
+ return p.Duration
+}
+
+var Span_Tags_DEFAULT []*Tag
+
+func (p *Span) GetTags() []*Tag {
+ return p.Tags
+}
+
+var Span_Logs_DEFAULT []*Log
+
+func (p *Span) GetLogs() []*Log {
+ return p.Logs
+}
+func (p *Span) IsSetReferences() bool {
+ return p.References != nil
+}
+
+func (p *Span) IsSetTags() bool {
+ return p.Tags != nil
+}
+
+func (p *Span) IsSetLogs() bool {
+ return p.Logs != nil
+}
+
+func (p *Span) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetTraceIdLow bool = false
+ var issetTraceIdHigh bool = false
+ var issetSpanId bool = false
+ var issetParentSpanId bool = false
+ var issetOperationName bool = false
+ var issetFlags bool = false
+ var issetStartTime bool = false
+ var issetDuration bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetTraceIdLow = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetTraceIdHigh = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ issetSpanId = true
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ issetParentSpanId = true
+ case 5:
+ if err := p.readField5(iprot); err != nil {
+ return err
+ }
+ issetOperationName = true
+ case 6:
+ if err := p.readField6(iprot); err != nil {
+ return err
+ }
+ case 7:
+ if err := p.readField7(iprot); err != nil {
+ return err
+ }
+ issetFlags = true
+ case 8:
+ if err := p.readField8(iprot); err != nil {
+ return err
+ }
+ issetStartTime = true
+ case 9:
+ if err := p.readField9(iprot); err != nil {
+ return err
+ }
+ issetDuration = true
+ case 10:
+ if err := p.readField10(iprot); err != nil {
+ return err
+ }
+ case 11:
+ if err := p.readField11(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetTraceIdLow {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+ }
+ if !issetTraceIdHigh {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+ }
+ if !issetSpanId {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+ }
+ if !issetParentSpanId {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
+ }
+ if !issetOperationName {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
+ }
+ if !issetFlags {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
+ }
+ if !issetStartTime {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
+ }
+ if !issetDuration {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
+ }
+ return nil
+}
+
+func (p *Span) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.TraceIdLow = v
+ }
+ return nil
+}
+
+func (p *Span) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.TraceIdHigh = v
+ }
+ return nil
+}
+
+func (p *Span) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.SpanId = v
+ }
+ return nil
+}
+
+func (p *Span) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.ParentSpanId = v
+ }
+ return nil
+}
+
+func (p *Span) readField5(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+ } else {
+ p.OperationName = v
+ }
+ return nil
+}
+
+func (p *Span) readField6(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*SpanRef, 0, size)
+ p.References = tSlice
+ for i := 0; i < size; i++ {
+ _elem1 := &SpanRef{}
+ if err := _elem1.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+ }
+ p.References = append(p.References, _elem1)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) readField7(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+ } else {
+ p.Flags = v
+ }
+ return nil
+}
+
+func (p *Span) readField8(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 8: ", err)
+ } else {
+ p.StartTime = v
+ }
+ return nil
+}
+
+func (p *Span) readField9(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+ } else {
+ p.Duration = v
+ }
+ return nil
+}
+
+func (p *Span) readField10(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Tags = tSlice
+ for i := 0; i < size; i++ {
+ _elem2 := &Tag{}
+ if err := _elem2.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+ }
+ p.Tags = append(p.Tags, _elem2)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) readField11(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Log, 0, size)
+ p.Logs = tSlice
+ for i := 0; i < size; i++ {
+ _elem3 := &Log{}
+ if err := _elem3.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+ }
+ p.Logs = append(p.Logs, _elem3)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Span"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField5(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField6(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField7(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField8(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField9(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField10(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField11(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.OperationName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetReferences() {
+ if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.References {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField7(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.Flags)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.StartTime)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTags() {
+ if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Tags {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
+ if p.IsSetLogs() {
+ if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Logs {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+// - ServiceName
+// - Tags
+type Process struct {
+ ServiceName string `thrift:"serviceName,1,required" json:"serviceName"`
+ Tags []*Tag `thrift:"tags,2" json:"tags,omitempty"`
+}
+
+func NewProcess() *Process {
+ return &Process{}
+}
+
+func (p *Process) GetServiceName() string {
+ return p.ServiceName
+}
+
+var Process_Tags_DEFAULT []*Tag
+
+func (p *Process) GetTags() []*Tag {
+ return p.Tags
+}
+func (p *Process) IsSetTags() bool {
+ return p.Tags != nil
+}
+
+func (p *Process) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetServiceName bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetServiceName = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetServiceName {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
+ }
+ return nil
+}
+
+func (p *Process) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.ServiceName = v
+ }
+ return nil
+}
+
+func (p *Process) readField2(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Tags = tSlice
+ for i := 0; i < size; i++ {
+ _elem4 := &Tag{}
+ if err := _elem4.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+ }
+ p.Tags = append(p.Tags, _elem4)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Process) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Process"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Process) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+ }
+ return err
+}
+
+func (p *Process) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTags() {
+ if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Tags {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Process) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Process(%+v)", *p)
+}
+
+// Attributes:
+// - Process
+// - Spans
+type Batch struct {
+ Process *Process `thrift:"process,1,required" json:"process"`
+ Spans []*Span `thrift:"spans,2,required" json:"spans"`
+}
+
+func NewBatch() *Batch {
+ return &Batch{}
+}
+
+var Batch_Process_DEFAULT *Process
+
+func (p *Batch) GetProcess() *Process {
+ if !p.IsSetProcess() {
+ return Batch_Process_DEFAULT
+ }
+ return p.Process
+}
+
+func (p *Batch) GetSpans() []*Span {
+ return p.Spans
+}
+func (p *Batch) IsSetProcess() bool {
+ return p.Process != nil
+}
+
+func (p *Batch) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetProcess bool = false
+ var issetSpans bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetProcess = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetSpans = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetProcess {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
+ }
+ if !issetSpans {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
+ }
+ return nil
+}
+
+func (p *Batch) readField1(iprot thrift.TProtocol) error {
+ p.Process = &Process{}
+ if err := p.Process.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
+ }
+ return nil
+}
+
+func (p *Batch) readField2(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i++ {
+ _elem5 := &Span{}
+ if err := _elem5.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
+ }
+ p.Spans = append(p.Spans, _elem5)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Batch) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Batch"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err)
+ }
+ if err := p.Process.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err)
+ }
+ return err
+}
+
+func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err)
+ }
+ return err
+}
+
+func (p *Batch) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Batch(%+v)", *p)
+}
+
+// Attributes:
+// - Ok
+type BatchSubmitResponse struct {
+ Ok bool `thrift:"ok,1,required" json:"ok"`
+}
+
+func NewBatchSubmitResponse() *BatchSubmitResponse {
+ return &BatchSubmitResponse{}
+}
+
+func (p *BatchSubmitResponse) GetOk() bool {
+ return p.Ok
+}
+func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOk bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetOk = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOk {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+ }
+ return nil
+}
+
+func (p *BatchSubmitResponse) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Ok = v
+ }
+ return nil
+}
+
+func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(p.Ok)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
+ }
+ return err
+}
+
+func (p *BatchSubmitResponse) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
new file mode 100644
index 000000000..0f6e3a884
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
@@ -0,0 +1,18 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
new file mode 100644
index 000000000..33179cfeb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
@@ -0,0 +1,410 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type SamplingManager interface {
+ // Parameters:
+ // - ServiceName
+ GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error)
+}
+
+type SamplingManagerClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
+ return &SamplingManagerClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
+ return &SamplingManagerClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// Parameters:
+// - ServiceName
+func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) {
+ if err = p.sendGetSamplingStrategy(serviceName); err != nil {
+ return
+ }
+ return p.recvGetSamplingStrategy()
+}
+
+func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := SamplingManagerGetSamplingStrategyArgs{
+ ServiceName: serviceName,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
+ }
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
+ }
+ if method != "getSamplingStrategy" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name")
+ return
+ }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response")
+ return
+ }
+ if mTypeId == thrift.EXCEPTION {
+ error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error2 error
+ error2, err = error1.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error2
+ return
+ }
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type")
+ return
+ }
+ result := SamplingManagerGetSamplingStrategyResult{}
+ if err = result.Read(iprot); err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ value = result.GetSuccess()
+ return
+}
+
+type SamplingManagerProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler SamplingManager
+}
+
+func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
+
+ self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler}
+ return self3
+}
+
+func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x4.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x4
+
+}
+
+type samplingManagerProcessorGetSamplingStrategy struct {
+ handler SamplingManager
+}
+
+func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := SamplingManagerGetSamplingStrategyArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := SamplingManagerGetSamplingStrategyResult{}
+ var retval *SamplingStrategyResponse
+ var err2 error
+ if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error())
+ oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - ServiceName
+type SamplingManagerGetSamplingStrategyArgs struct {
+ ServiceName string `thrift:"serviceName,1" json:"serviceName"`
+}
+
+func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
+ return &SamplingManagerGetSamplingStrategyArgs{}
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
+ return p.ServiceName
+}
+func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.ServiceName = v
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+ }
+ return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type SamplingManagerGetSamplingStrategyResult struct {
+ Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"`
+}
+
+func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
+ return &SamplingManagerGetSamplingStrategyResult{}
+}
+
+var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
+
+func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
+ if !p.IsSetSuccess() {
+ return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
+ }
+ return p.Success
+}
+func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.readField0(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error {
+ p.Success = &SamplingStrategyResponse{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
new file mode 100644
index 000000000..9abaf0542
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
@@ -0,0 +1,873 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+type SamplingStrategyType int64
+
+const (
+ SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
+ SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
+)
+
+func (p SamplingStrategyType) String() string {
+ switch p {
+ case SamplingStrategyType_PROBABILISTIC:
+ return "PROBABILISTIC"
+ case SamplingStrategyType_RATE_LIMITING:
+ return "RATE_LIMITING"
+ }
+ return "<UNSET>"
+}
+
+func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
+ switch s {
+ case "PROBABILISTIC":
+ return SamplingStrategyType_PROBABILISTIC, nil
+ case "RATE_LIMITING":
+ return SamplingStrategyType_RATE_LIMITING, nil
+ }
+ return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
+}
+
+func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
+
+func (p SamplingStrategyType) MarshalText() ([]byte, error) {
+ return []byte(p.String()), nil
+}
+
+func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
+ q, err := SamplingStrategyTypeFromString(string(text))
+ if err != nil {
+ return err
+ }
+ *p = q
+ return nil
+}
+
+// Attributes:
+// - SamplingRate
+type ProbabilisticSamplingStrategy struct {
+ SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"`
+}
+
+func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
+ return &ProbabilisticSamplingStrategy{}
+}
+
+func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
+ return p.SamplingRate
+}
+func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetSamplingRate bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetSamplingRate = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetSamplingRate {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"))
+ }
+ return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.SamplingRate = v
+ }
+ return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err)
+ }
+ return err
+}
+
+func (p *ProbabilisticSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - MaxTracesPerSecond
+type RateLimitingSamplingStrategy struct {
+ MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"`
+}
+
+func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
+ return &RateLimitingSamplingStrategy{}
+}
+
+func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
+ return p.MaxTracesPerSecond
+}
+func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetMaxTracesPerSecond bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetMaxTracesPerSecond = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetMaxTracesPerSecond {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"))
+ }
+ return nil
+}
+
+func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI16(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.MaxTracesPerSecond = v
+ }
+ return nil
+}
+
+func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err)
+ }
+ if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err)
+ }
+ return err
+}
+
+func (p *RateLimitingSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - Operation
+// - ProbabilisticSampling
+type OperationSamplingStrategy struct {
+ Operation string `thrift:"operation,1,required" json:"operation"`
+ ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"`
+}
+
+func NewOperationSamplingStrategy() *OperationSamplingStrategy {
+ return &OperationSamplingStrategy{}
+}
+
+func (p *OperationSamplingStrategy) GetOperation() string {
+ return p.Operation
+}
+
+var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+
+func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+ if !p.IsSetProbabilisticSampling() {
+ return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
+ }
+ return p.ProbabilisticSampling
+}
+func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
+ return p.ProbabilisticSampling != nil
+}
+
+func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOperation bool = false
+ var issetProbabilisticSampling bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetOperation = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetProbabilisticSampling = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOperation {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"))
+ }
+ if !issetProbabilisticSampling {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"))
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Operation = v
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error {
+ p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+ if err := p.ProbabilisticSampling.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Operation)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err)
+ }
+ return err
+}
+
+func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
+ }
+ if err := p.ProbabilisticSampling.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
+ }
+ return err
+}
+
+func (p *OperationSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - DefaultSamplingProbability
+// - DefaultLowerBoundTracesPerSecond
+// - PerOperationStrategies
+// - DefaultUpperBoundTracesPerSecond
+type PerOperationSamplingStrategies struct {
+ DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"`
+ DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"`
+ PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"`
+ DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"`
+}
+
+func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
+ return &PerOperationSamplingStrategies{}
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
+ return p.DefaultSamplingProbability
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
+ return p.DefaultLowerBoundTracesPerSecond
+}
+
+func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
+ return p.PerOperationStrategies
+}
+
+var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
+
+func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
+ if !p.IsSetDefaultUpperBoundTracesPerSecond() {
+ return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
+ }
+ return *p.DefaultUpperBoundTracesPerSecond
+}
+func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
+ return p.DefaultUpperBoundTracesPerSecond != nil
+}
+
+func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetDefaultSamplingProbability bool = false
+ var issetDefaultLowerBoundTracesPerSecond bool = false
+ var issetPerOperationStrategies bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetDefaultSamplingProbability = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetDefaultLowerBoundTracesPerSecond = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ issetPerOperationStrategies = true
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetDefaultSamplingProbability {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"))
+ }
+ if !issetDefaultLowerBoundTracesPerSecond {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"))
+ }
+ if !issetPerOperationStrategies {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"))
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.DefaultSamplingProbability = v
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.DefaultLowerBoundTracesPerSecond = v
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*OperationSamplingStrategy, 0, size)
+ p.PerOperationStrategies = tSlice
+ for i := 0; i < size; i++ {
+ _elem0 := &OperationSamplingStrategy{}
+ if err := _elem0.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.DefaultUpperBoundTracesPerSecond = &v
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err)
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err)
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.PerOperationStrategies {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err)
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDefaultUpperBoundTracesPerSecond() {
+ if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err)
+ }
+ if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
+}
+
+// Attributes:
+// - StrategyType
+// - ProbabilisticSampling
+// - RateLimitingSampling
+// - OperationSampling
+type SamplingStrategyResponse struct {
+ StrategyType SamplingStrategyType `thrift:"strategyType,1,required" json:"strategyType"`
+ ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"`
+ RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"`
+ OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"`
+}
+
+func NewSamplingStrategyResponse() *SamplingStrategyResponse {
+ return &SamplingStrategyResponse{}
+}
+
+func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
+ return p.StrategyType
+}
+
+var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+
+func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+ if !p.IsSetProbabilisticSampling() {
+ return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
+ }
+ return p.ProbabilisticSampling
+}
+
+var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
+
+func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
+ if !p.IsSetRateLimitingSampling() {
+ return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
+ }
+ return p.RateLimitingSampling
+}
+
+var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
+
+func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
+ if !p.IsSetOperationSampling() {
+ return SamplingStrategyResponse_OperationSampling_DEFAULT
+ }
+ return p.OperationSampling
+}
+func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
+ return p.ProbabilisticSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
+ return p.RateLimitingSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
+ return p.OperationSampling != nil
+}
+
+func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetStrategyType bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetStrategyType = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetStrategyType {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"))
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ temp := SamplingStrategyType(v)
+ p.StrategyType = temp
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error {
+ p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+ if err := p.ProbabilisticSampling.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error {
+ p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
+ if err := p.RateLimitingSampling.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error {
+ p.OperationSampling = &PerOperationSamplingStrategies{}
+ if err := p.OperationSampling.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.StrategyType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err)
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetProbabilisticSampling() {
+ if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
+ }
+ if err := p.ProbabilisticSampling.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRateLimitingSampling() {
+ if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err)
+ }
+ if err := p.RateLimitingSampling.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetOperationSampling() {
+ if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err)
+ }
+ if err := p.OperationSampling.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
new file mode 100644
index 000000000..f05144bfc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
@@ -0,0 +1,32 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+const CLIENT_SEND = "cs"
+const CLIENT_RECV = "cr"
+const SERVER_SEND = "ss"
+const SERVER_RECV = "sr"
+const WIRE_SEND = "ws"
+const WIRE_RECV = "wr"
+const CLIENT_SEND_FRAGMENT = "csf"
+const CLIENT_RECV_FRAGMENT = "crf"
+const SERVER_SEND_FRAGMENT = "ssf"
+const SERVER_RECV_FRAGMENT = "srf"
+const LOCAL_COMPONENT = "lc"
+const CLIENT_ADDR = "ca"
+const SERVER_ADDR = "sa"
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
new file mode 100644
index 000000000..34b2b267e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
@@ -0,0 +1,1247 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+type AnnotationType int64
+
+const (
+ AnnotationType_BOOL AnnotationType = 0
+ AnnotationType_BYTES AnnotationType = 1
+ AnnotationType_I16 AnnotationType = 2
+ AnnotationType_I32 AnnotationType = 3
+ AnnotationType_I64 AnnotationType = 4
+ AnnotationType_DOUBLE AnnotationType = 5
+ AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+ switch p {
+ case AnnotationType_BOOL:
+ return "BOOL"
+ case AnnotationType_BYTES:
+ return "BYTES"
+ case AnnotationType_I16:
+ return "I16"
+ case AnnotationType_I32:
+ return "I32"
+ case AnnotationType_I64:
+ return "I64"
+ case AnnotationType_DOUBLE:
+ return "DOUBLE"
+ case AnnotationType_STRING:
+ return "STRING"
+ }
+ return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+ switch s {
+ case "BOOL":
+ return AnnotationType_BOOL, nil
+ case "BYTES":
+ return AnnotationType_BYTES, nil
+ case "I16":
+ return AnnotationType_I16, nil
+ case "I32":
+ return AnnotationType_I32, nil
+ case "I64":
+ return AnnotationType_I64, nil
+ case "DOUBLE":
+ return AnnotationType_DOUBLE, nil
+ case "STRING":
+ return AnnotationType_STRING, nil
+ }
+ return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+ return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+ q, err := AnnotationTypeFromString(string(text))
+ if err != nil {
+ return err
+ }
+ *p = q
+ return nil
+}
+
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+//
+// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
+// the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+//
+// Attributes:
+// - Ipv4: IPv4 host address packed into 4 bytes.
+//
+// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
+// - Port: IPv4 port
+//
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+//
+// Conventionally, when the port isn't known, port = 0.
+// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
+//
+// Conventionally, when the service name isn't known, service_name = "unknown".
+type Endpoint struct {
+ Ipv4 int32 `thrift:"ipv4,1" json:"ipv4"`
+ Port int16 `thrift:"port,2" json:"port"`
+ ServiceName string `thrift:"service_name,3" json:"service_name"`
+}
+
+func NewEndpoint() *Endpoint {
+ return &Endpoint{}
+}
+
+func (p *Endpoint) GetIpv4() int32 {
+ return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+ return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+ return p.ServiceName
+}
+func (p *Endpoint) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Endpoint) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Ipv4 = v
+ }
+ return nil
+}
+
+func (p *Endpoint) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI16(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.Port = v
+ }
+ return nil
+}
+
+func (p *Endpoint) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.ServiceName = v
+ }
+ return nil
+}
+
+func (p *Endpoint) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Endpoint"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.Ipv4)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
+ }
+ return err
+}
+
+func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
+ }
+ if err := oprot.WriteI16(int16(p.Port)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
+ }
+ return err
+}
+
+func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
+ }
+ return err
+}
+
+func (p *Endpoint) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
+// An annotation is similar to a log statement. It includes a host field which
+// allows these events to be attributed properly, and also aggregatable.
+//
+// Attributes:
+// - Timestamp: Microseconds from epoch.
+//
+// This value should use the most precise value possible. For example,
+// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
+// - Value
+// - Host: Always the host that recorded the event. By specifying the host you allow
+// rollup of all events (such as client requests to a service) by IP address.
+type Annotation struct {
+ Timestamp int64 `thrift:"timestamp,1" json:"timestamp"`
+ Value string `thrift:"value,2" json:"value"`
+ Host *Endpoint `thrift:"host,3" json:"host,omitempty"`
+}
+
+func NewAnnotation() *Annotation {
+ return &Annotation{}
+}
+
+func (p *Annotation) GetTimestamp() int64 {
+ return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+ return p.Value
+}
+
+var Annotation_Host_DEFAULT *Endpoint
+
+func (p *Annotation) GetHost() *Endpoint {
+ if !p.IsSetHost() {
+ return Annotation_Host_DEFAULT
+ }
+ return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+ return p.Host != nil
+}
+
+func (p *Annotation) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Annotation) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Timestamp = v
+ }
+ return nil
+}
+
+func (p *Annotation) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.Value = v
+ }
+ return nil
+}
+
+func (p *Annotation) readField3(iprot thrift.TProtocol) error {
+ p.Host = &Endpoint{}
+ if err := p.Host.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+ }
+ return nil
+}
+
+func (p *Annotation) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Annotation"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
+ }
+ return err
+}
+
+func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Value)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+ }
+ return err
+}
+
+func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetHost() {
+ if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
+ }
+ if err := p.Host.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Annotation) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of "http.uri" could be the path to a resource in an
+// RPC call.
+//
+// Binary annotations of type STRING are always queryable, though this is more a
+// historical implementation detail than a structural concern.
+//
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.uri" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
+// you can see the different points of view, which often help in debugging.
+//
+// Attributes:
+// - Key
+// - Value
+// - AnnotationType
+// - Host: The host that recorded the tag, which allows you to differentiate between
+// multiple tags with the same key. There are two exceptions to this.
+//
+// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, or clients such as web browsers.
+type BinaryAnnotation struct {
+ Key string `thrift:"key,1" json:"key"`
+ Value []byte `thrift:"value,2" json:"value"`
+ AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"`
+ Host *Endpoint `thrift:"host,4" json:"host,omitempty"`
+}
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+ return &BinaryAnnotation{}
+}
+
+func (p *BinaryAnnotation) GetKey() string {
+ return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+ return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+ return p.AnnotationType
+}
+
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+ if !p.IsSetHost() {
+ return BinaryAnnotation_Host_DEFAULT
+ }
+ return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+ return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Key = v
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.Value = v
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ temp := AnnotationType(v)
+ p.AnnotationType = temp
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error {
+ p.Host = &Endpoint{}
+ if err := p.Host.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+ }
+ if err := oprot.WriteBinary(p.Value); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
+ }
+ if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetHost() {
+ if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
+ }
+ if err := p.Host.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+//
+// The root span is where trace_id = id and parent_id = Nil. The root span is
+// usually the longest interval in the trace, starting with a SERVER_RECV
+// annotation and ending with a SERVER_SEND.
+//
+// Attributes:
+// - TraceID
+// - Name: Span name in lowercase, rpc method for example
+//
+// Conventionally, when the span name isn't known, name = "unknown".
+// - ID
+// - ParentID
+// - Annotations
+// - BinaryAnnotations
+// - Debug
+// - Timestamp: Microseconds from epoch of the creation of this span.
+//
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+//
+// For compatibility with instrumentation that precedes this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+//
+// This field is optional for compatibility with old data: first-party span
+// stores are expected to support this at time of introduction.
+// - Duration: Measurement of duration in microseconds, used to support queries.
+//
+// This value should be set directly, where possible. Doing so encourages
+// precise measurement decoupled from problems of clocks, such as skew or NTP
+// updates causing time to move backwards.
+//
+// For compatibility with instrumentation that precedes this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+//
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+//
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+type Span struct {
+ TraceID int64 `thrift:"trace_id,1" json:"trace_id"`
+ // unused field # 2
+ Name string `thrift:"name,3" json:"name"`
+ ID int64 `thrift:"id,4" json:"id"`
+ ParentID *int64 `thrift:"parent_id,5" json:"parent_id,omitempty"`
+ Annotations []*Annotation `thrift:"annotations,6" json:"annotations"`
+ // unused field # 7
+ BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"`
+ Debug bool `thrift:"debug,9" json:"debug,omitempty"`
+ Timestamp *int64 `thrift:"timestamp,10" json:"timestamp,omitempty"`
+ Duration *int64 `thrift:"duration,11" json:"duration,omitempty"`
+}
+
+func NewSpan() *Span {
+ return &Span{}
+}
+
+func (p *Span) GetTraceID() int64 {
+ return p.TraceID
+}
+
+func (p *Span) GetName() string {
+ return p.Name
+}
+
+func (p *Span) GetID() int64 {
+ return p.ID
+}
+
+var Span_ParentID_DEFAULT int64
+
+func (p *Span) GetParentID() int64 {
+ if !p.IsSetParentID() {
+ return Span_ParentID_DEFAULT
+ }
+ return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+ return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+ return p.BinaryAnnotations
+}
+
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+ return p.Debug
+}
+
+var Span_Timestamp_DEFAULT int64
+
+func (p *Span) GetTimestamp() int64 {
+ if !p.IsSetTimestamp() {
+ return Span_Timestamp_DEFAULT
+ }
+ return *p.Timestamp
+}
+
+var Span_Duration_DEFAULT int64
+
+func (p *Span) GetDuration() int64 {
+ if !p.IsSetDuration() {
+ return Span_Duration_DEFAULT
+ }
+ return *p.Duration
+}
+func (p *Span) IsSetParentID() bool {
+ return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+ return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+ return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+ return p.Duration != nil
+}
+
+func (p *Span) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
+ case 5:
+ if err := p.readField5(iprot); err != nil {
+ return err
+ }
+ case 6:
+ if err := p.readField6(iprot); err != nil {
+ return err
+ }
+ case 8:
+ if err := p.readField8(iprot); err != nil {
+ return err
+ }
+ case 9:
+ if err := p.readField9(iprot); err != nil {
+ return err
+ }
+ case 10:
+ if err := p.readField10(iprot); err != nil {
+ return err
+ }
+ case 11:
+ if err := p.readField11(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Span) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.TraceID = v
+ }
+ return nil
+}
+
+func (p *Span) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.Name = v
+ }
+ return nil
+}
+
+func (p *Span) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.ID = v
+ }
+ return nil
+}
+
+func (p *Span) readField5(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+ } else {
+ p.ParentID = &v
+ }
+ return nil
+}
+
+func (p *Span) readField6(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Annotation, 0, size)
+ p.Annotations = tSlice
+ for i := 0; i < size; i++ {
+ _elem0 := &Annotation{}
+ if err := _elem0.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.Annotations = append(p.Annotations, _elem0)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) readField8(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BinaryAnnotation, 0, size)
+ p.BinaryAnnotations = tSlice
+ for i := 0; i < size; i++ {
+ _elem1 := &BinaryAnnotation{}
+ if err := _elem1.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+ }
+ p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) readField9(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+ } else {
+ p.Debug = v
+ }
+ return nil
+}
+
+func (p *Span) readField10(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 10: ", err)
+ } else {
+ p.Timestamp = &v
+ }
+ return nil
+}
+
+func (p *Span) readField11(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 11: ", err)
+ } else {
+ p.Duration = &v
+ }
+ return nil
+}
+
+func (p *Span) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Span"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField5(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField6(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField8(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField9(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField10(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField11(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TraceID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Name)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.ID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetParentID() {
+ if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.ParentID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Annotations {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.BinaryAnnotations {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
+ }
+ return err
+}
+
+func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDebug() {
+ if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(p.Debug)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimestamp() {
+ if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDuration() {
+ if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Span) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+// - Ok
+type Response struct {
+ Ok bool `thrift:"ok,1,required" json:"ok"`
+}
+
+func NewResponse() *Response {
+ return &Response{}
+}
+
+func (p *Response) GetOk() bool {
+ return p.Ok
+}
+func (p *Response) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOk bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetOk = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOk {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+ }
+ return nil
+}
+
+func (p *Response) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Ok = v
+ }
+ return nil
+}
+
+func (p *Response) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("Response"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *Response) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(p.Ok)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
+ }
+ return err
+}
+
+func (p *Response) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Response(%+v)", *p)
+}
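
Editor's note (not part of the vendored file): the generated types above are plain structs whose Read/Write methods are driven by a thrift.TProtocol. The sketch below shows, under stated assumptions, how a Span carrying one annotation and one binary annotation could be serialized into an in-memory transport. It assumes the vendored thrift package provides NewTMemoryBuffer, and that the zipkincore package defines the SERVER_RECV and AnnotationType_STRING constants as in upstream jaeger-client-go; treat those names as assumptions, not guarantees.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

func main() {
	// Annotations record timed events; binary annotations record tags.
	span := zipkincore.NewSpan()
	span.TraceID = 1
	span.ID = 2
	span.Name = "get /api/v1/myresource"
	span.Annotations = []*zipkincore.Annotation{
		{Timestamp: 1000, Value: zipkincore.SERVER_RECV}, // assumed constant ("sr")
	}
	span.BinaryAnnotations = []*zipkincore.BinaryAnnotation{
		{Key: "http.uri", Value: []byte("/api/v1/myresource"), AnnotationType: zipkincore.AnnotationType_STRING}, // assumed enum value
	}

	// Serialize with the binary protocol into an in-memory buffer
	// (NewTMemoryBuffer is assumed to exist in the vendored thrift package).
	buf := thrift.NewTMemoryBuffer()
	prot := thrift.NewTBinaryProtocolTransport(buf)
	if err := span.Write(prot); err != nil {
		panic(err)
	}
	fmt.Printf("encoded span into %d bytes\n", buf.Len())
}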
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
new file mode 100644
index 000000000..417e883d0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
@@ -0,0 +1,446 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type ZipkinCollector interface {
+ // Parameters:
+ // - Spans
+ SubmitZipkinBatch(spans []*Span) (r []*Response, err error)
+}
+
+type ZipkinCollectorClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
+ }
+}
+
+// Parameters:
+// - Spans
+func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) {
+ if err = p.sendSubmitZipkinBatch(spans); err != nil {
+ return
+ }
+ return p.recvSubmitZipkinBatch()
+}
+
+func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := ZipkinCollectorSubmitZipkinBatchArgs{
+ Spans: spans,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
+ }
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
+ }
+ if method != "submitZipkinBatch" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name")
+ return
+ }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response")
+ return
+ }
+ if mTypeId == thrift.EXCEPTION {
+ error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error3 error
+ error3, err = error2.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error3
+ return
+ }
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type")
+ return
+ }
+ result := ZipkinCollectorSubmitZipkinBatchResult{}
+ if err = result.Read(iprot); err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ value = result.GetSuccess()
+ return
+}
+
+type ZipkinCollectorProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler ZipkinCollector
+}
+
+func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
+
+ self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
+ return self4
+}
+
+func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x5.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x5
+
+}
+
+type zipkinCollectorProcessorSubmitZipkinBatch struct {
+ handler ZipkinCollector
+}
+
+func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := ZipkinCollectorSubmitZipkinBatchArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := ZipkinCollectorSubmitZipkinBatchResult{}
+ var retval []*Response
+ var err2 error
+ if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
+ oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Spans
+type ZipkinCollectorSubmitZipkinBatchArgs struct {
+ Spans []*Span `thrift:"spans,1" json:"spans"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
+ return &ZipkinCollectorSubmitZipkinBatchArgs{}
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
+ return p.Spans
+}
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i++ {
+ _elem6 := &Span{}
+ if err := _elem6.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err)
+ }
+ p.Spans = append(p.Spans, _elem6)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
+ }
+ return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type ZipkinCollectorSubmitZipkinBatchResult struct {
+ Success []*Response `thrift:"success,0" json:"success,omitempty"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
+ return &ZipkinCollectorSubmitZipkinBatchResult{}
+}
+
+var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
+ return p.Success
+}
+func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.readField0(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Response, 0, size)
+ p.Success = tSlice
+ for i := 0; i < size; i++ {
+ _elem7 := &Response{}
+ if err := _elem7.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err)
+ }
+ p.Success = append(p.Success, _elem7)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
+}
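
Editor's note (illustrative only, not part of the diff): the generated client is wired from a transport plus a protocol factory, and SubmitZipkinBatch writes a submitZipkinBatch CALL message and then reads the REPLY. The sketch below assumes thrift.NewTMemoryBuffer exists in the vendored package; because the memory buffer stands in for a real collector connection, the client reads back its own request and rejects it as an invalid message type, so only the wiring is meaningful here.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

func main() {
	// Transport + protocol factory -> generated client.
	trans := thrift.NewTMemoryBuffer() // assumed helper; stands in for a real transport
	client := zipkincore.NewZipkinCollectorClientFactory(trans, thrift.NewTBinaryProtocolFactoryDefault())

	// Send one (empty) span; with no server on the other end the receive
	// step fails, which is expected in this sketch.
	spans := []*zipkincore.Span{zipkincore.NewSpan()}
	if _, err := client.SubmitZipkinBatch(spans); err != nil {
		fmt.Println("expected error without a real collector:", err)
	}
}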
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
new file mode 100644
index 000000000..1d8e642e0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
@@ -0,0 +1,7 @@
+# Apache Thrift
+
+This is a partial copy of Apache Thrift v0.10 (https://github.com/apache/thrift/commit/b2a4d4ae21c789b689dd162deb819665567f481c).
+
+It is vendored code to avoid compatibility issues introduced in Thrift v0.11.
+
+See https://github.com/jaegertracing/jaeger-client-go/pull/303.
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
new file mode 100644
index 000000000..6655cc5a9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+const (
+ UNKNOWN_APPLICATION_EXCEPTION = 0
+ UNKNOWN_METHOD = 1
+ INVALID_MESSAGE_TYPE_EXCEPTION = 2
+ WRONG_METHOD_NAME = 3
+ BAD_SEQUENCE_ID = 4
+ MISSING_RESULT = 5
+ INTERNAL_ERROR = 6
+ PROTOCOL_ERROR = 7
+)
+
+// Application level Thrift exception
+type TApplicationException interface {
+ TException
+ TypeId() int32
+ Read(iprot TProtocol) (TApplicationException, error)
+ Write(oprot TProtocol) error
+}
+
+type tApplicationException struct {
+ message string
+ type_ int32
+}
+
+func (e tApplicationException) Error() string {
+ return e.message
+}
+
+func NewTApplicationException(type_ int32, message string) TApplicationException {
+ return &tApplicationException{message, type_}
+}
+
+func (p *tApplicationException) TypeId() int32 {
+ return p.type_
+}
+
+func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
+ _, err := iprot.ReadStructBegin()
+ if err != nil {
+ return nil, err
+ }
+
+ message := ""
+ type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
+
+ for {
+ _, ttype, id, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return nil, err
+ }
+ if ttype == STOP {
+ break
+ }
+ switch id {
+ case 1:
+ if ttype == STRING {
+ if message, err = iprot.ReadString(); err != nil {
+ return nil, err
+ }
+ } else {
+ if err = SkipDefaultDepth(iprot, ttype); err != nil {
+ return nil, err
+ }
+ }
+ case 2:
+ if ttype == I32 {
+ if type_, err = iprot.ReadI32(); err != nil {
+ return nil, err
+ }
+ } else {
+ if err = SkipDefaultDepth(iprot, ttype); err != nil {
+ return nil, err
+ }
+ }
+ default:
+ if err = SkipDefaultDepth(iprot, ttype); err != nil {
+ return nil, err
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ return nil, err
+ }
+ }
+ return NewTApplicationException(type_, message), iprot.ReadStructEnd()
+}
+
+func (p *tApplicationException) Write(oprot TProtocol) (err error) {
+ err = oprot.WriteStructBegin("TApplicationException")
+ if len(p.Error()) > 0 {
+ err = oprot.WriteFieldBegin("message", STRING, 1)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteString(p.Error())
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldEnd()
+ if err != nil {
+ return
+ }
+ }
+ err = oprot.WriteFieldBegin("type", I32, 2)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteI32(p.type_)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldEnd()
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldStop()
+ if err != nil {
+ return
+ }
+ err = oprot.WriteStructEnd()
+ return
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
new file mode 100644
index 000000000..690d34111
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
@@ -0,0 +1,514 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+type TBinaryProtocol struct {
+ trans TRichTransport
+ origTransport TTransport
+ reader io.Reader
+ writer io.Writer
+ strictRead bool
+ strictWrite bool
+ buffer [64]byte
+}
+
+type TBinaryProtocolFactory struct {
+ strictRead bool
+ strictWrite bool
+}
+
+func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
+ return NewTBinaryProtocol(t, false, true)
+}
+
+func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
+ p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
+ if et, ok := t.(TRichTransport); ok {
+ p.trans = et
+ } else {
+ p.trans = NewTRichTransport(t)
+ }
+ p.reader = p.trans
+ p.writer = p.trans
+ return p
+}
+
+func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
+ return NewTBinaryProtocolFactory(false, true)
+}
+
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
+ return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
+}
+
+func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
+ return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
+}
+
+/**
+ * Writing Methods
+ */
+
+func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+ if p.strictWrite {
+ version := uint32(VERSION_1) | uint32(typeId)
+ e := p.WriteI32(int32(version))
+ if e != nil {
+ return e
+ }
+ e = p.WriteString(name)
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(seqId)
+ return e
+ } else {
+ e := p.WriteString(name)
+ if e != nil {
+ return e
+ }
+ e = p.WriteByte(int8(typeId))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(seqId)
+ return e
+ }
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteMessageEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteStructBegin(name string) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteStructEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+ e := p.WriteByte(int8(typeId))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI16(id)
+ return e
+}
+
+func (p *TBinaryProtocol) WriteFieldEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldStop() error {
+ e := p.WriteByte(STOP)
+ return e
+}
+
+func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+ e := p.WriteByte(int8(keyType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteByte(int8(valueType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteMapEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
+ e := p.WriteByte(int8(elemType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteListEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
+ e := p.WriteByte(int8(elemType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteSetEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteBool(value bool) error {
+ if value {
+ return p.WriteByte(1)
+ }
+ return p.WriteByte(0)
+}
+
+func (p *TBinaryProtocol) WriteByte(value int8) error {
+ e := p.trans.WriteByte(byte(value))
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI16(value int16) error {
+ v := p.buffer[0:2]
+ binary.BigEndian.PutUint16(v, uint16(value))
+ _, e := p.writer.Write(v)
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI32(value int32) error {
+ v := p.buffer[0:4]
+ binary.BigEndian.PutUint32(v, uint32(value))
+ _, e := p.writer.Write(v)
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI64(value int64) error {
+ v := p.buffer[0:8]
+ binary.BigEndian.PutUint64(v, uint64(value))
+ _, err := p.writer.Write(v)
+ return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteDouble(value float64) error {
+ return p.WriteI64(int64(math.Float64bits(value)))
+}
+
+func (p *TBinaryProtocol) WriteString(value string) error {
+ e := p.WriteI32(int32(len(value)))
+ if e != nil {
+ return e
+ }
+ _, err := p.trans.WriteString(value)
+ return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteBinary(value []byte) error {
+ e := p.WriteI32(int32(len(value)))
+ if e != nil {
+ return e
+ }
+ _, err := p.writer.Write(value)
+ return NewTProtocolException(err)
+}
+
+/**
+ * Reading methods
+ */
+
+func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+ size, e := p.ReadI32()
+ if e != nil {
+ return "", typeId, 0, NewTProtocolException(e)
+ }
+ if size < 0 {
+ typeId = TMessageType(size & 0x0ff)
+ version := int64(int64(size) & VERSION_MASK)
+ if version != VERSION_1 {
+ return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
+ }
+ name, e = p.ReadString()
+ if e != nil {
+ return name, typeId, seqId, NewTProtocolException(e)
+ }
+ seqId, e = p.ReadI32()
+ if e != nil {
+ return name, typeId, seqId, NewTProtocolException(e)
+ }
+ return name, typeId, seqId, nil
+ }
+ if p.strictRead {
+ return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
+ }
+ name, e2 := p.readStringBody(size)
+ if e2 != nil {
+ return name, typeId, seqId, e2
+ }
+ b, e3 := p.ReadByte()
+ if e3 != nil {
+ return name, typeId, seqId, e3
+ }
+ typeId = TMessageType(b)
+ seqId, e4 := p.ReadI32()
+ if e4 != nil {
+ return name, typeId, seqId, e4
+ }
+ return name, typeId, seqId, nil
+}
+
+func (p *TBinaryProtocol) ReadMessageEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
+ return
+}
+
+func (p *TBinaryProtocol) ReadStructEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
+ t, err := p.ReadByte()
+ typeId = TType(t)
+ if err != nil {
+ return name, typeId, seqId, err
+ }
+ if t != STOP {
+ seqId, err = p.ReadI16()
+ }
+ return name, typeId, seqId, err
+}
+
+func (p *TBinaryProtocol) ReadFieldEnd() error {
+ return nil
+}
+
+var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
+
+func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
+ k, e := p.ReadByte()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ kType = TType(k)
+ v, e := p.ReadByte()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ vType = TType(v)
+ size32, e := p.ReadI32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+ return kType, vType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadMapEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
+ b, e := p.ReadByte()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ elemType = TType(b)
+ size32, e := p.ReadI32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+
+ return
+}
+
+func (p *TBinaryProtocol) ReadListEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+ b, e := p.ReadByte()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ elemType = TType(b)
+ size32, e := p.ReadI32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+ return elemType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadSetEnd() error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadBool() (bool, error) {
+ b, e := p.ReadByte()
+ v := true
+ if b != 1 {
+ v = false
+ }
+ return v, e
+}
+
+func (p *TBinaryProtocol) ReadByte() (int8, error) {
+ v, err := p.trans.ReadByte()
+ return int8(v), err
+}
+
+func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
+ buf := p.buffer[0:2]
+ err = p.readAll(buf)
+ value = int16(binary.BigEndian.Uint16(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
+ buf := p.buffer[0:4]
+ err = p.readAll(buf)
+ value = int32(binary.BigEndian.Uint32(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
+ buf := p.buffer[0:8]
+ err = p.readAll(buf)
+ value = int64(binary.BigEndian.Uint64(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
+ buf := p.buffer[0:8]
+ err = p.readAll(buf)
+ value = math.Float64frombits(binary.BigEndian.Uint64(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadString() (value string, err error) {
+ size, e := p.ReadI32()
+ if e != nil {
+ return "", e
+ }
+ if size < 0 {
+ err = invalidDataLength
+ return
+ }
+
+ return p.readStringBody(size)
+}
+
+func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
+ size, e := p.ReadI32()
+ if e != nil {
+ return nil, e
+ }
+ if size < 0 {
+ return nil, invalidDataLength
+ }
+ if uint64(size) > p.trans.RemainingBytes() {
+ return nil, invalidDataLength
+ }
+
+ isize := int(size)
+ buf := make([]byte, isize)
+ _, err := io.ReadFull(p.trans, buf)
+ return buf, NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) Flush() (err error) {
+ return NewTProtocolException(p.trans.Flush())
+}
+
+func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
+ return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TBinaryProtocol) Transport() TTransport {
+ return p.origTransport
+}
+
+func (p *TBinaryProtocol) readAll(buf []byte) error {
+ _, err := io.ReadFull(p.reader, buf)
+ return NewTProtocolException(err)
+}
+
+const readLimit = 32768
+
+func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
+ if size < 0 {
+ return "", nil
+ }
+ if uint64(size) > p.trans.RemainingBytes() {
+ return "", invalidDataLength
+ }
+
+ var (
+ buf bytes.Buffer
+ e error
+ b []byte
+ )
+
+ switch {
+ case int(size) <= len(p.buffer):
+ b = p.buffer[:size] // avoids allocation for small reads
+ case int(size) < readLimit:
+ b = make([]byte, size)
+ default:
+ b = make([]byte, readLimit)
+ }
+
+ for size > 0 {
+ _, e = io.ReadFull(p.trans, b)
+ buf.Write(b)
+ if e != nil {
+ break
+ }
+ size -= readLimit
+ if size < readLimit && size > 0 {
+ b = b[:size]
+ }
+ }
+ return buf.String(), NewTProtocolException(e)
+}
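
Editor's note (a minimal sketch, not part of the diff): the strictRead/strictWrite flags above control whether message headers carry the VERSION_1 marker. The round trip below writes a versioned header and reads it back through the same in-memory buffer; it assumes thrift.NewTMemoryBuffer and the CALL message-type constant exist in the vendored package, as in upstream Thrift 0.9.x.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	buf := thrift.NewTMemoryBuffer() // assumed helper

	// strictWrite=true emits the VERSION_1 header; strictRead=true on the
	// reading side insists on it, so both ends agree. Errors are ignored
	// here only for brevity.
	out := thrift.NewTBinaryProtocol(buf, false, true)
	_ = out.WriteMessageBegin("submitZipkinBatch", thrift.CALL, 7)
	_ = out.WriteMessageEnd()
	_ = out.Flush()

	in := thrift.NewTBinaryProtocol(buf, true, true)
	name, mtype, seq, err := in.ReadMessageBegin()
	fmt.Println(name, mtype, seq, err) // submitZipkinBatch 1 7 <nil>
}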
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
new file mode 100644
index 000000000..b9299f2fa
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
@@ -0,0 +1,815 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ COMPACT_PROTOCOL_ID = 0x082
+ COMPACT_VERSION = 1
+ COMPACT_VERSION_MASK = 0x1f
+ COMPACT_TYPE_MASK = 0x0E0
+ COMPACT_TYPE_BITS = 0x07
+ COMPACT_TYPE_SHIFT_AMOUNT = 5
+)
+
+type tCompactType byte
+
+const (
+ COMPACT_BOOLEAN_TRUE = 0x01
+ COMPACT_BOOLEAN_FALSE = 0x02
+ COMPACT_BYTE = 0x03
+ COMPACT_I16 = 0x04
+ COMPACT_I32 = 0x05
+ COMPACT_I64 = 0x06
+ COMPACT_DOUBLE = 0x07
+ COMPACT_BINARY = 0x08
+ COMPACT_LIST = 0x09
+ COMPACT_SET = 0x0A
+ COMPACT_MAP = 0x0B
+ COMPACT_STRUCT = 0x0C
+)
+
+var (
+ ttypeToCompactType map[TType]tCompactType
+)
+
+func init() {
+ ttypeToCompactType = map[TType]tCompactType{
+ STOP: STOP,
+ BOOL: COMPACT_BOOLEAN_TRUE,
+ BYTE: COMPACT_BYTE,
+ I16: COMPACT_I16,
+ I32: COMPACT_I32,
+ I64: COMPACT_I64,
+ DOUBLE: COMPACT_DOUBLE,
+ STRING: COMPACT_BINARY,
+ LIST: COMPACT_LIST,
+ SET: COMPACT_SET,
+ MAP: COMPACT_MAP,
+ STRUCT: COMPACT_STRUCT,
+ }
+}
+
+type TCompactProtocolFactory struct{}
+
+func NewTCompactProtocolFactory() *TCompactProtocolFactory {
+ return &TCompactProtocolFactory{}
+}
+
+func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return NewTCompactProtocol(trans)
+}
+
+type TCompactProtocol struct {
+ trans TRichTransport
+ origTransport TTransport
+
+ // Used to keep track of the last field for the current and previous structs,
+ // so we can compute the field-id deltas.
+ lastField []int
+ lastFieldId int
+
+ // If we encounter a boolean field begin, save the TField here so it can
+ // have the value incorporated.
+ booleanFieldName string
+ booleanFieldId int16
+ booleanFieldPending bool
+
+ // If we read a field header, and it's a boolean field, save the boolean
+ // value here so that readBool can use it.
+ boolValue bool
+ boolValueIsNotNull bool
+ buffer [64]byte
+}
+
+// Create a TCompactProtocol given a TTransport
+func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
+ p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
+ if et, ok := trans.(TRichTransport); ok {
+ p.trans = et
+ } else {
+ p.trans = NewTRichTransport(trans)
+ }
+
+ return p
+
+}
+
+//
+// Public Writing methods.
+//
+
+// Write a message header to the wire. Compact Protocol messages contain the
+// protocol version so we can migrate forwards in the future if need be.
+func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+ err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ _, err = p.writeVarint32(seqid)
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ e := p.WriteString(name)
+ return e
+
+}
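+
+// Illustrative aside (editorial, not in the upstream sources): for a CALL
+// message (typeId == 1) the two header bytes above are 0x82 (the protocol id)
+// followed by
+//
+//   (COMPACT_VERSION & COMPACT_VERSION_MASK) | ((1 << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK) // == 0x21
+//
+// after which the sequence id is written as a plain varint and the method
+// name as a length-prefixed string.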
+
+func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
+
+// Write a struct begin. This doesn't actually put anything on the wire. We
+// use it as an opportunity to put special placeholder markers on the field
+// stack so we can get the field id deltas correct.
+func (p *TCompactProtocol) WriteStructBegin(name string) error {
+ p.lastField = append(p.lastField, p.lastFieldId)
+ p.lastFieldId = 0
+ return nil
+}
+
+// Write a struct end. This doesn't actually put anything on the wire. We use
+// this as an opportunity to pop the last field from the current struct off
+// of the field stack.
+func (p *TCompactProtocol) WriteStructEnd() error {
+ p.lastFieldId = p.lastField[len(p.lastField)-1]
+ p.lastField = p.lastField[:len(p.lastField)-1]
+ return nil
+}
+
+func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+ if typeId == BOOL {
+ // we want to possibly include the value, so we'll wait.
+ p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
+ return nil
+ }
+ _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
+ return NewTProtocolException(err)
+}
+
+// The workhorse of writeFieldBegin. It has the option of doing a
+// 'type override' of the type header. This is used specifically in the
+// boolean field case.
+func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
+ // short lastField = lastField_.pop();
+
+ // if there's a type override, use that.
+ var typeToWrite byte
+ if typeOverride == 0xFF {
+ typeToWrite = byte(p.getCompactType(typeId))
+ } else {
+ typeToWrite = typeOverride
+ }
+ // check if we can use delta encoding for the field id
+ fieldId := int(id)
+ written := 0
+ if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
+ // write them together
+ err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ // write them separate
+ err := p.writeByteDirect(typeToWrite)
+ if err != nil {
+ return 0, err
+ }
+ err = p.WriteI16(id)
+ written = 1 + 2
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ p.lastFieldId = fieldId
+ // p.lastField.Push(field.id);
+ return written, nil
+}
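+
+// Illustrative aside (editorial, not in the upstream sources): the short form
+// above packs a field-id delta of 1..15 into the high nibble and the compact
+// type into the low nibble. Writing field id 5 of type I32 right after field
+// id 3 therefore emits the single byte
+//
+//   byte((5-3)<<4) | byte(COMPACT_I32) // == 0x25
+//
+// while a delta outside 1..15 falls back to a plain type byte followed by the
+// zigzag-varint field id written via WriteI16.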
+
+func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
+
+func (p *TCompactProtocol) WriteFieldStop() error {
+ err := p.writeByteDirect(STOP)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+ if size == 0 {
+ err := p.writeByteDirect(0)
+ return NewTProtocolException(err)
+ }
+ _, err := p.writeVarint32(int32(size))
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapEnd() error { return nil }
+
+// Write a list header.
+func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
+ _, err := p.writeCollectionBegin(elemType, size)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteListEnd() error { return nil }
+
+// Write a set header.
+func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
+ _, err := p.writeCollectionBegin(elemType, size)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteSetEnd() error { return nil }
+
+func (p *TCompactProtocol) WriteBool(value bool) error {
+ v := byte(COMPACT_BOOLEAN_FALSE)
+ if value {
+ v = byte(COMPACT_BOOLEAN_TRUE)
+ }
+ if p.booleanFieldPending {
+ // we haven't written the field header yet
+ _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
+ p.booleanFieldPending = false
+ return NewTProtocolException(err)
+ }
+ // we're not part of a field, so just write the value.
+ err := p.writeByteDirect(v)
+ return NewTProtocolException(err)
+}
+
+// Write a byte. Nothing to see here!
+func (p *TCompactProtocol) WriteByte(value int8) error {
+ err := p.writeByteDirect(byte(value))
+ return NewTProtocolException(err)
+}
+
+// Write an I16 as a zigzag varint.
+func (p *TCompactProtocol) WriteI16(value int16) error {
+ _, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
+ return NewTProtocolException(err)
+}
+
+// Write an i32 as a zigzag varint.
+func (p *TCompactProtocol) WriteI32(value int32) error {
+ _, err := p.writeVarint32(p.int32ToZigzag(value))
+ return NewTProtocolException(err)
+}
+
+// Write an i64 as a zigzag varint.
+func (p *TCompactProtocol) WriteI64(value int64) error {
+ _, err := p.writeVarint64(p.int64ToZigzag(value))
+ return NewTProtocolException(err)
+}
+
+// Write a double to the wire as 8 bytes.
+func (p *TCompactProtocol) WriteDouble(value float64) error {
+ buf := p.buffer[0:8]
+ binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
+ _, err := p.trans.Write(buf)
+ return NewTProtocolException(err)
+}
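+
+// Editorial note (illustrative, not in the upstream sources): compact doubles
+// are little-endian here and in ReadDouble below, whereas the binary protocol
+// earlier in this diff uses big-endian for DOUBLE, so the two wire encodings
+// differ at the byte level. For example:
+//
+//   math.Float64bits(1.0) // 0x3FF0000000000000: LE bytes 00..00 F0 3F, BE bytes 3F F0 00..00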
+
+// Write a string to the wire with a varint size preceding.
+func (p *TCompactProtocol) WriteString(value string) error {
+ _, e := p.writeVarint32(int32(len(value)))
+ if e != nil {
+ return NewTProtocolException(e)
+ }
+ _, e = p.trans.WriteString(value)
+ return e
+}
+
+// Write a byte array, using a varint for the size.
+func (p *TCompactProtocol) WriteBinary(bin []byte) error {
+ _, e := p.writeVarint32(int32(len(bin)))
+ if e != nil {
+ return NewTProtocolException(e)
+ }
+ if len(bin) > 0 {
+ _, e = p.trans.Write(bin)
+ return NewTProtocolException(e)
+ }
+ return nil
+}
+
+//
+// Reading methods.
+//
+
+// Read a message header.
+func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+
+ protocolId, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ if protocolId != COMPACT_PROTOCOL_ID {
+ e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
+ return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
+ }
+
+ versionAndType, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ version := versionAndType & COMPACT_VERSION_MASK
+ typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
+ if version != COMPACT_VERSION {
+ e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
+ err = NewTProtocolExceptionWithType(BAD_VERSION, e)
+ return
+ }
+ seqId, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ name, err = p.ReadString()
+ return
+}
+
+func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
+
+// Read a struct begin. There's nothing on the wire for this, but it is our
+// opportunity to push a new struct begin marker onto the field stack.
+func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
+ p.lastField = append(p.lastField, p.lastFieldId)
+ p.lastFieldId = 0
+ return
+}
+
+// Doesn't actually consume any wire data, just removes the last field for
+// this struct from the field stack.
+func (p *TCompactProtocol) ReadStructEnd() error {
+ // consume the last field we read off the wire.
+ p.lastFieldId = p.lastField[len(p.lastField)-1]
+ p.lastField = p.lastField[:len(p.lastField)-1]
+ return nil
+}
+
+// Read a field header off the wire.
+func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+ t, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ // if it's a stop, then we can return immediately, as the struct is over.
+ if (t & 0x0f) == STOP {
+ return "", STOP, 0, nil
+ }
+
+ // mask off the 4 MSB of the type header. it could contain a field id delta.
+ modifier := int16((t & 0xf0) >> 4)
+ if modifier == 0 {
+ // not a delta. look ahead for the zigzag varint field id.
+ id, err = p.ReadI16()
+ if err != nil {
+ return
+ }
+ } else {
+ // has a delta. add the delta to the last read field id.
+ id = int16(p.lastFieldId) + modifier
+ }
+ typeId, e := p.getTType(tCompactType(t & 0x0f))
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+
+ // if this happens to be a boolean field, the value is encoded in the type
+ if p.isBoolType(t) {
+ // save the boolean value in a special instance variable.
+ p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
+ p.boolValueIsNotNull = true
+ }
+
+ // push the new field onto the field stack so we can keep the deltas going.
+ p.lastFieldId = int(id)
+ return
+}
+
+func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
+
+// Read a map header off the wire. If the size is zero, skip reading the key
+// and value type. This means that 0-length maps will yield TMaps without the
+// "correct" types.
+func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
+ size32, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+
+ keyAndValueType := byte(STOP)
+ if size != 0 {
+ keyAndValueType, err = p.readByteDirect()
+ if err != nil {
+ return
+ }
+ }
+ keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
+ valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
+ return
+}
+
+func (p *TCompactProtocol) ReadMapEnd() error { return nil }
+
+// Read a list header off the wire. If the list size is 0-14, the size will
+// be packed into the element type header. If it's a longer list, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
+ size_and_type, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+ size = int((size_and_type >> 4) & 0x0f)
+ if size == 15 {
+ size2, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size2 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size2)
+ }
+ elemType, e := p.getTType(tCompactType(size_and_type))
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ return
+}
+
+func (p *TCompactProtocol) ReadListEnd() error { return nil }
+
+// Read a set header off the wire. If the set size is 0-14, the size will
+// be packed into the element type header. If it's a longer set, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+ return p.ReadListBegin()
+}
+
+func (p *TCompactProtocol) ReadSetEnd() error { return nil }
+
+// Read a boolean off the wire. If this is a boolean field, the value should
+// already have been read during readFieldBegin, so we'll just consume the
+// pre-stored value. Otherwise, read a byte.
+func (p *TCompactProtocol) ReadBool() (value bool, err error) {
+ if p.boolValueIsNotNull {
+ p.boolValueIsNotNull = false
+ return p.boolValue, nil
+ }
+ v, err := p.readByteDirect()
+ return v == COMPACT_BOOLEAN_TRUE, err
+}
+
+// Read a single byte off the wire. Nothing interesting here.
+func (p *TCompactProtocol) ReadByte() (int8, error) {
+ v, err := p.readByteDirect()
+ if err != nil {
+ return 0, NewTProtocolException(err)
+ }
+ return int8(v), err
+}
+
+// Read an i16 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI16() (value int16, err error) {
+ v, err := p.ReadI32()
+ return int16(v), err
+}
+
+// Read an i32 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI32() (value int32, err error) {
+ v, e := p.readVarint32()
+ if e != nil {
+ return 0, NewTProtocolException(e)
+ }
+ value = p.zigzagToInt32(v)
+ return value, nil
+}
+
+// Read an i64 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI64() (value int64, err error) {
+ v, e := p.readVarint64()
+ if e != nil {
+ return 0, NewTProtocolException(e)
+ }
+ value = p.zigzagToInt64(v)
+ return value, nil
+}
+
+// No magic here - just read a double off the wire.
+func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
+ longBits := p.buffer[0:8]
+ _, e := io.ReadFull(p.trans, longBits)
+ if e != nil {
+ return 0.0, NewTProtocolException(e)
+ }
+ return math.Float64frombits(p.bytesToUint64(longBits)), nil
+}
+
+// Read a string from the wire: a varint length followed by that many bytes.
+func (p *TCompactProtocol) ReadString() (value string, err error) {
+ length, e := p.readVarint32()
+ if e != nil {
+ return "", NewTProtocolException(e)
+ }
+ if length < 0 {
+ return "", invalidDataLength
+ }
+ if uint64(length) > p.trans.RemainingBytes() {
+ return "", invalidDataLength
+ }
+
+ if length == 0 {
+ return "", nil
+ }
+ var buf []byte
+ if length <= int32(len(p.buffer)) {
+ buf = p.buffer[0:length]
+ } else {
+ buf = make([]byte, length)
+ }
+ _, e = io.ReadFull(p.trans, buf)
+ return string(buf), NewTProtocolException(e)
+}
+
+// Read a []byte from the wire.
+func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
+ length, e := p.readVarint32()
+ if e != nil {
+ return nil, NewTProtocolException(e)
+ }
+ if length == 0 {
+ return []byte{}, nil
+ }
+ if length < 0 {
+ return nil, invalidDataLength
+ }
+ if uint64(length) > p.trans.RemainingBytes() {
+ return nil, invalidDataLength
+ }
+
+ buf := make([]byte, length)
+ _, e = io.ReadFull(p.trans, buf)
+ return buf, NewTProtocolException(e)
+}
+
+func (p *TCompactProtocol) Flush() (err error) {
+ return NewTProtocolException(p.trans.Flush())
+}
+
+func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
+ return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TCompactProtocol) Transport() TTransport {
+ return p.origTransport
+}
+
+//
+// Internal writing methods
+//
+
+// Abstract method for writing the start of lists and sets. List and sets on
+// the wire differ only by the type indicator.
+func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
+ if size <= 14 {
+ return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
+ }
+ err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
+ if err != nil {
+ return 0, err
+ }
+ m, err := p.writeVarint32(int32(size))
+ return 1 + m, err
+}
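+
+// Illustrative aside (editorial, not in the upstream sources): a 3-element
+// list of I64 values fits the short form and is announced by the single byte
+//
+//   (3 << 4) | byte(COMPACT_I64) // == 0x36
+//
+// while sizes of 15 or more put 0xF in the high nibble and append the true
+// size as a varint; ReadListBegin unpacks the same nibbles on the way in.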
+
+// Write an i32 as a varint. Results in 1-5 bytes on the wire.
+// TODO(pomack): make a permanent buffer like writeVarint64?
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
+ i32buf := p.buffer[0:5]
+ idx := 0
+ for {
+ if (n & ^0x7F) == 0 {
+ i32buf[idx] = byte(n)
+ idx++
+ // p.writeByteDirect(byte(n));
+ break
+ // return;
+ } else {
+ i32buf[idx] = byte((n & 0x7F) | 0x80)
+ idx++
+ // p.writeByteDirect(byte(((n & 0x7F) | 0x80)));
+ u := uint32(n)
+ n = int32(u >> 7)
+ }
+ }
+ return p.trans.Write(i32buf[0:idx])
+}
+
+// Write an i64 as a varint. Results in 1-10 bytes on the wire.
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
+ varint64out := p.buffer[0:10]
+ idx := 0
+ for {
+ if (n & ^0x7F) == 0 {
+ varint64out[idx] = byte(n)
+ idx++
+ break
+ } else {
+ varint64out[idx] = byte((n & 0x7F) | 0x80)
+ idx++
+ u := uint64(n)
+ n = int64(u >> 7)
+ }
+ }
+ return p.trans.Write(varint64out[0:idx])
+}
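+
+// Illustrative aside (editorial, not in the upstream sources): each varint
+// byte carries 7 payload bits, with the high bit set on all but the last
+// byte. Encoding 300 (binary 1_0010_1100) therefore emits
+//
+//   0xAC, 0x02 // (300 & 0x7F) | 0x80, then 300 >> 7
+//
+// and readVarint64 reassembles it as (0x2C << 0) | (0x02 << 7) == 300.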
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
+ return (l << 1) ^ (l >> 63)
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+ return (n << 1) ^ (n >> 31)
+}
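+
+// Illustrative aside (editorial, not in the upstream sources): zigzag
+// encoding interleaves signed values so that small magnitudes stay small as
+// varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and so on. For example
+//
+//   p.int32ToZigzag(-3) // ((-3) << 1) ^ ((-3) >> 31) == -6 ^ -1 == 5
+//   p.zigzagToInt32(5)  // recovers -3
+//
+// which is why the signed integer writers above route through these helpers
+// before the varint encoder.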
+
+func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
+ binary.LittleEndian.PutUint64(buf, n)
+}
+
+func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
+ binary.LittleEndian.PutUint64(buf, uint64(n))
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+ return p.trans.WriteByte(b)
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
+ return 1, p.writeByteDirect(byte(n))
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+ // if the wire contains the right stuff, this will just truncate the i64 we
+ // read and get us the right sign.
+ v, err := p.readVarint64()
+ return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+ shift := uint(0)
+ result := int64(0)
+ for {
+ b, err := p.readByteDirect()
+ if err != nil {
+ return 0, err
+ }
+ result |= int64(b&0x7f) << shift
+ if (b & 0x80) != 0x80 {
+ break
+ }
+ shift += 7
+ }
+ return result, nil
+}
+
+// Read a raw byte from the transport, unlike ReadByte, which returns a Thrift byte (i8).
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+ return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+ u := uint32(n)
+ return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
+ u := uint64(n)
+ return int64(u>>1) ^ -(n & 1)
+}
+
+// Decode 8 little-endian bytes into an int64.
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
+ return int64(binary.LittleEndian.Uint64(b))
+}
+
+// Decode 8 little-endian bytes into a uint64.
+func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
+ return binary.LittleEndian.Uint64(b)
+}
+
+//
+// type testing and converting
+//
+
+func (p *TCompactProtocol) isBoolType(b byte) bool {
+ return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
+}
+
+// Given a tCompactType constant, convert it to its corresponding
+// TType value.
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
+ switch byte(t) & 0x0f {
+ case STOP:
+ return STOP, nil
+ case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
+ return BOOL, nil
+ case COMPACT_BYTE:
+ return BYTE, nil
+ case COMPACT_I16:
+ return I16, nil
+ case COMPACT_I32:
+ return I32, nil
+ case COMPACT_I64:
+ return I64, nil
+ case COMPACT_DOUBLE:
+ return DOUBLE, nil
+ case COMPACT_BINARY:
+ return STRING, nil
+ case COMPACT_LIST:
+ return LIST, nil
+ case COMPACT_SET:
+ return SET, nil
+ case COMPACT_MAP:
+ return MAP, nil
+ case COMPACT_STRUCT:
+ return STRUCT, nil
+ }
+ return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
+}
+
+// Given a TType value, find the appropriate TCompactProtocol.Types constant.
+func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
+ return ttypeToCompactType[t]
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
new file mode 100644
index 000000000..ea8d6f661
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+)
+
+// Generic Thrift exception
+type TException interface {
+ error
+}
+
+// Prepends additional information to an error without losing the Thrift exception interface
+func PrependError(prepend string, err error) error {
+ if t, ok := err.(TTransportException); ok {
+ return NewTTransportException(t.TypeId(), prepend+t.Error())
+ }
+ if t, ok := err.(TProtocolException); ok {
+ return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
+ }
+ if t, ok := err.(TApplicationException); ok {
+ return NewTApplicationException(t.TypeId(), prepend+t.Error())
+ }
+
+ return errors.New(prepend + err.Error())
+}
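+
+// Editorial sketch (illustrative, not part of the upstream sources):
+// generated code and callers typically use PrependError to add context while
+// preserving the concrete exception type, e.g.
+//
+//   if err := strct.Read(iprot); err != nil {
+//           return PrependError(fmt.Sprintf("%T read error: ", strct), err)
+//   }
+//
+// where strct and iprot are placeholder names. A TProtocolException passed in
+// comes back out as a TProtocolException with the prefix applied, rather than
+// as a plain error.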
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
new file mode 100644
index 000000000..b62fd56f0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bytes"
+)
+
+// Memory buffer-based implementation of the TTransport interface.
+type TMemoryBuffer struct {
+ *bytes.Buffer
+ size int
+}
+
+type TMemoryBufferTransportFactory struct {
+ size int
+}
+
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport {
+ if trans != nil {
+ t, ok := trans.(*TMemoryBuffer)
+ if ok && t.size > 0 {
+ return NewTMemoryBufferLen(t.size)
+ }
+ }
+ return NewTMemoryBufferLen(p.size)
+}
+
+func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
+ return &TMemoryBufferTransportFactory{size: size}
+}
+
+func NewTMemoryBuffer() *TMemoryBuffer {
+ return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
+}
+
+func NewTMemoryBufferLen(size int) *TMemoryBuffer {
+ buf := make([]byte, 0, size)
+ return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
+}
+
+func (p *TMemoryBuffer) IsOpen() bool {
+ return true
+}
+
+func (p *TMemoryBuffer) Open() error {
+ return nil
+}
+
+func (p *TMemoryBuffer) Close() error {
+ p.Buffer.Reset()
+ return nil
+}
+
+// Flushing a memory buffer is a no-op
+func (p *TMemoryBuffer) Flush() error {
+ return nil
+}
+
+func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
+ return uint64(p.Buffer.Len())
+}
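+
+// Editorial sketch (illustrative, not part of the upstream sources): because
+// TMemoryBuffer embeds *bytes.Buffer, anything a protocol writes can be read
+// straight back out of memory, e.g.
+//
+//   buf := NewTMemoryBufferLen(1024)
+//   prot := NewTBinaryProtocolFactoryDefault().GetProtocol(buf)
+//   _ = prot.WriteString("hello")
+//   s, _ := prot.ReadString() // s == "hello"
+//
+// which is how TSerializer, later in this diff, produces its string and byte
+// output.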
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
new file mode 100644
index 000000000..25ab2e98a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Message type constants in the Thrift protocol.
+type TMessageType int32
+
+const (
+ INVALID_TMESSAGE_TYPE TMessageType = 0
+ CALL TMessageType = 1
+ REPLY TMessageType = 2
+ EXCEPTION TMessageType = 3
+ ONEWAY TMessageType = 4
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
new file mode 100644
index 000000000..aa8daa9b5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "math"
+ "strconv"
+)
+
+type Numeric interface {
+ Int64() int64
+ Int32() int32
+ Int16() int16
+ Byte() byte
+ Int() int
+ Float64() float64
+ Float32() float32
+ String() string
+ isNull() bool
+}
+
+type numeric struct {
+ iValue int64
+ dValue float64
+ sValue string
+ isNil bool
+}
+
+var (
+ INFINITY Numeric
+ NEGATIVE_INFINITY Numeric
+ NAN Numeric
+ ZERO Numeric
+ NUMERIC_NULL Numeric
+)
+
+func NewNumericFromDouble(dValue float64) Numeric {
+ if math.IsInf(dValue, 1) {
+ return INFINITY
+ }
+ if math.IsInf(dValue, -1) {
+ return NEGATIVE_INFINITY
+ }
+ if math.IsNaN(dValue) {
+ return NAN
+ }
+ iValue := int64(dValue)
+ sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
+ isNil := false
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI64(iValue int64) Numeric {
+ dValue := float64(iValue)
+ sValue := strconv.FormatInt(iValue, 10) // decimal formatting, not a rune conversion
+ isNil := false
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI32(iValue int32) Numeric {
+ dValue := float64(iValue)
+ sValue := strconv.FormatInt(int64(iValue), 10) // decimal formatting, not a rune conversion
+ isNil := false
+ return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromString(sValue string) Numeric {
+ if sValue == INFINITY.String() {
+ return INFINITY
+ }
+ if sValue == NEGATIVE_INFINITY.String() {
+ return NEGATIVE_INFINITY
+ }
+ if sValue == NAN.String() {
+ return NAN
+ }
+ iValue, _ := strconv.ParseInt(sValue, 10, 64)
+ dValue, _ := strconv.ParseFloat(sValue, 64)
+ isNil := len(sValue) == 0
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
+ if isNull {
+ return NewNullNumeric()
+ }
+ if sValue == JSON_INFINITY {
+ return INFINITY
+ }
+ if sValue == JSON_NEGATIVE_INFINITY {
+ return NEGATIVE_INFINITY
+ }
+ if sValue == JSON_NAN {
+ return NAN
+ }
+ iValue, _ := strconv.ParseInt(sValue, 10, 64)
+ dValue, _ := strconv.ParseFloat(sValue, 64)
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
+}
+
+func NewNullNumeric() Numeric {
+ return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
+}
+
+func (p *numeric) Int64() int64 {
+ return p.iValue
+}
+
+func (p *numeric) Int32() int32 {
+ return int32(p.iValue)
+}
+
+func (p *numeric) Int16() int16 {
+ return int16(p.iValue)
+}
+
+func (p *numeric) Byte() byte {
+ return byte(p.iValue)
+}
+
+func (p *numeric) Int() int {
+ return int(p.iValue)
+}
+
+func (p *numeric) Float64() float64 {
+ return p.dValue
+}
+
+func (p *numeric) Float32() float32 {
+ return float32(p.dValue)
+}
+
+func (p *numeric) String() string {
+ return p.sValue
+}
+
+func (p *numeric) isNull() bool {
+ return p.isNil
+}
+
+func init() {
+ INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
+ NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
+ NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
+ ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
+ NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
new file mode 100644
index 000000000..ca0d3faf2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+ Process(in, out TProtocol) (bool, TException)
+}
+
+type TProcessorFunction interface {
+ Process(seqId int32, in, out TProtocol) (bool, TException)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
new file mode 100644
index 000000000..45fa202e7
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+)
+
+const (
+ VERSION_MASK = 0xffff0000
+ VERSION_1 = 0x80010000
+)
+
+type TProtocol interface {
+ WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
+ WriteMessageEnd() error
+ WriteStructBegin(name string) error
+ WriteStructEnd() error
+ WriteFieldBegin(name string, typeId TType, id int16) error
+ WriteFieldEnd() error
+ WriteFieldStop() error
+ WriteMapBegin(keyType TType, valueType TType, size int) error
+ WriteMapEnd() error
+ WriteListBegin(elemType TType, size int) error
+ WriteListEnd() error
+ WriteSetBegin(elemType TType, size int) error
+ WriteSetEnd() error
+ WriteBool(value bool) error
+ WriteByte(value int8) error
+ WriteI16(value int16) error
+ WriteI32(value int32) error
+ WriteI64(value int64) error
+ WriteDouble(value float64) error
+ WriteString(value string) error
+ WriteBinary(value []byte) error
+
+ ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
+ ReadMessageEnd() error
+ ReadStructBegin() (name string, err error)
+ ReadStructEnd() error
+ ReadFieldBegin() (name string, typeId TType, id int16, err error)
+ ReadFieldEnd() error
+ ReadMapBegin() (keyType TType, valueType TType, size int, err error)
+ ReadMapEnd() error
+ ReadListBegin() (elemType TType, size int, err error)
+ ReadListEnd() error
+ ReadSetBegin() (elemType TType, size int, err error)
+ ReadSetEnd() error
+ ReadBool() (value bool, err error)
+ ReadByte() (value int8, err error)
+ ReadI16() (value int16, err error)
+ ReadI32() (value int32, err error)
+ ReadI64() (value int64, err error)
+ ReadDouble() (value float64, err error)
+ ReadString() (value string, err error)
+ ReadBinary() (value []byte, err error)
+
+ Skip(fieldType TType) (err error)
+ Flush() (err error)
+
+ Transport() TTransport
+}
+
+// The maximum recursive depth the skip() function will traverse
+const DEFAULT_RECURSION_DEPTH = 64
+
+// Skips over the next data element from the provided input TProtocol object.
+func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
+ return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
+}
+
+// Skips over the next data element from the provided input TProtocol object.
+func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
+
+ if maxDepth <= 0 {
+ return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+ }
+
+ switch fieldType {
+ case STOP:
+ return
+ case BOOL:
+ _, err = self.ReadBool()
+ return
+ case BYTE:
+ _, err = self.ReadByte()
+ return
+ case I16:
+ _, err = self.ReadI16()
+ return
+ case I32:
+ _, err = self.ReadI32()
+ return
+ case I64:
+ _, err = self.ReadI64()
+ return
+ case DOUBLE:
+ _, err = self.ReadDouble()
+ return
+ case STRING:
+ _, err = self.ReadString()
+ return
+ case STRUCT:
+ if _, err = self.ReadStructBegin(); err != nil {
+ return err
+ }
+ for {
+ _, typeId, _, _ := self.ReadFieldBegin()
+ if typeId == STOP {
+ break
+ }
+ err := Skip(self, typeId, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ self.ReadFieldEnd()
+ }
+ return self.ReadStructEnd()
+ case MAP:
+ keyType, valueType, size, err := self.ReadMapBegin()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(self, keyType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ self.Skip(valueType)
+ }
+ return self.ReadMapEnd()
+ case SET:
+ elemType, size, err := self.ReadSetBegin()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(self, elemType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ }
+ return self.ReadSetEnd()
+ case LIST:
+ elemType, size, err := self.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(self, elemType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ }
+ return self.ReadListEnd()
+ }
+ return nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
new file mode 100644
index 000000000..6e357ee89
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "encoding/base64"
+)
+
+// Thrift Protocol exception
+type TProtocolException interface {
+ TException
+ TypeId() int
+}
+
+const (
+ UNKNOWN_PROTOCOL_EXCEPTION = 0
+ INVALID_DATA = 1
+ NEGATIVE_SIZE = 2
+ SIZE_LIMIT = 3
+ BAD_VERSION = 4
+ NOT_IMPLEMENTED = 5
+ DEPTH_LIMIT = 6
+)
+
+type tProtocolException struct {
+ typeId int
+ message string
+}
+
+func (p *tProtocolException) TypeId() int {
+ return p.typeId
+}
+
+func (p *tProtocolException) String() string {
+ return p.message
+}
+
+func (p *tProtocolException) Error() string {
+ return p.message
+}
+
+func NewTProtocolException(err error) TProtocolException {
+ if err == nil {
+ return nil
+ }
+ if e, ok := err.(TProtocolException); ok {
+ return e
+ }
+ if _, ok := err.(base64.CorruptInputError); ok {
+ return &tProtocolException{INVALID_DATA, err.Error()}
+ }
+ return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
+}
+
+func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
+ if err == nil {
+ return nil
+ }
+ return &tProtocolException{errType, err.Error()}
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
new file mode 100644
index 000000000..c40f796d8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory interface for constructing protocol instances.
+type TProtocolFactory interface {
+ GetProtocol(trans TTransport) TProtocol
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
new file mode 100644
index 000000000..8e296a99b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "io"
+
+type RichTransport struct {
+ TTransport
+}
+
+// Wraps Transport to provide TRichTransport interface
+func NewTRichTransport(trans TTransport) *RichTransport {
+ return &RichTransport{trans}
+}
+
+func (r *RichTransport) ReadByte() (c byte, err error) {
+ return readByte(r.TTransport)
+}
+
+func (r *RichTransport) WriteByte(c byte) error {
+ return writeByte(r.TTransport, c)
+}
+
+func (r *RichTransport) WriteString(s string) (n int, err error) {
+ return r.Write([]byte(s))
+}
+
+func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
+ return r.TTransport.RemainingBytes()
+}
+
+func readByte(r io.Reader) (c byte, err error) {
+ v := [1]byte{0}
+ n, err := r.Read(v[0:1])
+ if n > 0 && (err == nil || err == io.EOF) {
+ return v[0], nil
+ }
+ if n > 0 && err != nil {
+ return v[0], err
+ }
+ if err != nil {
+ return 0, err
+ }
+ return v[0], nil
+}
+
+func writeByte(w io.Writer, c byte) error {
+ v := [1]byte{c}
+ _, err := w.Write(v[0:1])
+ return err
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
new file mode 100644
index 000000000..771222999
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+type TSerializer struct {
+ Transport *TMemoryBuffer
+ Protocol TProtocol
+}
+
+type TStruct interface {
+ Write(p TProtocol) error
+ Read(p TProtocol) error
+}
+
+func NewTSerializer() *TSerializer {
+ transport := NewTMemoryBufferLen(1024)
+ protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
+
+ return &TSerializer{
+ transport,
+ protocol}
+}
+
+func (t *TSerializer) WriteString(msg TStruct) (s string, err error) {
+ t.Transport.Reset()
+
+ if err = msg.Write(t.Protocol); err != nil {
+ return
+ }
+
+ if err = t.Protocol.Flush(); err != nil {
+ return
+ }
+ if err = t.Transport.Flush(); err != nil {
+ return
+ }
+
+ return t.Transport.String(), nil
+}
+
+func (t *TSerializer) Write(msg TStruct) (b []byte, err error) {
+ t.Transport.Reset()
+
+ if err = msg.Write(t.Protocol); err != nil {
+ return
+ }
+
+ if err = t.Protocol.Flush(); err != nil {
+ return
+ }
+
+ if err = t.Transport.Flush(); err != nil {
+ return
+ }
+
+ b = append(b, t.Transport.Bytes()...)
+ return
+}
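+
+// Editorial sketch (illustrative, not part of the upstream sources): typical
+// use is
+//
+//   s := NewTSerializer()
+//   data, err := s.Write(msg) // msg is any generated type implementing TStruct
+//
+// The memory buffer is Reset on every call, so a single TSerializer can be
+// reused sequentially, though it is not safe for concurrent use.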
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
new file mode 100644
index 000000000..412a482d0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
@@ -0,0 +1,1337 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+)
+
+type _ParseContext int
+
+const (
+ _CONTEXT_IN_TOPLEVEL _ParseContext = 1
+ _CONTEXT_IN_LIST_FIRST _ParseContext = 2
+ _CONTEXT_IN_LIST _ParseContext = 3
+ _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4
+ _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5
+ _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6
+)
+
+func (p _ParseContext) String() string {
+ switch p {
+ case _CONTEXT_IN_TOPLEVEL:
+ return "TOPLEVEL"
+ case _CONTEXT_IN_LIST_FIRST:
+ return "LIST-FIRST"
+ case _CONTEXT_IN_LIST:
+ return "LIST"
+ case _CONTEXT_IN_OBJECT_FIRST:
+ return "OBJECT-FIRST"
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ return "OBJECT-NEXT-KEY"
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ return "OBJECT-NEXT-VALUE"
+ }
+ return "UNKNOWN-PARSE-CONTEXT"
+}
+
+// JSON protocol implementation for thrift.
+//
+// This protocol produces/consumes a simple output format
+// suitable for parsing by scripting languages. It should not be
+// confused with the full-featured TJSONProtocol.
+//
+type TSimpleJSONProtocol struct {
+ trans TTransport
+
+ parseContextStack []int
+ dumpContext []int
+
+ writer *bufio.Writer
+ reader *bufio.Reader
+}
+
+// Constructor
+func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
+ v := &TSimpleJSONProtocol{trans: t,
+ writer: bufio.NewWriter(t),
+ reader: bufio.NewReader(t),
+ }
+ v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
+ v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
+ return v
+}
+
+// Factory
+type TSimpleJSONProtocolFactory struct{}
+
+func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return NewTSimpleJSONProtocol(trans)
+}
+
+func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
+ return &TSimpleJSONProtocolFactory{}
+}
+
+var (
+ JSON_COMMA []byte
+ JSON_COLON []byte
+ JSON_LBRACE []byte
+ JSON_RBRACE []byte
+ JSON_LBRACKET []byte
+ JSON_RBRACKET []byte
+ JSON_QUOTE byte
+ JSON_QUOTE_BYTES []byte
+ JSON_NULL []byte
+ JSON_TRUE []byte
+ JSON_FALSE []byte
+ JSON_INFINITY string
+ JSON_NEGATIVE_INFINITY string
+ JSON_NAN string
+ JSON_INFINITY_BYTES []byte
+ JSON_NEGATIVE_INFINITY_BYTES []byte
+ JSON_NAN_BYTES []byte
+ json_nonbase_map_elem_bytes []byte
+)
+
+func init() {
+ JSON_COMMA = []byte{','}
+ JSON_COLON = []byte{':'}
+ JSON_LBRACE = []byte{'{'}
+ JSON_RBRACE = []byte{'}'}
+ JSON_LBRACKET = []byte{'['}
+ JSON_RBRACKET = []byte{']'}
+ JSON_QUOTE = '"'
+ JSON_QUOTE_BYTES = []byte{'"'}
+ JSON_NULL = []byte{'n', 'u', 'l', 'l'}
+ JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
+ JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
+ JSON_INFINITY = "Infinity"
+ JSON_NEGATIVE_INFINITY = "-Infinity"
+ JSON_NAN = "NaN"
+ JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+ JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+ JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
+ json_nonbase_map_elem_bytes = []byte{']', ',', '['}
+}
+
+func jsonQuote(s string) string {
+ b, _ := json.Marshal(s)
+ s1 := string(b)
+ return s1
+}
+
+func jsonUnquote(s string) (string, bool) {
+ s1 := new(string)
+ err := json.Unmarshal([]byte(s), s1)
+ return *s1, err == nil
+}
+
+func mismatch(expected, actual string) error {
+ return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+ p.resetContextStack() // THRIFT-3735
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.WriteString(name); e != nil {
+ return e
+ }
+ if e := p.WriteByte(int8(typeId)); e != nil {
+ return e
+ }
+ if e := p.WriteI32(seqId); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageEnd() error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error {
+ if e := p.OutputObjectBegin(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteStructEnd() error {
+ return p.OutputObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+ if e := p.WriteString(name); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldEnd() error {
+ //return p.OutputListEnd()
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil }
+
+func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.WriteByte(int8(keyType)); e != nil {
+ return e
+ }
+ if e := p.WriteByte(int8(valueType)); e != nil {
+ return e
+ }
+ return p.WriteI32(int32(size))
+}
+
+func (p *TSimpleJSONProtocol) WriteMapEnd() error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error {
+ return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteListEnd() error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error {
+ return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteSetEnd() error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteBool(b bool) error {
+ return p.OutputBool(b)
+}
+
+func (p *TSimpleJSONProtocol) WriteByte(b int8) error {
+ return p.WriteI32(int32(b))
+}
+
+func (p *TSimpleJSONProtocol) WriteI16(v int16) error {
+ return p.WriteI32(int32(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI32(v int32) error {
+ return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI64(v int64) error {
+ return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteDouble(v float64) error {
+ return p.OutputF64(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteString(v string) error {
+ return p.OutputString(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
+ // The JSON encoding only carries strings, not arbitrary byte arrays, so to
+ // transmit bytes efficiently as a valid JSON string we base64-encode them,
+ // which avoids excessive escaping and quoting.
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+ return NewTProtocolException(e)
+ }
+ writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+ if _, e := writer.Write(v); e != nil {
+ p.writer.Reset(p.trans) // THRIFT-3735
+ return NewTProtocolException(e)
+ }
+ if e := writer.Close(); e != nil {
+ return NewTProtocolException(e)
+ }
+ if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+ return NewTProtocolException(e)
+ }
+ return p.OutputPostValue()
+}
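+
+// Illustrative aside (editorial, not in the upstream sources): the base64
+// payload is wrapped in quotes, so
+//
+//   p.WriteBinary([]byte{0x01, 0x02, 0x03}) // emits "AQID"
+//
+// and ReadBinary below reverses it via ParseBase64EncodedBody.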
+
+// Reading methods.
+func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+ p.resetContextStack() // THRIFT-3735
+ if isNull, err := p.ParseListBegin(); isNull || err != nil {
+ return name, typeId, seqId, err
+ }
+ if name, err = p.ReadString(); err != nil {
+ return name, typeId, seqId, err
+ }
+ bTypeId, err := p.ReadByte()
+ typeId = TMessageType(bTypeId)
+ if err != nil {
+ return name, typeId, seqId, err
+ }
+ if seqId, err = p.ReadI32(); err != nil {
+ return name, typeId, seqId, err
+ }
+ return name, typeId, seqId, nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMessageEnd() error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) {
+ _, err = p.ParseObjectStart()
+ return "", err
+}
+
+func (p *TSimpleJSONProtocol) ReadStructEnd() error {
+ return p.ParseObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return "", STOP, 0, err
+ }
+ b, _ := p.reader.Peek(1)
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACE[0]:
+ return "", STOP, 0, nil
+ case JSON_QUOTE:
+ p.reader.ReadByte()
+ name, err := p.ParseStringBody()
+ // simplejson is not meant to be read back into thrift
+ // - see http://wiki.apache.org/thrift/ThriftUsageJava
+ // - use JSON instead
+ if err != nil {
+ return name, STOP, 0, err
+ }
+ return name, STOP, -1, p.ParsePostValue()
+ /*
+ if err = p.ParsePostValue(); err != nil {
+ return name, STOP, 0, err
+ }
+ if isNull, err := p.ParseListBegin(); isNull || err != nil {
+ return name, STOP, 0, err
+ }
+ bType, err := p.ReadByte()
+ thetype := TType(bType)
+ if err != nil {
+ return name, thetype, 0, err
+ }
+ id, err := p.ReadI16()
+ return name, thetype, id, err
+ */
+ }
+ e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
+ return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return "", STOP, 0, NewTProtocolException(io.EOF)
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldEnd() error {
+ return nil
+ //return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
+ if isNull, e := p.ParseListBegin(); isNull || e != nil {
+ return VOID, VOID, 0, e
+ }
+
+ // read keyType
+ bKeyType, e := p.ReadByte()
+ keyType = TType(bKeyType)
+ if e != nil {
+ return keyType, valueType, size, e
+ }
+
+ // read valueType
+ bValueType, e := p.ReadByte()
+ valueType = TType(bValueType)
+ if e != nil {
+ return keyType, valueType, size, e
+ }
+
+ // read size
+ iSize, err := p.ReadI64()
+ size = int(iSize)
+ return keyType, valueType, size, err
+}
+
+func (p *TSimpleJSONProtocol) ReadMapEnd() error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
+ return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadListEnd() error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
+ return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetEnd() error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadBool() (bool, error) {
+ var value bool
+
+ if err := p.ParsePreValue(); err != nil {
+ return value, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 {
+ switch f[0] {
+ case JSON_TRUE[0]:
+ b := make([]byte, len(JSON_TRUE))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_TRUE) {
+ value = true
+ } else {
+ e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ break
+ case JSON_FALSE[0]:
+ b := make([]byte, len(JSON_FALSE))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_FALSE) {
+ value = false
+ } else {
+ e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ break
+ case JSON_NULL[0]:
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_NULL) {
+ value = false
+ } else {
+ e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ default:
+ e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ return value, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadByte() (int8, error) {
+ v, err := p.ReadI64()
+ return int8(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI16() (int16, error) {
+ v, err := p.ReadI64()
+ return int16(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI32() (int32, error) {
+ v, err := p.ReadI64()
+ return int32(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI64() (int64, error) {
+ v, _, err := p.ParseI64()
+ return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) {
+ v, _, err := p.ParseF64()
+ return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadString() (string, error) {
+ var v string
+ if err := p.ParsePreValue(); err != nil {
+ return v, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 && f[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ value, err := p.ParseStringBody()
+ v = value
+ if err != nil {
+ return v, err
+ }
+ } else if len(f) > 0 && f[0] == JSON_NULL[0] {
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return v, NewTProtocolException(err)
+ }
+ if string(b) != string(JSON_NULL) {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ } else {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) {
+ var v []byte
+ if err := p.ParsePreValue(); err != nil {
+ return nil, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 && f[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ value, err := p.ParseBase64EncodedBody()
+ v = value
+ if err != nil {
+ return v, err
+ }
+ } else if len(f) > 0 && f[0] == JSON_NULL[0] {
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return v, NewTProtocolException(err)
+ }
+ if string(b) != string(JSON_NULL) {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ } else {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+
+ return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) Flush() (err error) {
+ return NewTProtocolException(p.writer.Flush())
+}
+
+func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) {
+ return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TSimpleJSONProtocol) Transport() TTransport {
+ return p.trans
+}
+
+func (p *TSimpleJSONProtocol) OutputPreValue() error {
+ cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+ switch cxt {
+ case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ if _, e := p.write(JSON_COMMA); e != nil {
+ return NewTProtocolException(e)
+ }
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ if _, e := p.write(JSON_COLON); e != nil {
+ return NewTProtocolException(e)
+ }
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputPostValue() error {
+ cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+ switch cxt {
+ case _CONTEXT_IN_LIST_FIRST:
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST))
+ break
+ case _CONTEXT_IN_OBJECT_FIRST:
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ var v string
+ if value {
+ v = string(JSON_TRUE)
+ } else {
+ v = string(JSON_FALSE)
+ }
+ switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = jsonQuote(v)
+ default:
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputNull() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_NULL); e != nil {
+ return NewTProtocolException(e)
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ var v string
+ if math.IsNaN(value) {
+ v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
+ } else if math.IsInf(value, 1) {
+ v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
+ } else if math.IsInf(value, -1) {
+ v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
+ } else {
+ v = strconv.FormatFloat(value, 'g', -1, 64)
+ switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
+ default:
+ }
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ v := strconv.FormatInt(value, 10)
+ switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = jsonQuote(v)
+ default:
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputString(s string) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if e := p.OutputStringData(jsonQuote(s)); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
+ _, e := p.write([]byte(s))
+ return NewTProtocolException(e)
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_LBRACE); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST))
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
+ if _, e := p.write(JSON_RBRACE); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ if e := p.OutputPostValue(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListBegin() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_LBRACKET); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST))
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListEnd() error {
+ if _, e := p.write(JSON_RBRACKET); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+ if e := p.OutputPostValue(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.WriteByte(int8(elemType)); e != nil {
+ return e
+ }
+ if e := p.WriteI64(int64(size)); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePreValue() error {
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ b, _ := p.reader.Peek(1)
+ switch cxt {
+ case _CONTEXT_IN_LIST:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACKET[0]:
+ return nil
+ case JSON_COMMA[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACE[0]:
+ return nil
+ case JSON_COMMA[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_COLON[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePostValue() error {
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ switch cxt {
+ case _CONTEXT_IN_LIST_FIRST:
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST))
+ break
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+ break
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
+ for {
+ b, _ := p.reader.Peek(1)
+ if len(b) < 1 {
+ return nil
+ }
+ switch b[0] {
+ case ' ', '\r', '\n', '\t':
+ p.reader.ReadByte()
+ continue
+ default:
+ break
+ }
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
+ line, err := p.reader.ReadString(JSON_QUOTE)
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ l := len(line)
+ // Count the trailing backslashes before the quote that ended the read; an even
+ // count means the quote was not escaped, so the string body is complete.
+ i := 1
+ for ; i < l; i++ {
+ if line[l-i-1] != '\\' {
+ break
+ }
+ }
+ if i&0x01 == 1 {
+ v, ok := jsonUnquote(string(JSON_QUOTE) + line)
+ if !ok {
+ e := fmt.Errorf("Unable to parse as JSON string %s", string(JSON_QUOTE)+line)
+ return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return v, nil
+ }
+ s, err := p.ParseQuotedStringBody()
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ str := string(JSON_QUOTE) + line + s
+ v, ok := jsonUnquote(str)
+ if !ok {
+ e := fmt.Errorf("Unable to parse as JSON string %s", str)
+ return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
+ line, err := p.reader.ReadString(JSON_QUOTE)
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ l := len(line)
+ // Count the trailing backslashes before the quote that ended the read; an even
+ // count means the quote was not escaped, so the quoted body is complete.
+ i := 1
+ for ; i < l; i++ {
+ if line[l-i-1] != '\\' {
+ break
+ }
+ }
+ if i&0x01 == 1 {
+ return line, nil
+ }
+ s, err := p.ParseQuotedStringBody()
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ v := line + s
+ return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
+ line, err := p.reader.ReadBytes(JSON_QUOTE)
+ if err != nil {
+ return line, NewTProtocolException(err)
+ }
+ line2 := line[0 : len(line)-1]
+ l := len(line2)
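+ // The writer may omit the trailing '=' padding from base64 data; restore it
+ // here so that base64.StdEncoding can decode the body.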
+ if (l % 4) != 0 {
+ pad := 4 - (l % 4)
+ fill := [...]byte{'=', '=', '='}
+ line2 = append(line2, fill[:pad]...)
+ l = len(line2)
+ }
+ output := make([]byte, base64.StdEncoding.DecodedLen(l))
+ n, err := base64.StdEncoding.Decode(output, line2)
+ return output[0:n], NewTProtocolException(err)
+}
+
+func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return 0, false, err
+ }
+ var value int64
+ var isnull bool
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ isnull = true
+ } else {
+ num, err := p.readNumeric()
+ isnull = (num == nil)
+ if !isnull {
+ value = num.Int64()
+ }
+ if err != nil {
+ return value, isnull, err
+ }
+ }
+ return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return 0, false, err
+ }
+ var value float64
+ var isnull bool
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ isnull = true
+ } else {
+ num, err := p.readNumeric()
+ isnull = (num == nil)
+ if !isnull {
+ value = num.Float64()
+ }
+ if err != nil {
+ return value, isnull, err
+ }
+ }
+ return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return false, err
+ }
+ var b []byte
+ b, err := p.reader.Peek(1)
+ if err != nil {
+ return false, err
+ }
+ if len(b) > 0 && b[0] == JSON_LBRACE[0] {
+ p.reader.ReadByte()
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST))
+ return false, nil
+ } else if p.safePeekContains(JSON_NULL) {
+ return true, nil
+ }
+ e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
+ return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
+ if isNull, err := p.readIfNull(); isNull || err != nil {
+ return err
+ }
+ cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
+ e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ line, err := p.reader.ReadString(JSON_RBRACE[0])
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ for _, char := range line {
+ switch char {
+ default:
+ e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ case ' ', '\n', '\r', '\t', '}':
+ break
+ }
+ }
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
+ if e := p.ParsePreValue(); e != nil {
+ return false, e
+ }
+ var b []byte
+ b, err = p.reader.Peek(1)
+ if err != nil {
+ return false, err
+ }
+ if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
+ p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST))
+ p.reader.ReadByte()
+ isNull = false
+ } else if p.safePeekContains(JSON_NULL) {
+ isNull = true
+ } else {
+ err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
+ }
+ return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
+}
+
+func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+ if isNull, e := p.ParseListBegin(); isNull || e != nil {
+ return VOID, 0, e
+ }
+ bElemType, err := p.ReadByte()
+ elemType = TType(bElemType)
+ if err != nil {
+ return elemType, size, err
+ }
+ nSize, err2 := p.ReadI64()
+ size = int(nSize)
+ return elemType, size, err2
+}
+
+func (p *TSimpleJSONProtocol) ParseListEnd() error {
+ if isNull, err := p.readIfNull(); isNull || err != nil {
+ return err
+ }
+ cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+ if cxt != _CONTEXT_IN_LIST {
+ e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ line, err := p.reader.ReadString(JSON_RBRACKET[0])
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ for _, char := range line {
+ switch char {
+ default:
+ e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
+ break
+ }
+ }
+ p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+ if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL {
+ return nil
+ }
+ return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
+ e := p.readNonSignificantWhitespace()
+ if e != nil {
+ return nil, VOID, NewTProtocolException(e)
+ }
+ b, e := p.reader.Peek(1)
+ if len(b) > 0 {
+ c := b[0]
+ switch c {
+ case JSON_NULL[0]:
+ buf := make([]byte, len(JSON_NULL))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return nil, VOID, NewTProtocolException(e)
+ }
+ if string(JSON_NULL) != string(buf) {
+ e = mismatch(string(JSON_NULL), string(buf))
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return nil, VOID, nil
+ case JSON_QUOTE:
+ p.reader.ReadByte()
+ v, e := p.ParseStringBody()
+ if e != nil {
+ return v, UTF8, NewTProtocolException(e)
+ }
+ if v == JSON_INFINITY {
+ return INFINITY, DOUBLE, nil
+ } else if v == JSON_NEGATIVE_INFINITY {
+ return NEGATIVE_INFINITY, DOUBLE, nil
+ } else if v == JSON_NAN {
+ return NAN, DOUBLE, nil
+ }
+ return v, UTF8, nil
+ case JSON_TRUE[0]:
+ buf := make([]byte, len(JSON_TRUE))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return true, BOOL, NewTProtocolException(e)
+ }
+ if string(JSON_TRUE) != string(buf) {
+ e := mismatch(string(JSON_TRUE), string(buf))
+ return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return true, BOOL, nil
+ case JSON_FALSE[0]:
+ buf := make([]byte, len(JSON_FALSE))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return false, BOOL, NewTProtocolException(e)
+ }
+ if string(JSON_FALSE) != string(buf) {
+ e := mismatch(string(JSON_FALSE), string(buf))
+ return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return false, BOOL, nil
+ case JSON_LBRACKET[0]:
+ _, e := p.reader.ReadByte()
+ return make([]interface{}, 0), LIST, NewTProtocolException(e)
+ case JSON_LBRACE[0]:
+ _, e := p.reader.ReadByte()
+ return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
+ // assume numeric
+ v, e := p.readNumeric()
+ return v, DOUBLE, e
+ default:
+ e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ e = fmt.Errorf("Cannot read a single element while parsing JSON.")
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+}
+
+func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
+ cont := true
+ for cont {
+ b, _ := p.reader.Peek(1)
+ if len(b) < 1 {
+ return false, nil
+ }
+ switch b[0] {
+ default:
+ return false, nil
+ case JSON_NULL[0]:
+ cont = false
+ break
+ case ' ', '\n', '\r', '\t':
+ p.reader.ReadByte()
+ break
+ }
+ }
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ return true, nil
+ }
+ return false, nil
+}
+
+func (p *TSimpleJSONProtocol) readQuoteIfNext() {
+ b, _ := p.reader.Peek(1)
+ if len(b) > 0 && b[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ }
+}
+
+func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
+ isNull, err := p.readIfNull()
+ if isNull || err != nil {
+ return NUMERIC_NULL, err
+ }
+ hasDecimalPoint := false
+ nextCanBeSign := true
+ hasE := false
+ MAX_LEN := 40
+ buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
+ continueFor := true
+ inQuotes := false
+ for continueFor {
+ c, err := p.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return NUMERIC_NULL, NewTProtocolException(err)
+ }
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ buf.WriteByte(c)
+ nextCanBeSign = false
+ case '.':
+ if hasDecimalPoint {
+ e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if hasE {
+ e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ hasDecimalPoint, nextCanBeSign = true, false
+ case 'e', 'E':
+ if hasE {
+ e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ hasE, nextCanBeSign = true, true
+ case '-', '+':
+ if !nextCanBeSign {
+ e := fmt.Errorf("Negative sign within number")
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ nextCanBeSign = false
+ case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
+ p.reader.UnreadByte()
+ continueFor = false
+ case JSON_NAN[0]:
+ if buf.Len() == 0 {
+ buffer := make([]byte, len(JSON_NAN))
+ buffer[0] = c
+ _, e := p.reader.Read(buffer[1:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_NAN != string(buffer) {
+ e := mismatch(JSON_NAN, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return NAN, nil
+ } else {
+ e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ case JSON_INFINITY[0]:
+ if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
+ buffer := make([]byte, len(JSON_INFINITY))
+ buffer[0] = c
+ _, e := p.reader.Read(buffer[1:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_INFINITY != string(buffer) {
+ e := mismatch(JSON_INFINITY, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return INFINITY, nil
+ } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
+ buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
+ buffer[0] = JSON_NEGATIVE_INFINITY[0]
+ buffer[1] = c
+ _, e := p.reader.Read(buffer[2:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_NEGATIVE_INFINITY != string(buffer) {
+ e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return NEGATIVE_INFINITY, nil
+ } else {
+ e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ case JSON_QUOTE:
+ if !inQuotes {
+ inQuotes = true
+ } else {
+ break
+ }
+ default:
+ e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ if buf.Len() == 0 {
+ e := fmt.Errorf("Unable to parse number from empty string ''")
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return NewNumericFromJSONString(buf.String(), false), nil
+}
+
+// Safely peeks into the buffer, reading only what is necessary
+func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
+ for i := 0; i < len(b); i++ {
+ a, _ := p.reader.Peek(i + 1)
+ if len(a) == 0 || a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Reset the context stack to its initial state.
+func (p *TSimpleJSONProtocol) resetContextStack() {
+ p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)}
+ p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)}
+}
+
+func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
+ n, err := p.writer.Write(b)
+ if err != nil {
+ p.writer.Reset(p.trans) // THRIFT-3735
+ }
+ return n, err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
new file mode 100644
index 000000000..453899651
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+ "io"
+)
+
+var errTransportInterrupted = errors.New("Transport Interrupted")
+
+type Flusher interface {
+ Flush() (err error)
+}
+
+type ReadSizeProvider interface {
+ RemainingBytes() (num_bytes uint64)
+}
+
+
+// Encapsulates the I/O layer
+type TTransport interface {
+ io.ReadWriteCloser
+ Flusher
+ ReadSizeProvider
+
+ // Opens the transport for communication
+ Open() error
+
+ // Returns true if the transport is open
+ IsOpen() bool
+}
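+
+// Illustrative sketch (not part of the upstream API): a typical caller drives a
+// TTransport with the open/write/flush/close sequence below. The trans and
+// payload variables are hypothetical placeholders.
+//
+//  if err := trans.Open(); err != nil {
+//      return err
+//  }
+//  defer trans.Close()
+//  if _, err := trans.Write(payload); err != nil {
+//      return err
+//  }
+//  return trans.Flush()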
+
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+
+// This is "enchanced" transport with extra capabilities. You need to use one of these
+// to construct protocol.
+// Notably, TSocket does not implement this interface, and it is always a mistake to use
+// TSocket directly in protocol.
+type TRichTransport interface {
+ io.ReadWriter
+ io.ByteReader
+ io.ByteWriter
+ stringWriter
+ Flusher
+ ReadSizeProvider
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
new file mode 100644
index 000000000..9505b4461
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+ "io"
+)
+
+type timeoutable interface {
+ Timeout() bool
+}
+
+// Thrift Transport exception
+type TTransportException interface {
+ TException
+ TypeId() int
+ Err() error
+}
+
+const (
+ UNKNOWN_TRANSPORT_EXCEPTION = 0
+ NOT_OPEN = 1
+ ALREADY_OPEN = 2
+ TIMED_OUT = 3
+ END_OF_FILE = 4
+)
+
+type tTransportException struct {
+ typeId int
+ err error
+}
+
+func (p *tTransportException) TypeId() int {
+ return p.typeId
+}
+
+func (p *tTransportException) Error() string {
+ return p.err.Error()
+}
+
+func (p *tTransportException) Err() error {
+ return p.err
+}
+
+func NewTTransportException(t int, e string) TTransportException {
+ return &tTransportException{typeId: t, err: errors.New(e)}
+}
+
+func NewTTransportExceptionFromError(e error) TTransportException {
+ if e == nil {
+ return nil
+ }
+
+ if t, ok := e.(TTransportException); ok {
+ return t
+ }
+
+ switch v := e.(type) {
+ case TTransportException:
+ return v
+ case timeoutable:
+ if v.Timeout() {
+ return &tTransportException{typeId: TIMED_OUT, err: e}
+ }
+ }
+
+ if e == io.EOF {
+ return &tTransportException{typeId: END_OF_FILE, err: e}
+ }
+
+ return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e}
+}
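+
+// Illustrative sketch (not part of the upstream API): callers can use
+// NewTTransportExceptionFromError to classify transport errors, e.g. to retry
+// on timeouts. The trans variable and the retry decision are hypothetical.
+//
+//  if err := trans.Flush(); err != nil {
+//      if te := NewTTransportExceptionFromError(err); te != nil && te.TypeId() == TIMED_OUT {
+//          // transient timeout: safe to retry the flush
+//      }
+//  }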
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
new file mode 100644
index 000000000..533d1b437
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// TTransportFactory is used to create wrapped instances of Transports.
+// This is used primarily in servers, which get Transports from
+// a ServerTransport and then may want to wrap them (e.g. create
+// a BufferedTransport from the underlying base transport).
+type TTransportFactory interface {
+ GetTransport(trans TTransport) TTransport
+}
+
+type tTransportFactory struct{}
+
+// GetTransport returns the base Transport unchanged; the default factory performs no wrapping.
+func (p *tTransportFactory) GetTransport(trans TTransport) TTransport {
+ return trans
+}
+
+func NewTTransportFactory() TTransportFactory {
+ return &tTransportFactory{}
+}
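+
+// Illustrative sketch (not part of the upstream API): a custom factory can wrap
+// the base transport before handing it to a protocol. The bufferedTransportFactory
+// name and the newBufferedTransport helper are hypothetical.
+//
+//  type bufferedTransportFactory struct{}
+//
+//  func (f *bufferedTransportFactory) GetTransport(trans TTransport) TTransport {
+//      // wrap the raw transport, e.g. with buffering or logging
+//      return newBufferedTransport(trans)
+//  }
+//
+//  var factory TTransportFactory = &bufferedTransportFactory{}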
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
new file mode 100644
index 000000000..4292ffcad
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Type constants in the Thrift protocol
+type TType byte
+
+const (
+ STOP = 0
+ VOID = 1
+ BOOL = 2
+ BYTE = 3
+ I08 = 3
+ DOUBLE = 4
+ I16 = 6
+ I32 = 8
+ I64 = 10
+ STRING = 11
+ UTF7 = 11
+ STRUCT = 12
+ MAP = 13
+ SET = 14
+ LIST = 15
+ UTF8 = 16
+ UTF16 = 17
+ //BINARY = 18 wrong and unused
+)
+
+var typeNames = map[int]string{
+ STOP: "STOP",
+ VOID: "VOID",
+ BOOL: "BOOL",
+ BYTE: "BYTE",
+ DOUBLE: "DOUBLE",
+ I16: "I16",
+ I32: "I32",
+ I64: "I64",
+ STRING: "STRING",
+ STRUCT: "STRUCT",
+ MAP: "MAP",
+ SET: "SET",
+ LIST: "LIST",
+ UTF8: "UTF8",
+ UTF16: "UTF16",
+}
+
+func (p TType) String() string {
+ if s, ok := typeNames[int(p)]; ok {
+ return s
+ }
+ return "Unknown"
+}
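+
+// For example, TType(MAP).String() returns "MAP", while an unmapped value such
+// as TType(200).String() returns "Unknown".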
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
new file mode 100644
index 000000000..2c29ae8c9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -0,0 +1,441 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ "github.com/uber/jaeger-client-go/internal/throttler"
+ "github.com/uber/jaeger-client-go/log"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// Tracer implements opentracing.Tracer.
+type Tracer struct {
+ serviceName string
+ hostIPv4 uint32 // this is for zipkin endpoint conversion
+
+ sampler Sampler
+ reporter Reporter
+ metrics Metrics
+ logger log.Logger
+
+ timeNow func() time.Time
+ randomNumber func() uint64
+
+ options struct {
+ poolSpans bool
+ gen128Bit bool // whether to generate 128bit trace IDs
+ zipkinSharedRPCSpan bool
+ highTraceIDGenerator func() uint64 // custom high trace ID generator
+ maxTagValueLength int
+ // more options to come
+ }
+ // pool for Span objects
+ spanPool sync.Pool
+
+ injectors map[interface{}]Injector
+ extractors map[interface{}]Extractor
+
+ observer compositeObserver
+
+ tags []Tag
+ process Process
+
+ baggageRestrictionManager baggage.RestrictionManager
+ baggageSetter *baggageSetter
+
+ debugThrottler throttler.Throttler
+}
+
+// NewTracer creates a Tracer implementation that reports tracing to Jaeger.
+// The returned io.Closer can be used in shutdown hooks to ensure that the internal
+// queue of the Reporter is drained and all buffered spans are submitted to collectors.
+func NewTracer(
+ serviceName string,
+ sampler Sampler,
+ reporter Reporter,
+ options ...TracerOption,
+) (opentracing.Tracer, io.Closer) {
+ t := &Tracer{
+ serviceName: serviceName,
+ sampler: sampler,
+ reporter: reporter,
+ injectors: make(map[interface{}]Injector),
+ extractors: make(map[interface{}]Extractor),
+ metrics: *NewNullMetrics(),
+ spanPool: sync.Pool{New: func() interface{} {
+ return &Span{}
+ }},
+ }
+
+ for _, option := range options {
+ option(t)
+ }
+
+ // register default injectors/extractors unless they are already provided via options
+ textPropagator := newTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
+ t.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+ httpHeaderPropagator := newHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
+ t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+
+ binaryPropagator := newBinaryPropagator(t)
+ t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)
+
+ // TODO remove after TChannel supports OpenTracing
+ interopPropagator := &jaegerTraceContextPropagator{tracer: t}
+ t.addCodec(SpanContextFormat, interopPropagator, interopPropagator)
+
+ zipkinPropagator := &zipkinPropagator{tracer: t}
+ t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)
+
+ if t.baggageRestrictionManager != nil {
+ t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)
+ } else {
+ t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)
+ }
+ if t.debugThrottler == nil {
+ t.debugThrottler = throttler.DefaultThrottler{}
+ }
+
+ if t.randomNumber == nil {
+ seedGenerator := utils.NewRand(time.Now().UnixNano())
+ pool := sync.Pool{
+ New: func() interface{} {
+ return rand.NewSource(seedGenerator.Int63())
+ },
+ }
+
+ t.randomNumber = func() uint64 {
+ generator := pool.Get().(rand.Source)
+ number := uint64(generator.Int63())
+ pool.Put(generator)
+ return number
+ }
+ }
+ if t.timeNow == nil {
+ t.timeNow = time.Now
+ }
+ if t.logger == nil {
+ t.logger = log.NullLogger
+ }
+ // Set tracer-level tags
+ t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})
+ if hostname, err := os.Hostname(); err == nil {
+ t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})
+ }
+ if ip, err := utils.HostIP(); err == nil {
+ t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})
+ t.hostIPv4 = utils.PackIPAsUint32(ip)
+ } else {
+ t.logger.Error("Unable to determine this host's IP address: " + err.Error())
+ }
+
+ if t.options.gen128Bit {
+ if t.options.highTraceIDGenerator == nil {
+ t.options.highTraceIDGenerator = t.randomNumber
+ }
+ } else if t.options.highTraceIDGenerator != nil {
+ t.logger.Error("Overriding high trace ID generator but not generating " +
+ "128 bit trace IDs, consider enabling the \"Gen128Bit\" option")
+ }
+ if t.options.maxTagValueLength == 0 {
+ t.options.maxTagValueLength = DefaultMaxTagValueLength
+ }
+ t.process = Process{
+ Service: serviceName,
+ UUID: strconv.FormatUint(t.randomNumber(), 16),
+ Tags: t.tags,
+ }
+ if throttler, ok := t.debugThrottler.(ProcessSetter); ok {
+ throttler.SetProcess(t.process)
+ }
+
+ return t, t
+}
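+
+// Illustrative sketch (not part of this file): typical construction and shutdown.
+// NewConstSampler and NewNullReporter are assumed to be the helpers provided
+// elsewhere in this package; swap in a real sampler/reporter as needed.
+//
+//  sampler := NewConstSampler(true)
+//  reporter := NewNullReporter()
+//  tracer, closer := NewTracer("my-service", sampler, reporter)
+//  defer closer.Close()
+//  opentracing.SetGlobalTracer(tracer)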
+
+// addCodec registers an injector and an extractor for the given propagation format, unless they are already defined.
+func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) {
+ if _, ok := t.injectors[format]; !ok {
+ t.injectors[format] = injector
+ }
+ if _, ok := t.extractors[format]; !ok {
+ t.extractors[format] = extractor
+ }
+}
+
+// StartSpan implements the StartSpan() method of opentracing.Tracer.
+func (t *Tracer) StartSpan(
+ operationName string,
+ options ...opentracing.StartSpanOption,
+) opentracing.Span {
+ sso := opentracing.StartSpanOptions{}
+ for _, o := range options {
+ o.Apply(&sso)
+ }
+ return t.startSpanWithOptions(operationName, sso)
+}
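+
+// Illustrative sketch (not part of this file): creating a child span via the
+// standard opentracing start-span options. The tracer and parent span are
+// hypothetical.
+//
+//  parent := tracer.StartSpan("http-request")
+//  child := tracer.StartSpan("db-query",
+//      opentracing.ChildOf(parent.Context()),
+//      opentracing.StartTime(time.Now()),
+//  )
+//  child.Finish()
+//  parent.Finish()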
+
+func (t *Tracer) startSpanWithOptions(
+ operationName string,
+ options opentracing.StartSpanOptions,
+) opentracing.Span {
+ if options.StartTime.IsZero() {
+ options.StartTime = t.timeNow()
+ }
+
+ // Predicate deciding whether the given span context is a valid reference,
+ // i.e. one that may be used as a parent / debug ID / baggage items source.
+ isValidReference := func(ctx SpanContext) bool {
+ return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0
+ }
+
+ var references []Reference
+ var parent SpanContext
+ var hasParent bool // need this because `parent` is a value, not reference
+ for _, ref := range options.References {
+ ctx, ok := ref.ReferencedContext.(SpanContext)
+ if !ok {
+ t.logger.Error(fmt.Sprintf(
+ "Reference contains invalid type of SpanReference: %s",
+ reflect.ValueOf(ref.ReferencedContext)))
+ continue
+ }
+ if !isValidReference(ctx) {
+ continue
+ }
+ references = append(references, Reference{Type: ref.Type, Context: ctx})
+ if !hasParent {
+ parent = ctx
+ hasParent = ref.Type == opentracing.ChildOfRef
+ }
+ }
+ if !hasParent && isValidReference(parent) {
+ // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from
+ // the FollowFromRef as the parent
+ hasParent = true
+ }
+
+ rpcServer := false
+ if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok {
+ rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
+ }
+
+ var samplerTags []Tag
+ var ctx SpanContext
+ newTrace := false
+ if !hasParent || !parent.IsValid() {
+ newTrace = true
+ ctx.traceID.Low = t.randomID()
+ if t.options.gen128Bit {
+ ctx.traceID.High = t.options.highTraceIDGenerator()
+ }
+ ctx.spanID = SpanID(ctx.traceID.Low)
+ ctx.parentID = 0
+ ctx.flags = byte(0)
+ if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
+ ctx.flags |= (flagSampled | flagDebug)
+ samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}}
+ } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled {
+ ctx.flags |= flagSampled
+ samplerTags = tags
+ }
+ } else {
+ ctx.traceID = parent.traceID
+ if rpcServer && t.options.zipkinSharedRPCSpan {
+ // Support Zipkin's one-span-per-RPC model
+ ctx.spanID = parent.spanID
+ ctx.parentID = parent.parentID
+ } else {
+ ctx.spanID = SpanID(t.randomID())
+ ctx.parentID = parent.spanID
+ }
+ ctx.flags = parent.flags
+ }
+ if hasParent {
+ // copy baggage items
+ if l := len(parent.baggage); l > 0 {
+ ctx.baggage = make(map[string]string, len(parent.baggage))
+ for k, v := range parent.baggage {
+ ctx.baggage[k] = v
+ }
+ }
+ }
+
+ sp := t.newSpan()
+ sp.context = ctx
+ sp.observer = t.observer.OnStartSpan(sp, operationName, options)
+ return t.startSpanInternal(
+ sp,
+ operationName,
+ options.StartTime,
+ samplerTags,
+ options.Tags,
+ newTrace,
+ rpcServer,
+ references,
+ )
+}
+
+// Inject implements the Inject() method of opentracing.Tracer.
+func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error {
+ c, ok := ctx.(SpanContext)
+ if !ok {
+ return opentracing.ErrInvalidSpanContext
+ }
+ if injector, ok := t.injectors[format]; ok {
+ return injector.Inject(c, carrier)
+ }
+ return opentracing.ErrUnsupportedFormat
+}
+
+// Extract implements the Extract() method of opentracing.Tracer.
+func (t *Tracer) Extract(
+ format interface{},
+ carrier interface{},
+) (opentracing.SpanContext, error) {
+ if extractor, ok := t.extractors[format]; ok {
+ return extractor.Extract(carrier)
+ }
+ return nil, opentracing.ErrUnsupportedFormat
+}
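+
+// Illustrative sketch (not part of this file): propagating a span context over
+// HTTP headers with Inject/Extract. The req variable is a hypothetical
+// *http.Request, and ext refers to the opentracing-go/ext package.
+//
+//  // client side
+//  carrier := opentracing.HTTPHeadersCarrier(req.Header)
+//  _ = tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier)
+//
+//  // server side
+//  spanCtx, err := tracer.Extract(opentracing.HTTPHeaders,
+//      opentracing.HTTPHeadersCarrier(req.Header))
+//  if err == nil {
+//      serverSpan := tracer.StartSpan("handle-request", ext.RPCServerOption(spanCtx))
+//      defer serverSpan.Finish()
+//  }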
+
+// Close releases all resources used by the Tracer and flushes any remaining buffered spans.
+func (t *Tracer) Close() error {
+ t.reporter.Close()
+ t.sampler.Close()
+ if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
+ mgr.Close()
+ }
+ if throttler, ok := t.debugThrottler.(io.Closer); ok {
+ throttler.Close()
+ }
+ return nil
+}
+
+// Tags returns a slice of tracer-level tags.
+func (t *Tracer) Tags() []opentracing.Tag {
+ tags := make([]opentracing.Tag, len(t.tags))
+ for i, tag := range t.tags {
+ tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value}
+ }
+ return tags
+}
+
+// newSpan returns an instance of a clean Span object.
+// If options.PoolSpans is true, the spans are retrieved from an object pool.
+func (t *Tracer) newSpan() *Span {
+ if !t.options.poolSpans {
+ return &Span{}
+ }
+ sp := t.spanPool.Get().(*Span)
+ sp.context = emptyContext
+ sp.tracer = nil
+ sp.tags = nil
+ sp.logs = nil
+ return sp
+}
+
+func (t *Tracer) startSpanInternal(
+ sp *Span,
+ operationName string,
+ startTime time.Time,
+ internalTags []Tag,
+ tags opentracing.Tags,
+ newTrace bool,
+ rpcServer bool,
+ references []Reference,
+) *Span {
+ sp.tracer = t
+ sp.operationName = operationName
+ sp.startTime = startTime
+ sp.duration = 0
+ sp.references = references
+ sp.firstInProcess = rpcServer || sp.context.parentID == 0
+ if len(tags) > 0 || len(internalTags) > 0 {
+ sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags))
+ copy(sp.tags, internalTags)
+ for k, v := range tags {
+ sp.observer.OnSetTag(k, v)
+ if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) {
+ continue
+ }
+ sp.setTagNoLocking(k, v)
+ }
+ }
+ // emit metrics
+ if sp.context.IsSampled() {
+ t.metrics.SpansStartedSampled.Inc(1)
+ if newTrace {
+ // We cannot simply check for parentID==0 because in Zipkin model the
+ // server-side RPC span has the exact same trace/span/parent IDs as the
+ // calling client-side span, but obviously the server side span is
+ // no longer a root span of the trace.
+ t.metrics.TracesStartedSampled.Inc(1)
+ } else if sp.firstInProcess {
+ t.metrics.TracesJoinedSampled.Inc(1)
+ }
+ } else {
+ t.metrics.SpansStartedNotSampled.Inc(1)
+ if newTrace {
+ t.metrics.TracesStartedNotSampled.Inc(1)
+ } else if sp.firstInProcess {
+ t.metrics.TracesJoinedNotSampled.Inc(1)
+ }
+ }
+ return sp
+}
+
+func (t *Tracer) reportSpan(sp *Span) {
+ t.metrics.SpansFinished.Inc(1)
+ if sp.context.IsSampled() {
+ t.reporter.Report(sp)
+ }
+ if t.options.poolSpans {
+ t.spanPool.Put(sp)
+ }
+}
+
+// randomID generates a random trace/span ID, using tracer.random() generator.
+// It never returns 0.
+func (t *Tracer) randomID() uint64 {
+ val := t.randomNumber()
+ for val == 0 {
+ val = t.randomNumber()
+ }
+ return val
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) setBaggage(sp *Span, key, value string) {
+ t.baggageSetter.setBaggage(sp, key, value)
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) isDebugAllowed(operation string) bool {
+ return t.debugThrottler.IsAllowed(operation)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
new file mode 100644
index 000000000..a90265f03
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ "github.com/uber/jaeger-client-go/internal/throttler"
+)
+
+// TracerOption is a function that sets some option on the tracer
+type TracerOption func(tracer *Tracer)
+
+// TracerOptions is a factory for all available TracerOption values.
+var TracerOptions tracerOptions
+
+type tracerOptions struct{}
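+
+// Illustrative sketch (not part of this file): options are created via the
+// TracerOptions factory and passed to NewTracer. The sampler and reporter
+// values are hypothetical.
+//
+//  tracer, closer := NewTracer("my-service", sampler, reporter,
+//      TracerOptions.Gen128Bit(true),
+//      TracerOptions.MaxTagValueLength(1024),
+//      TracerOptions.Tag("deployment", "staging"),
+//  )
+//  defer closer.Close()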
+
+// Metrics creates a TracerOption that initializes Metrics on the tracer,
+// which is used to emit statistics.
+func (tracerOptions) Metrics(m *Metrics) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.metrics = *m
+ }
+}
+
+// Logger creates a TracerOption that gives the tracer a Logger.
+func (tracerOptions) Logger(logger Logger) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.logger = logger
+ }
+}
+
+func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
+ return func(tracer *Tracer) {
+ if headerKeys == nil {
+ return
+ }
+ textPropagator := newTextMapPropagator(headerKeys.applyDefaults(), tracer.metrics)
+ tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+ httpHeaderPropagator := newHTTPHeaderPropagator(headerKeys.applyDefaults(), tracer.metrics)
+ tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+ }
+}
+
+// TimeNow creates a TracerOption that gives the tracer a function
+// used to generate timestamps for spans.
+func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.timeNow = timeNow
+ }
+}
+
+// RandomNumber creates a TracerOption that gives the tracer
+// a thread-safe random number generator function for generating trace IDs.
+func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.randomNumber = randomNumber
+ }
+}
+
+// PoolSpans creates a TracerOption that tells the tracer whether it should use
+// an object pool to minimize span allocations.
+// This should be used with care, only if the service is not running any async tasks
+// that can access parent spans after those spans have been finished.
+func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.poolSpans = poolSpans
+ }
+}
+
+// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
+// If not set, the factory method will obtain the current IP address.
+// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
+func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.hostIPv4 = hostIPv4
+ }
+}
+
+func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.injectors[format] = injector
+ }
+}
+
+func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.extractors[format] = extractor
+ }
+}
+
+func (t tracerOptions) Observer(observer Observer) TracerOption {
+ return t.ContribObserver(&oldObserver{obs: observer})
+}
+
+func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.observer.append(observer)
+ }
+}
+
+func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.gen128Bit = gen128Bit
+ }
+}
+
+func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.highTraceIDGenerator = highTraceIDGenerator
+ }
+}
+
+func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.maxTagValueLength = maxTagValueLength
+ }
+}
+
+func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+ }
+}
+
+func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.tags = append(tracer.tags, Tag{key: key, value: value})
+ }
+}
+
+func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.baggageRestrictionManager = mgr
+ }
+}
+
+func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.debugThrottler = throttler
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go
new file mode 100644
index 000000000..c5f5b1955
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "io"
+)
+
+// Transport abstracts the method of sending spans out of process.
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same go-routine.
+type Transport interface {
+ // Append converts the span to the wire representation and adds it
+ // to the sender's internal buffer. If the buffer exceeds its designated
+ // size, the transport should call Flush() and return the number of spans
+ // flushed; otherwise it returns 0. If an error is returned, the returned
+ // number of spans is treated as the number of failed spans and reported
+ // to metrics accordingly.
+ Append(span *Span) (int, error)
+
+ // Flush submits the internal buffer to the remote server. It returns the
+ // number of spans flushed. If an error is returned, the returned number of
+ // spans is treated as the number of failed spans and reported to metrics accordingly.
+ Flush() (int, error)
+
+ io.Closer
+}
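+
+// Illustrative sketch (not part of the upstream API): a minimal in-memory
+// Transport that follows the Append/Flush contract, e.g. for tests. The
+// inMemoryTransport name is hypothetical.
+//
+//  type inMemoryTransport struct {
+//      batchSize int
+//      spans     []*Span
+//  }
+//
+//  func (t *inMemoryTransport) Append(span *Span) (int, error) {
+//      t.spans = append(t.spans, span)
+//      if len(t.spans) >= t.batchSize {
+//          return t.Flush()
+//      }
+//      return 0, nil
+//  }
+//
+//  func (t *inMemoryTransport) Flush() (int, error) {
+//      n := len(t.spans)
+//      t.spans = t.spans[:0] // "send" and reset the buffer
+//      return n, nil
+//  }
+//
+//  func (t *inMemoryTransport) Close() error { return nil }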
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
new file mode 100644
index 000000000..6b961fb63
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport defines various transports that can be used with
+// RemoteReporter to send spans out of process. Transport is responsible
+// for serializing the spans into a specific format suitable for sending
+// to the tracing backend. Examples may include Thrift over UDP, Thrift
+// or JSON over HTTP, Thrift over Kafka, etc.
+//
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same go-routine.
+package transport
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go
new file mode 100644
index 000000000..bc1b3e6b0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport/http.go
@@ -0,0 +1,163 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/uber/jaeger-client-go/thrift"
+
+ "github.com/uber/jaeger-client-go"
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+// Default timeout for an HTTP request.
+const defaultHTTPTimeout = time.Second * 5
+
+// HTTPTransport implements Transport by forwarding spans to an HTTP server.
+type HTTPTransport struct {
+ url string
+ client *http.Client
+ batchSize int
+ spans []*j.Span
+ process *j.Process
+ httpCredentials *HTTPBasicAuthCredentials
+}
+
+// HTTPBasicAuthCredentials stores credentials for HTTP basic auth.
+type HTTPBasicAuthCredentials struct {
+ username string
+ password string
+}
+
+// HTTPOption sets a parameter on the HTTPTransport.
+type HTTPOption func(c *HTTPTransport)
+
+// HTTPTimeout sets the maximum timeout for an HTTP request.
+func HTTPTimeout(duration time.Duration) HTTPOption {
+ return func(c *HTTPTransport) { c.client.Timeout = duration }
+}
+
+// HTTPBatchSize sets the maximum batch size, after which a flush of the
+// buffered spans will be triggered. The default batch size is 100 spans.
+func HTTPBatchSize(n int) HTTPOption {
+ return func(c *HTTPTransport) { c.batchSize = n }
+}
+
+// HTTPBasicAuth sets the credentials required to perform HTTP basic auth
+func HTTPBasicAuth(username string, password string) HTTPOption {
+ return func(c *HTTPTransport) {
+ c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password}
+ }
+}
+
+// HTTPRoundTripper configures the underlying Transport on the *http.Client
+// used by the HTTPTransport.
+func HTTPRoundTripper(transport http.RoundTripper) HTTPOption {
+ return func(c *HTTPTransport) {
+ c.client.Transport = transport
+ }
+}
+
+// NewHTTPTransport returns a new HTTP-backed transport. url should be the HTTP
+// URL of the collector endpoint that handles POST requests, typically something like:
+// http://hostname:14268/api/traces?format=jaeger.thrift
+func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport {
+ c := &HTTPTransport{
+ url: url,
+ client: &http.Client{Timeout: defaultHTTPTimeout},
+ batchSize: 100,
+ spans: []*j.Span{},
+ }
+
+ for _, option := range options {
+ option(c)
+ }
+ return c
+}
+
+// Append implements Transport.
+func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) {
+ if c.process == nil {
+ c.process = jaeger.BuildJaegerProcessThrift(span)
+ }
+ jSpan := jaeger.BuildJaegerThrift(span)
+ c.spans = append(c.spans, jSpan)
+ if len(c.spans) >= c.batchSize {
+ return c.Flush()
+ }
+ return 0, nil
+}
+
+// Flush implements Transport.
+func (c *HTTPTransport) Flush() (int, error) {
+ count := len(c.spans)
+ if count == 0 {
+ return 0, nil
+ }
+ err := c.send(c.spans)
+ c.spans = c.spans[:0]
+ return count, err
+}
+
+// Close implements Transport.
+func (c *HTTPTransport) Close() error {
+ return nil
+}
+
+func (c *HTTPTransport) send(spans []*j.Span) error {
+ batch := &j.Batch{
+ Spans: spans,
+ Process: c.process,
+ }
+ body, err := serializeThrift(batch)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest("POST", c.url, body)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/x-thrift")
+
+ if c.httpCredentials != nil {
+ req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password)
+ }
+
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return err
+ }
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ if resp.StatusCode >= http.StatusBadRequest {
+ return fmt.Errorf("error from collector: %d", resp.StatusCode)
+ }
+ return nil
+}
+
+func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) {
+ t := thrift.NewTMemoryBuffer()
+ p := thrift.NewTBinaryProtocolTransport(t)
+ if err := obj.Write(p); err != nil {
+ return nil, err
+ }
+ return t.Buffer, nil
+}
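Reviewer note (not part of the vendored sources): a usage sketch wiring this HTTP transport into a tracer. jaeger.NewRemoteReporter, jaeger.NewConstSampler, and jaeger.NewTracer come from this client library; the collector URL, credentials, and service name are placeholders.

    package main

    import (
        "time"

        "github.com/opentracing/opentracing-go"
        "github.com/uber/jaeger-client-go"
        "github.com/uber/jaeger-client-go/transport"
    )

    func main() {
        // Placeholder collector endpoint; adjust for the actual deployment.
        sender := transport.NewHTTPTransport(
            "http://jaeger-collector:14268/api/traces?format=jaeger.thrift",
            transport.HTTPTimeout(10*time.Second),
            transport.HTTPBatchSize(50),
            transport.HTTPBasicAuth("user", "secret"),
        )
        reporter := jaeger.NewRemoteReporter(sender)
        tracer, closer := jaeger.NewTracer("my-service", jaeger.NewConstSampler(true), reporter)
        defer closer.Close()

        opentracing.SetGlobalTracer(tracer)
        span := tracer.StartSpan("example-operation")
        span.Finish()
    }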
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
new file mode 100644
index 000000000..7b9ccf937
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
@@ -0,0 +1,131 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/uber/jaeger-client-go/thrift"
+
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// Empirically obtained constant for how many bytes in the message are used for envelope.
+// The total datagram size is:
+// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize
+// There is a unit test `TestEmitBatchOverhead` that validates this number.
+// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans
+// in the batch, because the length of the list is encoded as varint32, as well as SeqId.
+const emitBatchOverhead = 30
+
+var errSpanTooLarge = errors.New("Span is too large")
+
+type udpSender struct {
+ client *utils.AgentClientUDP
+ maxPacketSize int // max size of datagram in bytes
+ maxSpanBytes int // max number of bytes to record spans (excluding envelope) in the datagram
+ byteBufferSize int // current number of span bytes accumulated in the buffer
+ spanBuffer []*j.Span // spans buffered before a flush
+ thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+ thriftProtocol thrift.TProtocol
+ process *j.Process
+ processByteSize int
+}
+
+// NewUDPTransport creates a Transport that submits spans to jaeger-agent over UDP.
+func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
+ if len(hostPort) == 0 {
+ hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
+ }
+ if maxPacketSize == 0 {
+ maxPacketSize = utils.UDPPacketMaxLength
+ }
+
+ protocolFactory := thrift.NewTCompactProtocolFactory()
+
+ // Each span is first written to thriftBuffer to determine its size in bytes.
+ thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
+ thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
+
+ client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize)
+ if err != nil {
+ return nil, err
+ }
+
+ sender := &udpSender{
+ client: client,
+ maxSpanBytes: maxPacketSize - emitBatchOverhead,
+ thriftBuffer: thriftBuffer,
+ thriftProtocol: thriftProtocol}
+ return sender, nil
+}
+
+func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
+ s.thriftBuffer.Reset()
+ thriftStruct.Write(s.thriftProtocol)
+ return s.thriftBuffer.Len()
+}
+
+func (s *udpSender) Append(span *Span) (int, error) {
+ if s.process == nil {
+ s.process = BuildJaegerProcessThrift(span)
+ s.processByteSize = s.calcSizeOfSerializedThrift(s.process)
+ s.byteBufferSize += s.processByteSize
+ }
+ jSpan := BuildJaegerThrift(span)
+ spanSize := s.calcSizeOfSerializedThrift(jSpan)
+ if spanSize > s.maxSpanBytes {
+ return 1, errSpanTooLarge
+ }
+
+ s.byteBufferSize += spanSize
+ if s.byteBufferSize <= s.maxSpanBytes {
+ s.spanBuffer = append(s.spanBuffer, jSpan)
+ if s.byteBufferSize < s.maxSpanBytes {
+ return 0, nil
+ }
+ return s.Flush()
+ }
+ // the latest span did not fit in the buffer
+ n, err := s.Flush()
+ s.spanBuffer = append(s.spanBuffer, jSpan)
+ s.byteBufferSize = spanSize + s.processByteSize
+ return n, err
+}
+
+func (s *udpSender) Flush() (int, error) {
+ n := len(s.spanBuffer)
+ if n == 0 {
+ return 0, nil
+ }
+ err := s.client.EmitBatch(&j.Batch{Process: s.process, Spans: s.spanBuffer})
+ s.resetBuffers()
+
+ return n, err
+}
+
+func (s *udpSender) Close() error {
+ return s.client.Close()
+}
+
+func (s *udpSender) resetBuffers() {
+ for i := range s.spanBuffer {
+ s.spanBuffer[i] = nil
+ }
+ s.spanBuffer = s.spanBuffer[:0]
+ s.byteBufferSize = s.processByteSize
+}
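Reviewer note (not part of the vendored sources): the UDP transport is built analogously; an empty host-port and a zero packet size fall back to the defaults referenced in NewUDPTransport above.

    package main

    import (
        "log"

        "github.com/uber/jaeger-client-go"
    )

    func main() {
        // "" and 0 select DefaultUDPSpanServerHost:Port and utils.UDPPacketMaxLength.
        sender, err := jaeger.NewUDPTransport("", 0)
        if err != nil {
            log.Fatal(err)
        }
        reporter := jaeger.NewRemoteReporter(sender)
        defer reporter.Close()
    }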
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
new file mode 100644
index 000000000..237211f82
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`.
+func GetJSON(url string, out interface{}) error {
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ return ReadJSON(resp, out)
+}
+
+// ReadJSON reads JSON from http.Response and parses it into `out`
+func ReadJSON(resp *http.Response, out interface{}) error {
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+ }
+
+ if out == nil {
+ io.Copy(ioutil.Discard, resp.Body)
+ return nil
+ }
+
+ decoder := json.NewDecoder(resp.Body)
+ return decoder.Decode(out)
+}
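Reviewer note (not part of the vendored sources): a sketch of GetJSON against a hypothetical JSON endpoint; the URL and the response struct are made up for illustration.

    package main

    import (
        "fmt"
        "log"

        "github.com/uber/jaeger-client-go/utils"
    )

    // strategyResponse is a hypothetical response shape, not a type from the library.
    type strategyResponse struct {
        StrategyType string  `json:"strategyType"`
        Param        float64 `json:"param"`
    }

    func main() {
        var out strategyResponse
        // Hypothetical endpoint; GetJSON returns an error for status codes >= 400.
        if err := utils.GetJSON("http://localhost:5778/sampling?service=my-service", &out); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%+v\n", out)
    }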
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
new file mode 100644
index 000000000..b51af7713
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "errors"
+ "net"
+)
+
+// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go
+
+// scoreAddr scores how likely the given addr is to be a remote address and returns the
+// IP to use when listening. Any address which receives a negative score should not be used.
+// Scores are calculated as:
+// -1 for any unknown IP addresses.
+// +300 for IPv4 addresses
+// +100 for non-local addresses, extra +100 for "up" interfaces.
+func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) {
+ var ip net.IP
+ if netAddr, ok := addr.(*net.IPNet); ok {
+ ip = netAddr.IP
+ } else if netIP, ok := addr.(*net.IPAddr); ok {
+ ip = netIP.IP
+ } else {
+ return -1, nil
+ }
+
+ var score int
+ if ip.To4() != nil {
+ score += 300
+ }
+ if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() {
+ score += 100
+ if iface.Flags&net.FlagUp != 0 {
+ score += 100
+ }
+ }
+ return score, ip
+}
+
+// HostIP tries to find an IP that can be used by other machines to reach this machine.
+func HostIP() (net.IP, error) {
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ bestScore := -1
+ var bestIP net.IP
+ // Select the highest scoring IP as the best IP.
+ for _, iface := range interfaces {
+ addrs, err := iface.Addrs()
+ if err != nil {
+ // Skip this interface if there is an error.
+ continue
+ }
+
+ for _, addr := range addrs {
+ score, ip := scoreAddr(iface, addr)
+ if score > bestScore {
+ bestScore = score
+ bestIP = ip
+ }
+ }
+ }
+
+ if bestScore == -1 {
+ return nil, errors.New("no addresses to listen on")
+ }
+
+ return bestIP, nil
+}
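Reviewer note (not part of the vendored sources): HostIP can be called directly to pick the best externally reachable address.

    package main

    import (
        "fmt"
        "log"

        "github.com/uber/jaeger-client-go/utils"
    )

    func main() {
        ip, err := utils.HostIP() // highest-scoring non-loopback, "up", IPv4 address wins
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("host IP:", ip)
    }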
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
new file mode 100644
index 000000000..9875f7f55
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "math/rand"
+ "sync"
+)
+
+// lockedSource allows a random number generator to be used by multiple goroutines concurrently.
+// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed.
+type lockedSource struct {
+ mut sync.Mutex
+ src rand.Source
+}
+
+// NewRand returns a rand.Rand that is safe for concurrent use.
+func NewRand(seed int64) *rand.Rand {
+ return rand.New(&lockedSource{src: rand.NewSource(seed)})
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.mut.Lock()
+ n = r.src.Int63()
+ r.mut.Unlock()
+ return
+}
+
+// Seed implements Seed() of Source
+func (r *lockedSource) Seed(seed int64) {
+ r.mut.Lock()
+ r.src.Seed(seed)
+ r.mut.Unlock()
+}
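Reviewer note (not part of the vendored sources): the returned *rand.Rand can be shared across goroutines because every Int63 call goes through the mutex-guarded source.

    package main

    import (
        "fmt"
        "sync"
        "time"

        "github.com/uber/jaeger-client-go/utils"
    )

    func main() {
        rng := utils.NewRand(time.Now().UnixNano())
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                fmt.Println(rng.Int63()) // safe: lockedSource serializes access to the source
            }()
        }
        wg.Wait()
    }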
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
new file mode 100644
index 000000000..1b8db9758
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "sync"
+ "time"
+)
+
+// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits.
+type RateLimiter interface {
+ CheckCredit(itemCost float64) bool
+}
+
+type rateLimiter struct {
+ sync.Mutex
+
+ creditsPerSecond float64
+ balance float64
+ maxBalance float64
+ lastTick time.Time
+
+ timeNow func() time.Time
+}
+
+// NewRateLimiter creates a new rate limiter based on the leaky bucket algorithm, formulated in terms of a
+// credits balance that is replenished every time CheckCredit() is called (a tick) by an amount proportional
+// to the time elapsed since the last tick, capped at maxBalance. A call to CheckCredit() takes the cost
+// of an item we want to pay for with the balance. If the balance exceeds the cost of the item, the item is "purchased"
+// and the balance reduced, indicated by a returned value of true. Otherwise the balance is unchanged and false is returned.
+//
+// This can be used to limit a rate of messages emitted by a service by instantiating the Rate Limiter with the
+// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message
+// to determine if the message is within the rate limit.
+//
+// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput
+// as bytes/second, and calling CheckCredit() with the actual message size.
+func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter {
+ return &rateLimiter{
+ creditsPerSecond: creditsPerSecond,
+ balance: maxBalance,
+ maxBalance: maxBalance,
+ lastTick: time.Now(),
+ timeNow: time.Now}
+}
+
+func (b *rateLimiter) CheckCredit(itemCost float64) bool {
+ b.Lock()
+ defer b.Unlock()
+ // calculate how much time passed since the last tick, and update current tick
+ currentTime := b.timeNow()
+ elapsedTime := currentTime.Sub(b.lastTick)
+ b.lastTick = currentTime
+ // calculate how much credit have we accumulated since the last tick
+ b.balance += elapsedTime.Seconds() * b.creditsPerSecond
+ if b.balance > b.maxBalance {
+ b.balance = b.maxBalance
+ }
+ // if we have enough credits to pay for current item, then reduce balance and allow
+ if b.balance >= itemCost {
+ b.balance -= itemCost
+ return true
+ }
+ return false
+}
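Reviewer note (not part of the vendored sources): a small sketch of the credit model described above, limiting output to roughly two messages per second with a burst of two.

    package main

    import (
        "fmt"
        "time"

        "github.com/uber/jaeger-client-go/utils"
    )

    func main() {
        // 2 credits replenished per second, balance capped at 2 (burst of two messages).
        limiter := utils.NewRateLimiter(2.0, 2.0)
        for i := 0; i < 10; i++ {
            if limiter.CheckCredit(1.0) {
                fmt.Println("message", i, "sent")
            } else {
                fmt.Println("message", i, "dropped: rate limit exceeded")
            }
            time.Sleep(100 * time.Millisecond)
        }
    }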
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
new file mode 100644
index 000000000..6f042073d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+
+ "github.com/uber/jaeger-client-go/thrift"
+
+ "github.com/uber/jaeger-client-go/thrift-gen/agent"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// UDPPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent
+const UDPPacketMaxLength = 65000
+
+// AgentClientUDP is a UDP client to Jaeger agent that implements agent.Agent interface.
+type AgentClientUDP struct {
+ agent.Agent
+ io.Closer
+
+ connUDP *net.UDPConn
+ client *agent.AgentClient
+ maxPacketSize int // max size of datagram in bytes
+ thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+}
+
+// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
+ if maxPacketSize == 0 {
+ maxPacketSize = UDPPacketMaxLength
+ }
+
+ thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
+ protocolFactory := thrift.NewTCompactProtocolFactory()
+ client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory)
+
+ destAddr, err := net.ResolveUDPAddr("udp", hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr)
+ if err != nil {
+ return nil, err
+ }
+ if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil {
+ return nil, err
+ }
+
+ clientUDP := &AgentClientUDP{
+ connUDP: connUDP,
+ client: client,
+ maxPacketSize: maxPacketSize,
+ thriftBuffer: thriftBuffer}
+ return clientUDP, nil
+}
+
+// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface
+func (a *AgentClientUDP) EmitZipkinBatch(spans []*zipkincore.Span) error {
+ return errors.New("Not implemented")
+}
+
+// EmitBatch implements EmitBatch() of Agent interface
+func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error {
+ a.thriftBuffer.Reset()
+ a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages
+ if err := a.client.EmitBatch(batch); err != nil {
+ return err
+ }
+ if a.thriftBuffer.Len() > a.maxPacketSize {
+ return fmt.Errorf("Data does not fit within one UDP packet; size %d, max %d, spans %d",
+ a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
+ }
+ _, err := a.connUDP.Write(a.thriftBuffer.Bytes())
+ return err
+}
+
+// Close implements Close() of io.Closer and closes the underlying UDP connection.
+func (a *AgentClientUDP) Close() error {
+ return a.connUDP.Close()
+}
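Reviewer note (not part of the vendored sources): a sketch of emitting a batch directly through AgentClientUDP. The Batch and Process fields follow the thrift-gen types above; the agent address and service name are placeholders.

    package main

    import (
        "log"

        "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
        "github.com/uber/jaeger-client-go/utils"
    )

    func main() {
        // 0 selects utils.UDPPacketMaxLength as the maximum datagram size.
        client, err := utils.NewAgentClientUDP("127.0.0.1:6831", 0)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        batch := &jaeger.Batch{
            Process: &jaeger.Process{ServiceName: "my-service"},
            Spans:   []*jaeger.Span{}, // normally filled with spans converted to thrift
        }
        if err := client.EmitBatch(batch); err != nil {
            log.Fatal(err) // e.g. the batch does not fit within one UDP packet
        }
    }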
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
new file mode 100644
index 000000000..ac3c325d1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
@@ -0,0 +1,87 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // ErrEmptyIP an error for empty ip strings
+ ErrEmptyIP = errors.New("empty string given for ip")
+
+ // ErrNotHostColonPort an error for invalid host port string
+ ErrNotHostColonPort = errors.New("expecting host:port")
+
+ // ErrNotFourOctets an error for the wrong number of octets after splitting a string
+ ErrNotFourOctets = errors.New("Wrong number of octets")
+)
+
+// ParseIPToUint32 converts a string ip (e.g. "x.y.z.w") to an uint32
+func ParseIPToUint32(ip string) (uint32, error) {
+ if ip == "" {
+ return 0, ErrEmptyIP
+ }
+
+ if ip == "localhost" {
+ return 127<<24 | 1, nil
+ }
+
+ octets := strings.Split(ip, ".")
+ if len(octets) != 4 {
+ return 0, ErrNotFourOctets
+ }
+
+ var intIP uint32
+ for i := 0; i < 4; i++ {
+ octet, err := strconv.Atoi(octets[i])
+ if err != nil {
+ return 0, err
+ }
+ intIP = (intIP << 8) | uint32(octet)
+ }
+
+ return intIP, nil
+}
+
+// ParsePort converts a port number from string to uint16.
+func ParsePort(portString string) (uint16, error) {
+ port, err := strconv.ParseUint(portString, 10, 16)
+ return uint16(port), err
+}
+
+// PackIPAsUint32 packs an IPv4 as uint32
+func PackIPAsUint32(ip net.IP) uint32 {
+ if ipv4 := ip.To4(); ipv4 != nil {
+ return binary.BigEndian.Uint32(ipv4)
+ }
+ return 0
+}
+
+// TimeToMicrosecondsSinceEpochInt64 converts Go time.Time to an int64
+// representing time since epoch in microseconds, which is the format
+// expected in Jaeger spans encoded as Thrift.
+func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 {
+ // ^^^ Passing time.Time by value is faster than passing a pointer!
+ // BenchmarkTimeByValue-8 2000000000 1.37 ns/op
+ // BenchmarkTimeByPtr-8 2000000000 1.98 ns/op
+
+ return t.UnixNano() / 1000
+}
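Reviewer note (not part of the vendored sources): small examples of the conversions above.

    package main

    import (
        "fmt"
        "net"
        "time"

        "github.com/uber/jaeger-client-go/utils"
    )

    func main() {
        ip, _ := utils.ParseIPToUint32("10.1.2.3") // 0x0a010203
        packed := utils.PackIPAsUint32(net.ParseIP("10.1.2.3"))
        fmt.Printf("%#x %#x equal=%v\n", ip, packed, ip == packed)

        port, _ := utils.ParsePort("14268")
        fmt.Println(port) // 14268 as uint16

        fmt.Println(utils.TimeToMicrosecondsSinceEpochInt64(time.Now())) // microseconds since epoch
    }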
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
new file mode 100644
index 000000000..636952b7f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go"
+)
+
+// ZipkinSpanFormat is an OpenTracing carrier format constant
+const ZipkinSpanFormat = "zipkin-span-format"
+
+// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type ExtractableZipkinSpan interface {
+ TraceID() uint64
+ SpanID() uint64
+ ParentID() uint64
+ Flags() byte
+}
+
+// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type InjectableZipkinSpan interface {
+ SetTraceID(traceID uint64)
+ SetSpanID(spanID uint64)
+ SetParentID(parentID uint64)
+ SetFlags(flags byte)
+}
+
+type zipkinPropagator struct {
+ tracer *Tracer
+}
+
+func (p *zipkinPropagator) Inject(
+ ctx SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(InjectableZipkinSpan)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
+ carrier.SetSpanID(uint64(ctx.SpanID()))
+ carrier.SetParentID(uint64(ctx.ParentID()))
+ carrier.SetFlags(ctx.flags)
+ return nil
+}
+
+func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(ExtractableZipkinSpan)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ if carrier.TraceID() == 0 {
+ return emptyContext, opentracing.ErrSpanContextNotFound
+ }
+ var ctx SpanContext
+ ctx.traceID.Low = carrier.TraceID()
+ ctx.spanID = SpanID(carrier.SpanID())
+ ctx.parentID = SpanID(carrier.ParentID())
+ ctx.flags = carrier.Flags()
+ return ctx, nil
+}
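Reviewer note (not part of the vendored sources): a carrier only has to satisfy the two small interfaces above; the propagator itself is registered by Zipkin-aware frameworks such as TChannel. A minimal struct-backed carrier sketch:

    // Sketch only; the "carrier" package name and struct are made up for illustration.
    package carrier

    // ZipkinCarrier satisfies both ExtractableZipkinSpan and InjectableZipkinSpan.
    type ZipkinCarrier struct {
        traceID, spanID, parentID uint64
        flags                     byte
    }

    func (c *ZipkinCarrier) TraceID() uint64       { return c.traceID }
    func (c *ZipkinCarrier) SpanID() uint64        { return c.spanID }
    func (c *ZipkinCarrier) ParentID() uint64      { return c.parentID }
    func (c *ZipkinCarrier) Flags() byte           { return c.flags }
    func (c *ZipkinCarrier) SetTraceID(id uint64)  { c.traceID = id }
    func (c *ZipkinCarrier) SetSpanID(id uint64)   { c.spanID = id }
    func (c *ZipkinCarrier) SetParentID(id uint64) { c.parentID = id }
    func (c *ZipkinCarrier) SetFlags(f byte)       { c.flags = f }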
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
new file mode 100644
index 000000000..dce58b433
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
@@ -0,0 +1,322 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "encoding/binary"
+ "fmt"
+ "time"
+
+ "github.com/opentracing/opentracing-go/ext"
+
+ "github.com/uber/jaeger-client-go/internal/spanlog"
+ z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+ // Zipkin UI does not work well with non-string tag values
+ allowPackedNumbers = false
+)
+
+var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){
+ string(ext.SpanKind): setSpanKind,
+ string(ext.PeerHostIPv4): setPeerIPv4,
+ string(ext.PeerPort): setPeerPort,
+ string(ext.PeerService): setPeerService,
+ TracerIPTagKey: removeTag,
+}
+
+// BuildZipkinThrift builds thrift span based on internal span.
+func BuildZipkinThrift(s *Span) *z.Span {
+ span := &zipkinSpan{Span: s}
+ span.handleSpecialTags()
+ parentID := int64(span.context.parentID)
+ var ptrParentID *int64
+ if parentID != 0 {
+ ptrParentID = &parentID
+ }
+ timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+ duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+ endpoint := &z.Endpoint{
+ ServiceName: span.tracer.serviceName,
+ Ipv4: int32(span.tracer.hostIPv4)}
+ thriftSpan := &z.Span{
+ TraceID: int64(span.context.traceID.Low), // TODO upgrade zipkin thrift and use TraceIdHigh
+ ID: int64(span.context.spanID),
+ ParentID: ptrParentID,
+ Name: span.operationName,
+ Timestamp: &timestamp,
+ Duration: &duration,
+ Debug: span.context.IsDebug(),
+ Annotations: buildAnnotations(span, endpoint),
+ BinaryAnnotations: buildBinaryAnnotations(span, endpoint)}
+ return thriftSpan
+}
+
+func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation {
+ // automatically adding 2 Zipkin CoreAnnotations
+ annotations := make([]*z.Annotation, 0, 2+len(span.logs))
+ var startLabel, endLabel string
+ if span.spanKind == string(ext.SpanKindRPCClientEnum) {
+ startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV
+ } else if span.spanKind == string(ext.SpanKindRPCServerEnum) {
+ startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND
+ }
+ if !span.startTime.IsZero() && startLabel != "" {
+ start := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime),
+ Value: startLabel,
+ Host: endpoint}
+ annotations = append(annotations, start)
+ if span.duration != 0 {
+ endTs := span.startTime.Add(span.duration)
+ end := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs),
+ Value: endLabel,
+ Host: endpoint}
+ annotations = append(annotations, end)
+ }
+ }
+ for _, log := range span.logs {
+ anno := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+ Host: endpoint}
+ if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil {
+ anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength)
+ } else {
+ anno.Value = err.Error()
+ }
+ annotations = append(annotations, anno)
+ }
+ return annotations
+}
+
+func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation {
+ // automatically adding local component or server/client address tag, and client version
+ annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags))
+
+ if span.peerDefined() && span.isRPC() {
+ peer := z.Endpoint{
+ Ipv4: span.peer.Ipv4,
+ Port: span.peer.Port,
+ ServiceName: span.peer.ServiceName}
+ label := z.CLIENT_ADDR
+ if span.isRPCClient() {
+ label = z.SERVER_ADDR
+ }
+ anno := &z.BinaryAnnotation{
+ Key: label,
+ Value: []byte{1},
+ AnnotationType: z.AnnotationType_BOOL,
+ Host: &peer}
+ annotations = append(annotations, anno)
+ }
+ if !span.isRPC() {
+ componentName := endpoint.ServiceName
+ for _, tag := range span.tags {
+ if tag.key == string(ext.Component) {
+ componentName = stringify(tag.value)
+ break
+ }
+ }
+ local := &z.BinaryAnnotation{
+ Key: z.LOCAL_COMPONENT,
+ Value: []byte(componentName),
+ AnnotationType: z.AnnotationType_STRING,
+ Host: endpoint}
+ annotations = append(annotations, local)
+ }
+ for _, tag := range span.tags {
+ // "Special tags" are already handled by this point, we'd be double reporting the
+ // tags if we don't skip here
+ if _, ok := specialTagHandlers[tag.key]; ok {
+ continue
+ }
+ if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil {
+ annotations = append(annotations, anno)
+ }
+ }
+ return annotations
+}
+
+func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation {
+ bann := &z.BinaryAnnotation{Key: key, Host: endpoint}
+ if value, ok := val.(string); ok {
+ bann.Value = []byte(truncateString(value, maxTagValueLength))
+ bann.AnnotationType = z.AnnotationType_STRING
+ } else if value, ok := val.([]byte); ok {
+ if len(value) > maxTagValueLength {
+ value = value[:maxTagValueLength]
+ }
+ bann.Value = value
+ bann.AnnotationType = z.AnnotationType_BYTES
+ } else if value, ok := val.(int32); ok && allowPackedNumbers {
+ bann.Value = int32ToBytes(value)
+ bann.AnnotationType = z.AnnotationType_I32
+ } else if value, ok := val.(int64); ok && allowPackedNumbers {
+ bann.Value = int64ToBytes(value)
+ bann.AnnotationType = z.AnnotationType_I64
+ } else if value, ok := val.(int); ok && allowPackedNumbers {
+ bann.Value = int64ToBytes(int64(value))
+ bann.AnnotationType = z.AnnotationType_I64
+ } else if value, ok := val.(bool); ok {
+ bann.Value = []byte{boolToByte(value)}
+ bann.AnnotationType = z.AnnotationType_BOOL
+ } else {
+ value := stringify(val)
+ bann.Value = []byte(truncateString(value, maxTagValueLength))
+ bann.AnnotationType = z.AnnotationType_STRING
+ }
+ return bann
+}
+
+func stringify(value interface{}) string {
+ if s, ok := value.(string); ok {
+ return s
+ }
+ return fmt.Sprintf("%+v", value)
+}
+
+func truncateString(value string, maxLength int) string {
+ // we ignore the problem of utf8 runes possibly being sliced in the middle,
+ // as it is rather expensive to iterate through each tag just to find rune
+ // boundaries.
+ if len(value) > maxLength {
+ return value[:maxLength]
+ }
+ return value
+}
+
+func boolToByte(b bool) byte {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// int32ToBytes converts int32 to bytes.
+func int32ToBytes(i int32) []byte {
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(i))
+ return buf
+}
+
+// int64ToBytes converts int64 to bytes.
+func int64ToBytes(i int64) []byte {
+ buf := make([]byte, 8)
+ binary.BigEndian.PutUint64(buf, uint64(i))
+ return buf
+}
+
+type zipkinSpan struct {
+ *Span
+
+ // peer points to the peer service participating in this span,
+ // e.g. the Client if this span is a server span,
+ // or Server if this span is a client span
+ peer struct {
+ Ipv4 int32
+ Port int16
+ ServiceName string
+ }
+
+ // used to distinguish local vs. RPC Server vs. RPC Client spans
+ spanKind string
+}
+
+func (s *zipkinSpan) handleSpecialTags() {
+ s.Lock()
+ defer s.Unlock()
+ if s.firstInProcess {
+ // append the process tags
+ s.tags = append(s.tags, s.tracer.tags...)
+ }
+ filteredTags := make([]Tag, 0, len(s.tags))
+ for _, tag := range s.tags {
+ if handler, ok := specialTagHandlers[tag.key]; ok {
+ handler(s, tag.value)
+ } else {
+ filteredTags = append(filteredTags, tag)
+ }
+ }
+ s.tags = filteredTags
+}
+
+func setSpanKind(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ s.spanKind = val
+ return
+ }
+ if val, ok := value.(ext.SpanKindEnum); ok {
+ s.spanKind = string(val)
+ }
+}
+
+func setPeerIPv4(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ if ip, err := utils.ParseIPToUint32(val); err == nil {
+ s.peer.Ipv4 = int32(ip)
+ return
+ }
+ }
+ if val, ok := value.(uint32); ok {
+ s.peer.Ipv4 = int32(val)
+ return
+ }
+ if val, ok := value.(int32); ok {
+ s.peer.Ipv4 = val
+ }
+}
+
+func setPeerPort(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ if port, err := utils.ParsePort(val); err == nil {
+ s.peer.Port = int16(port)
+ return
+ }
+ }
+ if val, ok := value.(uint16); ok {
+ s.peer.Port = int16(val)
+ return
+ }
+ if val, ok := value.(int); ok {
+ s.peer.Port = int16(val)
+ }
+}
+
+func setPeerService(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ s.peer.ServiceName = val
+ }
+}
+
+func removeTag(s *zipkinSpan, value interface{}) {}
+
+func (s *zipkinSpan) peerDefined() bool {
+ return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0
+}
+
+func (s *zipkinSpan) isRPC() bool {
+ s.RLock()
+ defer s.RUnlock()
+ return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum)
+}
+
+func (s *zipkinSpan) isRPCClient() bool {
+ s.RLock()
+ defer s.RUnlock()
+ return s.spanKind == string(ext.SpanKindRPCClientEnum)
+}
diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-lib/README.md b/vendor/github.com/uber/jaeger-lib/README.md
new file mode 100644
index 000000000..144ed1fb8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/README.md
@@ -0,0 +1,27 @@
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+# jaeger-lib
+
+A collection of shared infrastructure libraries used by different
+components of [Jaeger](https://github.com/uber/jaeger) backend and [jaeger-client-go](https://github.com/uber/jaeger-client-go).
+This library is *not intended to be used standalone*, and provides *no guarantees of backwards compatibility*.
+
+The library's import path is `github.com/uber/jaeger-lib`.
+
+## How to Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## License
+
+[Apache 2.0 License](./LICENSE).
+
+
+[doc-img]: https://godoc.org/github.com/uber/jaeger-lib?status.svg
+[doc]: https://godoc.org/github.com/uber/jaeger-lib
+[ci-img]: https://travis-ci.org/jaegertracing/jaeger-lib.svg?branch=master
+[ci]: https://travis-ci.org/jaegertracing/jaeger-lib
+[cov-img]: https://coveralls.io/repos/jaegertracing/jaeger-lib/badge.svg?branch=master&service=github
+[cov]: https://coveralls.io/github/jaegertracing/jaeger-lib?branch=master
+
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
new file mode 100644
index 000000000..2a6a43efd
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Counter tracks the number of times an event has occurred
+type Counter interface {
+ // Inc adds the given value to the counter.
+ Inc(int64)
+}
+
+// NullCounter counter that does nothing
+var NullCounter Counter = nullCounter{}
+
+type nullCounter struct{}
+
+func (nullCounter) Inc(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
new file mode 100644
index 000000000..0ead061eb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// NSOptions defines the name and tags map associated with a factory namespace
+type NSOptions struct {
+ Name string
+ Tags map[string]string
+}
+
+// Options defines the information associated with a metric
+type Options struct {
+ Name string
+ Tags map[string]string
+ Help string
+}
+
+// TimerOptions defines the information associated with a metric
+type TimerOptions struct {
+ Name string
+ Tags map[string]string
+ Help string
+ Buckets []time.Duration
+}
+
+// HistogramOptions defines the information associated with a metric
+type HistogramOptions struct {
+ Name string
+ Tags map[string]string
+ Help string
+ Buckets []float64
+}
+
+// Factory creates new metrics
+type Factory interface {
+ Counter(metric Options) Counter
+ Timer(metric TimerOptions) Timer
+ Gauge(metric Options) Gauge
+ Histogram(metric HistogramOptions) Histogram
+
+ // Namespace returns a nested metrics factory.
+ Namespace(scope NSOptions) Factory
+}
+
+// NullFactory is a metrics factory that returns NullCounter, NullTimer, NullGauge, and NullHistogram.
+var NullFactory Factory = nullFactory{}
+
+type nullFactory struct{}
+
+func (nullFactory) Counter(options Options) Counter {
+ return NullCounter
+}
+func (nullFactory) Timer(options TimerOptions) Timer {
+ return NullTimer
+}
+func (nullFactory) Gauge(options Options) Gauge {
+ return NullGauge
+}
+func (nullFactory) Histogram(options HistogramOptions) Histogram {
+ return NullHistogram
+}
+func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory }
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
new file mode 100644
index 000000000..3c606391a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Gauge returns instantaneous measurements of something as an int64 value
+type Gauge interface {
+ // Update the gauge to the value passed in.
+ Update(int64)
+}
+
+// NullGauge gauge that does nothing
+var NullGauge Gauge = nullGauge{}
+
+type nullGauge struct{}
+
+func (nullGauge) Update(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
new file mode 100644
index 000000000..d3bd6174f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2018 The Jaeger Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Histogram that keeps track of a distribution of values.
+type Histogram interface {
+ // Records the value passed in.
+ Record(float64)
+}
+
+// NullHistogram that does nothing
+var NullHistogram Histogram = nullHistogram{}
+
+type nullHistogram struct{}
+
+func (nullHistogram) Record(float64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
new file mode 100644
index 000000000..c24445a10
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "sort"
+)
+
+// GetKey converts name+tags into a single string of the form
+// "name|tag1=value1|...|tagN=valueN", where tag names are
+// sorted alphabetically.
+func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string {
+ keys := make([]string, 0, len(tags))
+ for k := range tags {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ key := name
+ for _, k := range keys {
+ key = key + tagsSep + k + tagKVSep + tags[k]
+ }
+ return key
+}
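Reviewer note (not part of the vendored sources): GetKey is what factories typically use to deduplicate metrics; for example:

    package main

    import (
        "fmt"

        "github.com/uber/jaeger-lib/metrics"
    )

    func main() {
        key := metrics.GetKey("requests", map[string]string{"method": "GET", "code": "200"}, "|", "=")
        fmt.Println(key) // "requests|code=200|method=GET"; tag names sorted alphabetically
    }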
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
new file mode 100644
index 000000000..0c6396888
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// MustInit initializes the passed-in metrics struct, assigning its fields using the passed-in factory.
+//
+// It uses reflection to initialize a struct containing metrics fields
+// by assigning new Counter/Gauge/Timer values with the metric name retrieved
+// from the `metric` tag and stats tags retrieved from the `tags` tag.
+//
+// Note: all fields of the struct must be exported, have a `metric` tag, and be
+// of type Counter or Gauge or Timer.
+//
+// Errors during Init lead to a panic.
+func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) {
+ if err := Init(metrics, factory, globalTags); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Init does the same as MustInit, but returns an error instead of
+// panicking.
+func Init(m interface{}, factory Factory, globalTags map[string]string) error {
+ // Allow user to opt out of reporting metrics by passing in nil.
+ if factory == nil {
+ factory = NullFactory
+ }
+
+ counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem()
+ gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem()
+ timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem()
+ histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem()
+
+ v := reflect.ValueOf(m).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ tags := make(map[string]string)
+ for k, v := range globalTags {
+ tags[k] = v
+ }
+ var buckets []float64
+ field := t.Field(i)
+ metric := field.Tag.Get("metric")
+ if metric == "" {
+ return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name)
+ }
+ if tagString := field.Tag.Get("tags"); tagString != "" {
+ tagPairs := strings.Split(tagString, ",")
+ for _, tagPair := range tagPairs {
+ tag := strings.Split(tagPair, "=")
+ if len(tag) != 2 {
+ return fmt.Errorf(
+ "Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]",
+ field.Name, tagPair, tagString)
+ }
+ tags[tag[0]] = tag[1]
+ }
+ }
+ if bucketString := field.Tag.Get("buckets"); bucketString != "" {
+ if field.Type.AssignableTo(timerPtrType) {
+ // TODO: Parse timer duration buckets
+ return fmt.Errorf(
+ "Field [%s]: Buckets are not currently initialized for timer metrics",
+ field.Name)
+ } else if field.Type.AssignableTo(histogramPtrType) {
+ bucketValues := strings.Split(bucketString, ",")
+ for _, bucket := range bucketValues {
+ b, err := strconv.ParseFloat(bucket, 64)
+ if err != nil {
+ return fmt.Errorf(
+ "Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]",
+ field.Name, bucket, bucketString)
+ }
+ buckets = append(buckets, b)
+ }
+ } else {
+ return fmt.Errorf(
+ "Field [%s]: Buckets should only be defined for Timer and Histogram metric types",
+ field.Name)
+ }
+ }
+ help := field.Tag.Get("help")
+ var obj interface{}
+ if field.Type.AssignableTo(counterPtrType) {
+ obj = factory.Counter(Options{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(gaugePtrType) {
+ obj = factory.Gauge(Options{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(timerPtrType) {
+ // TODO: Add buckets once parsed (see TODO above)
+ obj = factory.Timer(TimerOptions{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(histogramPtrType) {
+ obj = factory.Histogram(HistogramOptions{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ Buckets: buckets,
+ })
+ } else {
+ return fmt.Errorf(
+ "Field %s is not a pointer to timer, gauge, or counter",
+ field.Name)
+ }
+ v.Field(i).Set(reflect.ValueOf(obj))
+ }
+ return nil
+}
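
A minimal sketch (not part of the diff) of how a metrics struct might be declared and initialized with the struct tags that Init parses above. The `Inc`, `Update`, and `Record` calls assume the package's Counter, Gauge, and Histogram interfaces, which are defined elsewhere in jaeger-lib and not shown in this hunk:

``` go
package main

import "github.com/uber/jaeger-lib/metrics"

// queueMetrics is a hypothetical struct; the field types and struct tags
// follow the rules enforced by Init (exported fields, a `metric` tag, and
// optional `tags`, `help`, and `buckets` tags).
type queueMetrics struct {
	JobsEnqueued metrics.Counter   `metric:"jobs.enqueued" tags:"queue=default" help:"number of enqueued jobs"`
	QueueLength  metrics.Gauge     `metric:"queue.length" help:"current queue length"`
	PayloadSize  metrics.Histogram `metric:"payload.size" buckets:"128,1024,65536" help:"payload size in bytes"`
}

func main() {
	m := &queueMetrics{}
	// A nil Factory makes Init fall back to NullFactory, so this sketch
	// runs without a real metrics backend.
	metrics.MustInit(m, nil, map[string]string{"service": "example"})

	// Inc/Update/Record are assumed from the package's Counter, Gauge, and
	// Histogram interfaces, which are not part of this hunk.
	m.JobsEnqueued.Inc(1)
	m.QueueLength.Update(3)
	m.PayloadSize.Record(512)
}
```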
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
new file mode 100644
index 000000000..4a8abdb53
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// StartStopwatch begins recording the execution time of an event, returning
+// a Stopwatch that should be used to stop recording the time for
+// that event. Multiple events can occur simultaneously, each
+// represented by a different active Stopwatch.
+func StartStopwatch(timer Timer) Stopwatch {
+ return Stopwatch{t: timer, start: time.Now()}
+}
+
+// A Stopwatch tracks the execution time of a specific event
+type Stopwatch struct {
+ t Timer
+ start time.Time
+}
+
+// Stop stops the stopwatch and records the elapsed time.
+func (s Stopwatch) Stop() {
+ s.t.Record(s.ElapsedTime())
+}
+
+// ElapsedTime returns the elapsed time (as a time.Duration) since the Stopwatch was started.
+func (s Stopwatch) ElapsedTime() time.Duration {
+ return time.Since(s.start)
+}
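
A minimal usage sketch (not part of the diff) for the Stopwatch type, using NullTimer so it runs without a metrics backend:

``` go
package main

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// NullTimer is used so the snippet has no external dependencies; a real
	// Timer would come from a metrics Factory.
	sw := metrics.StartStopwatch(metrics.NullTimer)
	time.Sleep(10 * time.Millisecond) // the operation being timed
	sw.Stop()                         // records the elapsed duration on the Timer
}
```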
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
new file mode 100644
index 000000000..e18d222ab
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// Timer accumulates observations about how long some operation took,
+// and also maintains a histogram of percentiles.
+type Timer interface {
+ // Records the time passed in.
+ Record(time.Duration)
+}
+
+// NullTimer is a timer that does nothing.
+var NullTimer Timer = nullTimer{}
+
+type nullTimer struct{}
+
+func (nullTimer) Record(time.Duration) {}
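
A minimal sketch (not part of the diff) of a custom Timer implementation; `totalTimer` is a hypothetical type shown only to illustrate the interface, since real Timers are created by a metrics Factory:

``` go
package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

// totalTimer accumulates the total observed duration instead of maintaining
// a histogram; it exists only to demonstrate satisfying the Timer interface.
type totalTimer struct {
	total time.Duration
}

func (t *totalTimer) Record(d time.Duration) { t.total += d }

func main() {
	var timer metrics.Timer = &totalTimer{}
	timer.Record(250 * time.Millisecond)
	timer.Record(750 * time.Millisecond)
	fmt.Println(timer.(*totalTimer).total) // 1s
}
```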
diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md
deleted file mode 100644
index f2baef4f0..000000000
--- a/vendor/github.com/urfave/cli/README.md
+++ /dev/null
@@ -1,1526 +0,0 @@
-cli
-===
-
-[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli)
-[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli)
-[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli)
-[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli)
-[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli)
-[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) /
-[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc)
-
-This is the library formerly known as `github.com/codegangsta/cli` -- Github
-will automatically redirect requests to this repository, but we recommend
-updating your references for clarity.
-
-cli is a simple, fast, and fun package for building command line apps in Go. The
-goal is to enable developers to write fast and distributable command line
-applications in an expressive way.
-
-<!-- toc -->
-
-- [Overview](#overview)
-- [Installation](#installation)
- * [Supported platforms](#supported-platforms)
- * [Using the `v2` branch](#using-the-v2-branch)
- * [Pinning to the `v1` releases](#pinning-to-the-v1-releases)
-- [Getting Started](#getting-started)
-- [Examples](#examples)
- * [Arguments](#arguments)
- * [Flags](#flags)
- + [Placeholder Values](#placeholder-values)
- + [Alternate Names](#alternate-names)
- + [Ordering](#ordering)
- + [Values from the Environment](#values-from-the-environment)
- + [Values from files](#values-from-files)
- + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others)
- + [Precedence](#precedence)
- * [Subcommands](#subcommands)
- * [Subcommands categories](#subcommands-categories)
- * [Exit code](#exit-code)
- * [Bash Completion](#bash-completion)
- + [Enabling](#enabling)
- + [Distribution](#distribution)
- + [Customization](#customization)
- * [Generated Help Text](#generated-help-text)
- + [Customization](#customization-1)
- * [Version Flag](#version-flag)
- + [Customization](#customization-2)
- + [Full API Example](#full-api-example)
- * [Combining short Bool options](#combining-short-bool-options)
-- [Contribution Guidelines](#contribution-guidelines)
-
-<!-- tocstop -->
-
-## Overview
-
-Command line apps are usually so tiny that there is absolutely no reason why
-your code should *not* be self-documenting. Things like generating help text and
-parsing command flags/options should not hinder productivity when writing a
-command line app.
-
-**This is where cli comes into play.** cli makes command line programming fun,
-organized, and expressive!
-
-## Installation
-
-Make sure you have a working Go environment. Go version 1.2+ is supported. [See
-the install instructions for Go](http://golang.org/doc/install.html).
-
-To install cli, simply run:
-```
-$ go get github.com/urfave/cli
-```
-
-Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can
-be easily used:
-```
-export PATH=$PATH:$GOPATH/bin
-```
-
-### Supported platforms
-
-cli is tested against multiple versions of Go on Linux, and against the latest
-released version of Go on OS X and Windows. For full details, see
-[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml).
-
-### Using the `v2` branch
-
-**Warning**: The `v2` branch is currently unreleased and considered unstable.
-
-There is currently a long-lived branch named `v2` that is intended to land as
-the new `master` branch once development there has settled down. The current
-`master` branch (mirrored as `v1`) is being manually merged into `v2` on
-an irregular human-based schedule, but generally if one wants to "upgrade" to
-`v2` *now* and accept the volatility (read: "awesomeness") that comes along with
-that, please use whatever version pinning of your preference, such as via
-`gopkg.in`:
-
-```
-$ go get gopkg.in/urfave/cli.v2
-```
-
-``` go
-...
-import (
- "gopkg.in/urfave/cli.v2" // imports as package "cli"
-)
-...
-```
-
-### Pinning to the `v1` releases
-
-Similarly to the section above describing use of the `v2` branch, if one wants
-to avoid any unexpected compatibility pains once `v2` becomes `master`, then
-pinning to `v1` is an acceptable option, e.g.:
-
-```
-$ go get gopkg.in/urfave/cli.v1
-```
-
-``` go
-...
-import (
- "gopkg.in/urfave/cli.v1" // imports as package "cli"
-)
-...
-```
-
-This will pull the latest tagged `v1` release (e.g. `v1.18.1` at the time of writing).
-
-## Getting Started
-
-One of the philosophies behind cli is that an API should be playful and full of
-discovery. So a cli app can be as little as one line of code in `main()`.
-
-<!-- {
- "args": ["&#45;&#45;help"],
- "output": "A new cli application"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- err := cli.NewApp().Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-This app will run and show help text, but is not very useful. Let's give an
-action to execute and some help documentation:
-
-<!-- {
- "output": "boom! I say!"
-} -->
-``` go
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
- app.Name = "boom"
- app.Usage = "make an explosive entrance"
- app.Action = func(c *cli.Context) error {
- fmt.Println("boom! I say!")
- return nil
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-Running this already gives you a ton of functionality, plus support for things
-like subcommands and flags, which are covered below.
-
-## Examples
-
-Being a programmer can be a lonely job. Thankfully by the power of automation
-that is not the case! Let's create a greeter app to fend off our demons of
-loneliness!
-
-Start by creating a directory named `greet`, and within it, add a file,
-`greet.go` with the following code in it:
-
-<!-- {
- "output": "Hello friend!"
-} -->
-``` go
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
- app.Name = "greet"
- app.Usage = "fight the loneliness!"
- app.Action = func(c *cli.Context) error {
- fmt.Println("Hello friend!")
- return nil
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-Install our command to the `$GOPATH/bin` directory:
-
-```
-$ go install
-```
-
-Finally run our new command:
-
-```
-$ greet
-Hello friend!
-```
-
-cli also generates neat help text:
-
-```
-$ greet help
-NAME:
- greet - fight the loneliness!
-
-USAGE:
- greet [global options] command [command options] [arguments...]
-
-VERSION:
- 0.0.0
-
-COMMANDS:
- help, h Shows a list of commands or help for one command
-
-GLOBAL OPTIONS
- --version Shows version information
-```
-
-### Arguments
-
-You can look up arguments by calling the `Args` function on `cli.Context`, e.g.:
-
-<!-- {
- "output": "Hello \""
-} -->
-``` go
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Action = func(c *cli.Context) error {
- fmt.Printf("Hello %q", c.Args().Get(0))
- return nil
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-### Flags
-
-Setting and querying flags is simple.
-
-<!-- {
- "output": "Hello Nefertiti"
-} -->
-``` go
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang",
- Value: "english",
- Usage: "language for the greeting",
- },
- }
-
- app.Action = func(c *cli.Context) error {
- name := "Nefertiti"
- if c.NArg() > 0 {
- name = c.Args().Get(0)
- }
- if c.String("lang") == "spanish" {
- fmt.Println("Hola", name)
- } else {
- fmt.Println("Hello", name)
- }
- return nil
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-You can also set a destination variable for a flag; the parsed value will be
-stored in that variable.
-
-<!-- {
- "output": "Hello someone"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
- "fmt"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- var language string
-
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang",
- Value: "english",
- Usage: "language for the greeting",
- Destination: &language,
- },
- }
-
- app.Action = func(c *cli.Context) error {
- name := "someone"
- if c.NArg() > 0 {
- name = c.Args()[0]
- }
- if language == "spanish" {
- fmt.Println("Hola", name)
- } else {
- fmt.Println("Hello", name)
- }
- return nil
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-See the full list of flags at http://godoc.org/github.com/urfave/cli
-
-#### Placeholder Values
-
-Sometimes it's useful to specify a flag's value within the usage string itself.
-Such placeholders are indicated with back quotes.
-
-For example this:
-
-<!-- {
- "args": ["&#45;&#45;help"],
- "output": "&#45;&#45;config FILE, &#45;c FILE"
-} -->
-```go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag{
- cli.StringFlag{
- Name: "config, c",
- Usage: "Load configuration from `FILE`",
- },
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-Will result in help output like:
-
-```
---config FILE, -c FILE Load configuration from FILE
-```
-
-Note that only the first placeholder is used. Subsequent back-quoted words will
-be left as-is.
-
-#### Alternate Names
-
-You can set alternate (or short) names for flags by providing a comma-delimited
-list for the `Name`. e.g.
-
-<!-- {
- "args": ["&#45;&#45;help"],
- "output": "&#45;&#45;lang value, &#45;l value.*language for the greeting.*default: \"english\""
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang, l",
- Value: "english",
- Usage: "language for the greeting",
- },
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-That flag can then be set with `--lang spanish` or `-l spanish`. Note that
-giving two different forms of the same flag in the same command invocation is an
-error.
-
-#### Ordering
-
-Flags for the application and commands are shown in the order they are defined.
-However, it's possible to sort them from outside this library by using `FlagsByName`
-or `CommandsByName` with `sort`.
-
-For example this:
-
-<!-- {
- "args": ["&#45;&#45;help"],
- "output": "add a task to the list\n.*complete a task on the list\n.*\n\n.*\n.*Load configuration from FILE\n.*Language for the greeting.*"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
- "sort"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang, l",
- Value: "english",
- Usage: "Language for the greeting",
- },
- cli.StringFlag{
- Name: "config, c",
- Usage: "Load configuration from `FILE`",
- },
- }
-
- app.Commands = []cli.Command{
- {
- Name: "complete",
- Aliases: []string{"c"},
- Usage: "complete a task on the list",
- Action: func(c *cli.Context) error {
- return nil
- },
- },
- {
- Name: "add",
- Aliases: []string{"a"},
- Usage: "add a task to the list",
- Action: func(c *cli.Context) error {
- return nil
- },
- },
- }
-
- sort.Sort(cli.FlagsByName(app.Flags))
- sort.Sort(cli.CommandsByName(app.Commands))
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-Will result in help output like:
-
-```
---config FILE, -c FILE Load configuration from FILE
---lang value, -l value Language for the greeting (default: "english")
-```
-
-#### Values from the Environment
-
-You can also have the default value set from the environment via `EnvVar`. e.g.
-
-<!-- {
- "args": ["&#45;&#45;help"],
- "output": "language for the greeting.*APP_LANG"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang, l",
- Value: "english",
- Usage: "language for the greeting",
- EnvVar: "APP_LANG",
- },
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-The `EnvVar` may also be given as a comma-delimited "cascade", where the first
-environment variable that resolves is used as the default.
-
-<!-- {
- "args": ["&#45;&#45;help"],
- "output": "language for the greeting.*LEGACY_COMPAT_LANG.*APP_LANG.*LANG"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang, l",
- Value: "english",
- Usage: "language for the greeting",
- EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
- },
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-#### Values from files
-
-You can also have the default value set from file via `FilePath`. e.g.
-
-<!-- {
- "args": ["&#45;&#45;help"],
- "output": "password for the mysql database"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "password, p",
- Usage: "password for the mysql database",
- FilePath: "/etc/mysql/password",
- },
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-Note that default values set from a file (e.g. `FilePath`) take precedence over
-default values set from the environment (e.g. `EnvVar`).
-
-#### Values from alternate input sources (YAML, TOML, and others)
-
-There is a separate package altsrc that adds support for getting flag values
-from other file input sources.
-
-Currently supported input source formats:
-* YAML
-* TOML
-
-To get values for a flag from an alternate input source, wrap the existing
-cli.Flag as shown below:
-
-``` go
- altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
-```
-
-These flags must also be initialized. Below is an example that initializes
-input from a YAML file:
-
-``` go
- command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
-```
-
-The code above uses the "load" flag name to look up the file name of a YAML
-file from the cli.Context. It then uses that file name to initialize the YAML
-input source for any flags that are defined on that command. Note that the
-"load" flag itself must also be defined on the command's flags for this
-snippet to work.
-
-Currently only YAML and TOML files are supported, but developers can add support
-for other input sources by implementing the altsrc.InputSourceContext for their
-given sources.
-
-Here is a more complete sample of a command using YAML support:
-
-<!-- {
- "args": ["test-cmd", "&#45;&#45;help"],
- "output": "&#45&#45;test value.*default: 0"
-} -->
-``` go
-package notmain
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/urfave/cli"
- "github.com/urfave/cli/altsrc"
-)
-
-func main() {
- app := cli.NewApp()
-
- flags := []cli.Flag{
- altsrc.NewIntFlag(cli.IntFlag{Name: "test"}),
- cli.StringFlag{Name: "load"},
- }
-
- app.Action = func(c *cli.Context) error {
- fmt.Println("yaml ist rad")
- return nil
- }
-
- app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load"))
- app.Flags = flags
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-#### Precedence
-
-The precedence for flag value sources is as follows (highest to lowest):
-
-0. Command line flag value from user
-0. Environment variable (if specified)
-0. Configuration file (if specified)
-0. Default defined on the flag
-
-### Subcommands
-
-Subcommands can be defined for a more git-like command line app.
-
-<!-- {
- "args": ["template", "add"],
- "output": "new task template: .+"
-} -->
-```go
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Commands = []cli.Command{
- {
- Name: "add",
- Aliases: []string{"a"},
- Usage: "add a task to the list",
- Action: func(c *cli.Context) error {
- fmt.Println("added task: ", c.Args().First())
- return nil
- },
- },
- {
- Name: "complete",
- Aliases: []string{"c"},
- Usage: "complete a task on the list",
- Action: func(c *cli.Context) error {
- fmt.Println("completed task: ", c.Args().First())
- return nil
- },
- },
- {
- Name: "template",
- Aliases: []string{"t"},
- Usage: "options for task templates",
- Subcommands: []cli.Command{
- {
- Name: "add",
- Usage: "add a new template",
- Action: func(c *cli.Context) error {
- fmt.Println("new task template: ", c.Args().First())
- return nil
- },
- },
- {
- Name: "remove",
- Usage: "remove an existing template",
- Action: func(c *cli.Context) error {
- fmt.Println("removed task template: ", c.Args().First())
- return nil
- },
- },
- },
- },
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-### Subcommands categories
-
-For additional organization in apps that have many subcommands, you can
-associate a category for each command to group them together in the help
-output.
-
-E.g.
-
-```go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Commands = []cli.Command{
- {
- Name: "noop",
- },
- {
- Name: "add",
- Category: "Template actions",
- },
- {
- Name: "remove",
- Category: "Template actions",
- },
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-Will include:
-
-```
-COMMANDS:
- noop
-
- Template actions:
- add
- remove
-```
-
-### Exit code
-
-Calling `App.Run` will not automatically call `os.Exit`, which means that by
-default the exit code will "fall through" to being `0`. An explicit exit code
-may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a
-`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.:
-
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
- app.Flags = []cli.Flag{
- cli.BoolTFlag{
- Name: "ginger-crouton",
- Usage: "is it in the soup?",
- },
- }
- app.Action = func(ctx *cli.Context) error {
- if !ctx.Bool("ginger-crouton") {
- return cli.NewExitError("it is not in the soup", 86)
- }
- return nil
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-### Bash Completion
-
-You can enable completion commands by setting the `EnableBashCompletion`
-flag on the `App` object. By default, this setting will only auto-complete to
-show an app's subcommands, but you can write your own completion methods for
-the App or its subcommands.
-
-<!-- {
- "args": ["complete", "&#45;&#45;generate&#45;bash&#45;completion"],
- "output": "laundry"
-} -->
-``` go
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
-
- app := cli.NewApp()
- app.EnableBashCompletion = true
- app.Commands = []cli.Command{
- {
- Name: "complete",
- Aliases: []string{"c"},
- Usage: "complete a task on the list",
- Action: func(c *cli.Context) error {
- fmt.Println("completed task: ", c.Args().First())
- return nil
- },
- BashComplete: func(c *cli.Context) {
- // This will complete if no args are passed
- if c.NArg() > 0 {
- return
- }
- for _, t := range tasks {
- fmt.Println(t)
- }
- },
- },
- }
-
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-#### Enabling
-
-Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while
-setting the `PROG` variable to the name of your program:
-
-`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`
-
-#### Distribution
-
-Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename
-it to the name of the program you wish to add autocomplete support for (or
-automatically install it there if you are distributing a package). Don't forget
-to source the file to make it active in the current shell.
-
-```
-sudo cp src/bash_autocomplete /etc/bash_completion.d/<myprogram>
-source /etc/bash_completion.d/<myprogram>
-```
-
-Alternatively, you can just document that users should source the generic
-`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
-to the name of their program (as above).
-
-#### Customization
-
-The default bash completion flag (`--generate-bash-completion`) is defined as
-`cli.BashCompletionFlag`, and may be redefined if desired, e.g.:
-
-<!-- {
- "args": ["&#45;&#45;compgen"],
- "output": "wat\nhelp\nh"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- cli.BashCompletionFlag = cli.BoolFlag{
- Name: "compgen",
- Hidden: true,
- }
-
- app := cli.NewApp()
- app.EnableBashCompletion = true
- app.Commands = []cli.Command{
- {
- Name: "wat",
- },
- }
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-### Generated Help Text
-
-The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked
-by the cli internals in order to print generated help text for the app, command,
-or subcommand, and break execution.
-
-#### Customization
-
-All of the help text generation may be customized, and at multiple levels. The
-templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and
-`SubcommandHelpTemplate` which may be reassigned or augmented, and full override
-is possible by assigning a compatible func to the `cli.HelpPrinter` variable,
-e.g.:
-
-<!-- {
- "output": "Ha HA. I pwnd the help!!1"
-} -->
-``` go
-package main
-
-import (
- "fmt"
- "log"
- "io"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- // EXAMPLE: Append to an existing template
- cli.AppHelpTemplate = fmt.Sprintf(`%s
-
-WEBSITE: http://awesometown.example.com
-
-SUPPORT: support@awesometown.example.com
-
-`, cli.AppHelpTemplate)
-
- // EXAMPLE: Override a template
- cli.AppHelpTemplate = `NAME:
- {{.Name}} - {{.Usage}}
-USAGE:
- {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
- {{if len .Authors}}
-AUTHOR:
- {{range .Authors}}{{ . }}{{end}}
- {{end}}{{if .Commands}}
-COMMANDS:
-{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
-GLOBAL OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}{{if .Copyright }}
-COPYRIGHT:
- {{.Copyright}}
- {{end}}{{if .Version}}
-VERSION:
- {{.Version}}
- {{end}}
-`
-
- // EXAMPLE: Replace the `HelpPrinter` func
- cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
- fmt.Println("Ha HA. I pwnd the help!!1")
- }
-
- err := cli.NewApp().Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-The default flag may be customized to something other than `-h/--help` by
-setting `cli.HelpFlag`, e.g.:
-
-<!-- {
- "args": ["&#45;&#45halp"],
- "output": "haaaaalp.*HALP"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- cli.HelpFlag = cli.BoolFlag{
- Name: "halp, haaaaalp",
- Usage: "HALP",
- EnvVar: "SHOW_HALP,HALPPLZ",
- }
-
- err := cli.NewApp().Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-### Version Flag
-
-The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which
-is checked by the cli internals in order to print the `App.Version` via
-`cli.VersionPrinter` and break execution.
-
-#### Customization
-
-The default flag may be customized to something other than `-v/--version` by
-setting `cli.VersionFlag`, e.g.:
-
-<!-- {
- "args": ["&#45;&#45print-version"],
- "output": "partay version 19\\.99\\.0"
-} -->
-``` go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- cli.VersionFlag = cli.BoolFlag{
- Name: "print-version, V",
- Usage: "print only the version",
- }
-
- app := cli.NewApp()
- app.Name = "partay"
- app.Version = "19.99.0"
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.:
-
-<!-- {
- "args": ["&#45;&#45version"],
- "output": "version=19\\.99\\.0 revision=fafafaf"
-} -->
-``` go
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/urfave/cli"
-)
-
-var (
- Revision = "fafafaf"
-)
-
-func main() {
- cli.VersionPrinter = func(c *cli.Context) {
- fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision)
- }
-
- app := cli.NewApp()
- app.Name = "partay"
- app.Version = "19.99.0"
- err := app.Run(os.Args)
- if err != nil {
- log.Fatal(err)
- }
-}
-```
-
-#### Full API Example
-
-**Notice**: This is a contrived (functioning) example meant strictly for API
-demonstration purposes. Use of one's imagination is encouraged.
-
-<!-- {
- "output": "made it!\nPhew!"
-} -->
-``` go
-package main
-
-import (
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "time"
-
- "github.com/urfave/cli"
-)
-
-func init() {
- cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n"
- cli.CommandHelpTemplate += "\nYMMV\n"
- cli.SubcommandHelpTemplate += "\nor something\n"
-
- cli.HelpFlag = cli.BoolFlag{Name: "halp"}
- cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true}
- cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"}
-
- cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
- fmt.Fprintf(w, "best of luck to you\n")
- }
- cli.VersionPrinter = func(c *cli.Context) {
- fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version)
- }
- cli.OsExiter = func(c int) {
- fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c)
- }
- cli.ErrWriter = ioutil.Discard
- cli.FlagStringer = func(fl cli.Flag) string {
- return fmt.Sprintf("\t\t%s", fl.GetName())
- }
-}
-
-type hexWriter struct{}
-
-func (w *hexWriter) Write(p []byte) (int, error) {
- for _, b := range p {
- fmt.Printf("%x", b)
- }
- fmt.Printf("\n")
-
- return len(p), nil
-}
-
-type genericType struct{
- s string
-}
-
-func (g *genericType) Set(value string) error {
- g.s = value
- return nil
-}
-
-func (g *genericType) String() string {
- return g.s
-}
-
-func main() {
- app := cli.NewApp()
- app.Name = "kənˈtrīv"
- app.Version = "19.99.0"
- app.Compiled = time.Now()
- app.Authors = []cli.Author{
- cli.Author{
- Name: "Example Human",
- Email: "human@example.com",
- },
- }
- app.Copyright = "(c) 1999 Serious Enterprise"
- app.HelpName = "contrive"
- app.Usage = "demonstrate available API"
- app.UsageText = "contrive - demonstrating the available API"
- app.ArgsUsage = "[args and such]"
- app.Commands = []cli.Command{
- cli.Command{
- Name: "doo",
- Aliases: []string{"do"},
- Category: "motion",
- Usage: "do the doo",
- UsageText: "doo - does the dooing",
- Description: "no really, there is a lot of dooing to be done",
- ArgsUsage: "[arrgh]",
- Flags: []cli.Flag{
- cli.BoolFlag{Name: "forever, forevvarr"},
- },
- Subcommands: cli.Commands{
- cli.Command{
- Name: "wop",
- Action: wopAction,
- },
- },
- SkipFlagParsing: false,
- HideHelp: false,
- Hidden: false,
- HelpName: "doo!",
- BashComplete: func(c *cli.Context) {
- fmt.Fprintf(c.App.Writer, "--better\n")
- },
- Before: func(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, "brace for impact\n")
- return nil
- },
- After: func(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, "did we lose anyone?\n")
- return nil
- },
- Action: func(c *cli.Context) error {
- c.Command.FullName()
- c.Command.HasName("wop")
- c.Command.Names()
- c.Command.VisibleFlags()
- fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n")
- if c.Bool("forever") {
- c.Command.Run(c)
- }
- return nil
- },
- OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error {
- fmt.Fprintf(c.App.Writer, "for shame\n")
- return err
- },
- },
- }
- app.Flags = []cli.Flag{
- cli.BoolFlag{Name: "fancy"},
- cli.BoolTFlag{Name: "fancier"},
- cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3},
- cli.Float64Flag{Name: "howmuch"},
- cli.GenericFlag{Name: "wat", Value: &genericType{}},
- cli.Int64Flag{Name: "longdistance"},
- cli.Int64SliceFlag{Name: "intervals"},
- cli.IntFlag{Name: "distance"},
- cli.IntSliceFlag{Name: "times"},
- cli.StringFlag{Name: "dance-move, d"},
- cli.StringSliceFlag{Name: "names, N"},
- cli.UintFlag{Name: "age"},
- cli.Uint64Flag{Name: "bigage"},
- }
- app.EnableBashCompletion = true
- app.HideHelp = false
- app.HideVersion = false
- app.BashComplete = func(c *cli.Context) {
- fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n")
- }
- app.Before = func(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n")
- return nil
- }
- app.After = func(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, "Phew!\n")
- return nil
- }
- app.CommandNotFound = func(c *cli.Context, command string) {
- fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command)
- }
- app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error {
- if isSubcommand {
- return err
- }
-
- fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err)
- return nil
- }
- app.Action = func(c *cli.Context) error {
- cli.DefaultAppComplete(c)
- cli.HandleExitCoder(errors.New("not an exit coder, though"))
- cli.ShowAppHelp(c)
- cli.ShowCommandCompletions(c, "nope")
- cli.ShowCommandHelp(c, "also-nope")
- cli.ShowCompletions(c)
- cli.ShowSubcommandHelp(c)
- cli.ShowVersion(c)
-
- categories := c.App.Categories()
- categories.AddCommand("sounds", cli.Command{
- Name: "bloop",
- })
-
- for _, category := range c.App.Categories() {
- fmt.Fprintf(c.App.Writer, "%s\n", category.Name)
- fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands)
- fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands())
- }
-
- fmt.Printf("%#v\n", c.App.Command("doo"))
- if c.Bool("infinite") {
- c.App.Run([]string{"app", "doo", "wop"})
- }
-
- if c.Bool("forevar") {
- c.App.RunAsSubcommand(c)
- }
- c.App.Setup()
- fmt.Printf("%#v\n", c.App.VisibleCategories())
- fmt.Printf("%#v\n", c.App.VisibleCommands())
- fmt.Printf("%#v\n", c.App.VisibleFlags())
-
- fmt.Printf("%#v\n", c.Args().First())
- if len(c.Args()) > 0 {
- fmt.Printf("%#v\n", c.Args()[1])
- }
- fmt.Printf("%#v\n", c.Args().Present())
- fmt.Printf("%#v\n", c.Args().Tail())
-
- set := flag.NewFlagSet("contrive", 0)
- nc := cli.NewContext(c.App, set, c)
-
- fmt.Printf("%#v\n", nc.Args())
- fmt.Printf("%#v\n", nc.Bool("nope"))
- fmt.Printf("%#v\n", nc.BoolT("nerp"))
- fmt.Printf("%#v\n", nc.Duration("howlong"))
- fmt.Printf("%#v\n", nc.Float64("hay"))
- fmt.Printf("%#v\n", nc.Generic("bloop"))
- fmt.Printf("%#v\n", nc.Int64("bonk"))
- fmt.Printf("%#v\n", nc.Int64Slice("burnks"))
- fmt.Printf("%#v\n", nc.Int("bips"))
- fmt.Printf("%#v\n", nc.IntSlice("blups"))
- fmt.Printf("%#v\n", nc.String("snurt"))
- fmt.Printf("%#v\n", nc.StringSlice("snurkles"))
- fmt.Printf("%#v\n", nc.Uint("flub"))
- fmt.Printf("%#v\n", nc.Uint64("florb"))
- fmt.Printf("%#v\n", nc.GlobalBool("global-nope"))
- fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp"))
- fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong"))
- fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay"))
- fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop"))
- fmt.Printf("%#v\n", nc.GlobalInt("global-bips"))
- fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups"))
- fmt.Printf("%#v\n", nc.GlobalString("global-snurt"))
- fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles"))
-
- fmt.Printf("%#v\n", nc.FlagNames())
- fmt.Printf("%#v\n", nc.GlobalFlagNames())
- fmt.Printf("%#v\n", nc.GlobalIsSet("wat"))
- fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope"))
- fmt.Printf("%#v\n", nc.NArg())
- fmt.Printf("%#v\n", nc.NumFlags())
- fmt.Printf("%#v\n", nc.Parent())
-
- nc.Set("wat", "also-nope")
-
- ec := cli.NewExitError("ohwell", 86)
- fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode())
- fmt.Printf("made it!\n")
- return nil
- }
-
- if os.Getenv("HEXY") != "" {
- app.Writer = &hexWriter{}
- app.ErrWriter = &hexWriter{}
- }
-
- app.Metadata = map[string]interface{}{
- "layers": "many",
- "explicable": false,
- "whatever-values": 19.99,
- }
-
-
- // ignore error so we don't exit non-zero and break gfmrun README example tests
- _ = app.Run(os.Args)
-}
-
-func wopAction(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n")
- return nil
-}
-```
-
-### Combining short Bool options
-
-Traditional use of boolean options with their short names looks like this:
-```
-# cmd foobar -s -o
-```
-
-Suppose you want users to be able to combine your bool options with their short names. This
-can be done using the **UseShortOptionHandling** bool in your commands. Suppose your program
-has two bool flags such as *serve* and *option* with the short options *-s* and
-*-o* respectively. With **UseShortOptionHandling** set to *true*, a user can use a syntax
-like:
-```
-# cmd foobar -so
-```
-
-If you enable **UseShortOptionHandling**, then you must not use any flags that have a single
-leading *-*, as this will result in failures. For example, **-option** can no longer be used. Flags
-with two leading dashes (such as **--options**) are still valid.
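
The section above describes combined short options without a Go example; the following sketch (not from the original README) shows one way the v1 `UseShortOptionHandling` field on `cli.Command` might be set, using the `serve` and `option` flags from the prose:

``` go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:                   "foobar",
			UseShortOptionHandling: true,
			Flags: []cli.Flag{
				cli.BoolFlag{Name: "serve, s"},
				cli.BoolFlag{Name: "option, o"},
			},
			Action: func(c *cli.Context) error {
				// Invoked as `cmd foobar -so`, both flags are set.
				fmt.Println("serve:", c.Bool("serve"), "option:", c.Bool("option"))
				return nil
			},
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```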
-
-## Contribution Guidelines
-
-See [./CONTRIBUTING.md](./CONTRIBUTING.md)
diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go
deleted file mode 100644
index 9add067b0..000000000
--- a/vendor/github.com/urfave/cli/app.go
+++ /dev/null
@@ -1,508 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "time"
-)
-
-var (
- changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md"
- appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
- runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL)
-
- contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."
-
- errInvalidActionType = NewExitError("ERROR invalid Action type. "+
- fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+
- fmt.Sprintf("See %s", appActionDeprecationURL), 2)
-)
-
-// App is the main structure of a cli application. It is recommended that
-// an app be created with the cli.NewApp() function
-type App struct {
- // The name of the program. Defaults to path.Base(os.Args[0])
- Name string
- // Full name of command for help, defaults to Name
- HelpName string
- // Description of the program.
- Usage string
- // Text to override the USAGE section of help
- UsageText string
- // Description of the program argument format.
- ArgsUsage string
- // Version of the program
- Version string
- // Description of the program
- Description string
- // List of commands to execute
- Commands []Command
- // List of flags to parse
- Flags []Flag
- // Boolean to enable bash completion commands
- EnableBashCompletion bool
- // Boolean to hide built-in help command
- HideHelp bool
- // Boolean to hide built-in version flag and the VERSION section of help
- HideVersion bool
- // Populated on app startup; only gettable through the Categories() method
- categories CommandCategories
- // An action to execute when the bash-completion flag is set
- BashComplete BashCompleteFunc
- // An action to execute before any subcommands are run, but after the context is ready
- // If a non-nil error is returned, no subcommands are run
- Before BeforeFunc
- // An action to execute after any subcommands are run, but after the subcommand has finished
- // It is run even if Action() panics
- After AfterFunc
-
- // The action to execute when no subcommands are specified
- // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}`
- // *Note*: support for the deprecated `Action` signature will be removed in a future version
- Action interface{}
-
- // Execute this function if the proper command cannot be found
- CommandNotFound CommandNotFoundFunc
- // Execute this function if a usage error occurs
- OnUsageError OnUsageErrorFunc
- // Compilation date
- Compiled time.Time
- // List of all authors who contributed
- Authors []Author
- // Copyright of the binary if any
- Copyright string
- // Name of Author (Note: Use App.Authors, this is deprecated)
- Author string
- // Email of Author (Note: Use App.Authors, this is deprecated)
- Email string
- // Writer is the writer to write output to
- Writer io.Writer
- // ErrWriter writes error output
- ErrWriter io.Writer
- // Execute this function to handle ExitErrors. If not provided, HandleExitCoder is used as
- // the default behavior, so this is optional.
- ExitErrHandler ExitErrHandlerFunc
- // Other custom info
- Metadata map[string]interface{}
- // Carries a function which returns app specific info.
- ExtraInfo func() map[string]string
- // CustomAppHelpTemplate the text template for app help topic.
- // cli.go uses text/template to render templates. You can
- // render custom help text by setting this variable.
- CustomAppHelpTemplate string
-
- didSetup bool
-}
-
-// Tries to find out when this binary was compiled.
-// Returns the current time if it fails to find it.
-func compileTime() time.Time {
- info, err := os.Stat(os.Args[0])
- if err != nil {
- return time.Now()
- }
- return info.ModTime()
-}
-
-// NewApp creates a new cli Application with some reasonable defaults for Name,
-// Usage, Version and Action.
-func NewApp() *App {
- return &App{
- Name: filepath.Base(os.Args[0]),
- HelpName: filepath.Base(os.Args[0]),
- Usage: "A new cli application",
- UsageText: "",
- Version: "0.0.0",
- BashComplete: DefaultAppComplete,
- Action: helpCommand.Action,
- Compiled: compileTime(),
- Writer: os.Stdout,
- }
-}
-
-// Setup runs initialization code to ensure all data structures are ready for
-// `Run` or inspection prior to `Run`. It is internally called by `Run`, but
-// will return early if setup has already happened.
-func (a *App) Setup() {
- if a.didSetup {
- return
- }
-
- a.didSetup = true
-
- if a.Author != "" || a.Email != "" {
- a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
- }
-
- newCmds := []Command{}
- for _, c := range a.Commands {
- if c.HelpName == "" {
- c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
- }
- newCmds = append(newCmds, c)
- }
- a.Commands = newCmds
-
- if a.Command(helpCommand.Name) == nil && !a.HideHelp {
- a.Commands = append(a.Commands, helpCommand)
- if (HelpFlag != BoolFlag{}) {
- a.appendFlag(HelpFlag)
- }
- }
-
- if !a.HideVersion {
- a.appendFlag(VersionFlag)
- }
-
- a.categories = CommandCategories{}
- for _, command := range a.Commands {
- a.categories = a.categories.AddCommand(command.Category, command)
- }
- sort.Sort(a.categories)
-
- if a.Metadata == nil {
- a.Metadata = make(map[string]interface{})
- }
-
- if a.Writer == nil {
- a.Writer = os.Stdout
- }
-}
-
-// Run is the entry point to the cli app. Parses the arguments slice and routes
-// to the proper flag/args combination
-func (a *App) Run(arguments []string) (err error) {
- a.Setup()
-
- // handle the completion flag separately from the flagset since
- // completion could be attempted after a flag, but before its value was put
- // on the command line. this causes the flagset to interpret the completion
- // flag name as the value of the flag before it which is undesirable
- // note that we can only do this because the shell autocomplete function
- // always appends the completion flag at the end of the command
- shellComplete, arguments := checkShellCompleteFlag(a, arguments)
-
- // parse flags
- set, err := flagSet(a.Name, a.Flags)
- if err != nil {
- return err
- }
-
- set.SetOutput(ioutil.Discard)
- err = set.Parse(arguments[1:])
- nerr := normalizeFlags(a.Flags, set)
- context := NewContext(a, set, nil)
- if nerr != nil {
- fmt.Fprintln(a.Writer, nerr)
- ShowAppHelp(context)
- return nerr
- }
- context.shellComplete = shellComplete
-
- if checkCompletions(context) {
- return nil
- }
-
- if err != nil {
- if a.OnUsageError != nil {
- err := a.OnUsageError(context, err, false)
- a.handleExitCoder(context, err)
- return err
- }
- fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
- ShowAppHelp(context)
- return err
- }
-
- if !a.HideHelp && checkHelp(context) {
- ShowAppHelp(context)
- return nil
- }
-
- if !a.HideVersion && checkVersion(context) {
- ShowVersion(context)
- return nil
- }
-
- if a.After != nil {
- defer func() {
- if afterErr := a.After(context); afterErr != nil {
- if err != nil {
- err = NewMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if a.Before != nil {
- beforeErr := a.Before(context)
- if beforeErr != nil {
- fmt.Fprintf(a.Writer, "%v\n\n", beforeErr)
- ShowAppHelp(context)
- a.handleExitCoder(context, beforeErr)
- err = beforeErr
- return err
- }
- }
-
- args := context.Args()
- if args.Present() {
- name := args.First()
- c := a.Command(name)
- if c != nil {
- return c.Run(context)
- }
- }
-
- if a.Action == nil {
- a.Action = helpCommand.Action
- }
-
- // Run default Action
- err = HandleAction(a.Action, context)
-
- a.handleExitCoder(context, err)
- return err
-}
-
-// RunAndExitOnError calls .Run() and exits non-zero if an error was returned
-//
-// Deprecated: instead you should return an error that fulfills cli.ExitCoder
- // to cli.App.Run. This will cause the application to exit with the given error
-// code in the cli.ExitCoder
-func (a *App) RunAndExitOnError() {
- if err := a.Run(os.Args); err != nil {
- fmt.Fprintln(a.errWriter(), err)
- OsExiter(1)
- }
-}
-
-// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to
-// generate command-specific flags
-func (a *App) RunAsSubcommand(ctx *Context) (err error) {
- // append help to commands
- if len(a.Commands) > 0 {
- if a.Command(helpCommand.Name) == nil && !a.HideHelp {
- a.Commands = append(a.Commands, helpCommand)
- if (HelpFlag != BoolFlag{}) {
- a.appendFlag(HelpFlag)
- }
- }
- }
-
- newCmds := []Command{}
- for _, c := range a.Commands {
- if c.HelpName == "" {
- c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
- }
- newCmds = append(newCmds, c)
- }
- a.Commands = newCmds
-
- // parse flags
- set, err := flagSet(a.Name, a.Flags)
- if err != nil {
- return err
- }
-
- set.SetOutput(ioutil.Discard)
- err = set.Parse(ctx.Args().Tail())
- nerr := normalizeFlags(a.Flags, set)
- context := NewContext(a, set, ctx)
-
- if nerr != nil {
- fmt.Fprintln(a.Writer, nerr)
- fmt.Fprintln(a.Writer)
- if len(a.Commands) > 0 {
- ShowSubcommandHelp(context)
- } else {
- ShowCommandHelp(ctx, context.Args().First())
- }
- return nerr
- }
-
- if checkCompletions(context) {
- return nil
- }
-
- if err != nil {
- if a.OnUsageError != nil {
- err = a.OnUsageError(context, err, true)
- a.handleExitCoder(context, err)
- return err
- }
- fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
- ShowSubcommandHelp(context)
- return err
- }
-
- if len(a.Commands) > 0 {
- if checkSubcommandHelp(context) {
- return nil
- }
- } else {
- if checkCommandHelp(ctx, context.Args().First()) {
- return nil
- }
- }
-
- if a.After != nil {
- defer func() {
- afterErr := a.After(context)
- if afterErr != nil {
- a.handleExitCoder(context, err)
- if err != nil {
- err = NewMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if a.Before != nil {
- beforeErr := a.Before(context)
- if beforeErr != nil {
- a.handleExitCoder(context, beforeErr)
- err = beforeErr
- return err
- }
- }
-
- args := context.Args()
- if args.Present() {
- name := args.First()
- c := a.Command(name)
- if c != nil {
- return c.Run(context)
- }
- }
-
- // Run default Action
- err = HandleAction(a.Action, context)
-
- a.handleExitCoder(context, err)
- return err
-}
-
-// Command returns the named command on App. Returns nil if the command does not exist
-func (a *App) Command(name string) *Command {
- for _, c := range a.Commands {
- if c.HasName(name) {
- return &c
- }
- }
-
- return nil
-}
-
-// Categories returns a slice containing all the categories with the commands they contain
-func (a *App) Categories() CommandCategories {
- return a.categories
-}
-
-// VisibleCategories returns a slice of categories and commands that are
-// Hidden=false
-func (a *App) VisibleCategories() []*CommandCategory {
- ret := []*CommandCategory{}
- for _, category := range a.categories {
- if visible := func() *CommandCategory {
- for _, command := range category.Commands {
- if !command.Hidden {
- return category
- }
- }
- return nil
- }(); visible != nil {
- ret = append(ret, visible)
- }
- }
- return ret
-}
-
-// VisibleCommands returns a slice of the Commands with Hidden=false
-func (a *App) VisibleCommands() []Command {
- ret := []Command{}
- for _, command := range a.Commands {
- if !command.Hidden {
- ret = append(ret, command)
- }
- }
- return ret
-}
-
-// VisibleFlags returns a slice of the Flags with Hidden=false
-func (a *App) VisibleFlags() []Flag {
- return visibleFlags(a.Flags)
-}
-
-func (a *App) hasFlag(flag Flag) bool {
- for _, f := range a.Flags {
- if flag == f {
- return true
- }
- }
-
- return false
-}
-
-func (a *App) errWriter() io.Writer {
- // When the app ErrWriter is nil use the package level one.
- if a.ErrWriter == nil {
- return ErrWriter
- }
-
- return a.ErrWriter
-}
-
-func (a *App) appendFlag(flag Flag) {
- if !a.hasFlag(flag) {
- a.Flags = append(a.Flags, flag)
- }
-}
-
-func (a *App) handleExitCoder(context *Context, err error) {
- if a.ExitErrHandler != nil {
- a.ExitErrHandler(context, err)
- } else {
- HandleExitCoder(err)
- }
-}
-
-// Author represents someone who has contributed to a cli project.
-type Author struct {
- Name string // The Author's name
- Email string // The Author's email
-}
-
-// String makes Author comply to the Stringer interface, to allow an easy print in the templating process
-func (a Author) String() string {
- e := ""
- if a.Email != "" {
- e = " <" + a.Email + ">"
- }
-
- return fmt.Sprintf("%v%v", a.Name, e)
-}
-
-// HandleAction attempts to figure out which Action signature was used. If
-// it's an ActionFunc or a func with the legacy signature for Action, the func
-// is run!
-func HandleAction(action interface{}, context *Context) (err error) {
- if a, ok := action.(ActionFunc); ok {
- return a(context)
- } else if a, ok := action.(func(*Context) error); ok {
- return a(context)
- } else if a, ok := action.(func(*Context)); ok { // deprecated function signature
- a(context)
- return nil
- }
-
- return errInvalidActionType
-}
diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go
deleted file mode 100644
index bf3c73c55..000000000
--- a/vendor/github.com/urfave/cli/category.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package cli
-
-// CommandCategories is a slice of *CommandCategory.
-type CommandCategories []*CommandCategory
-
-// CommandCategory is a category containing commands.
-type CommandCategory struct {
- Name string
- Commands Commands
-}
-
-func (c CommandCategories) Less(i, j int) bool {
- return lexicographicLess(c[i].Name, c[j].Name)
-}
-
-func (c CommandCategories) Len() int {
- return len(c)
-}
-
-func (c CommandCategories) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
-
-// AddCommand adds a command to a category.
-func (c CommandCategories) AddCommand(category string, command Command) CommandCategories {
- for _, commandCategory := range c {
- if commandCategory.Name == category {
- commandCategory.Commands = append(commandCategory.Commands, command)
- return c
- }
- }
- return append(c, &CommandCategory{Name: category, Commands: []Command{command}})
-}
-
-// VisibleCommands returns a slice of the Commands with Hidden=false
-func (c *CommandCategory) VisibleCommands() []Command {
- ret := []Command{}
- for _, command := range c.Commands {
- if !command.Hidden {
- ret = append(ret, command)
- }
- }
- return ret
-}
diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go
deleted file mode 100644
index 90c07eb8e..000000000
--- a/vendor/github.com/urfave/cli/cli.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Package cli provides a minimal framework for creating and organizing command line
-// Go applications. cli is designed to be easy to understand and write; the simplest
-// cli application can be written as follows:
-// func main() {
-// cli.NewApp().Run(os.Args)
-// }
-//
-// Of course this application does not do much, so let's make this an actual application:
-// func main() {
-// app := cli.NewApp()
-// app.Name = "greet"
-// app.Usage = "say a greeting"
-// app.Action = func(c *cli.Context) error {
-// println("Greetings")
-// return nil
-// }
-//
-// app.Run(os.Args)
-// }
-package cli
-
-//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go
diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go
deleted file mode 100644
index 3d44404f1..000000000
--- a/vendor/github.com/urfave/cli/command.go
+++ /dev/null
@@ -1,383 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "sort"
- "strings"
-)
-
-// Command is a subcommand for a cli.App.
-type Command struct {
- // The name of the command
- Name string
- // short name of the command. Typically one character (deprecated, use `Aliases`)
- ShortName string
- // A list of aliases for the command
- Aliases []string
- // A short description of the usage of this command
- Usage string
- // Custom text to show on USAGE section of help
- UsageText string
- // A longer explanation of how the command works
- Description string
- // A short description of the arguments of this command
- ArgsUsage string
- // The category the command is part of
- Category string
- // The function to call when checking for bash command completions
- BashComplete BashCompleteFunc
- // An action to execute before any sub-subcommands are run, but after the context is ready
- // If a non-nil error is returned, no sub-subcommands are run
- Before BeforeFunc
- // An action to execute after any subcommands are run, but after the subcommand has finished
- // It is run even if Action() panics
- After AfterFunc
- // The function to call when this command is invoked
- Action interface{}
- // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind
- // of deprecation period has passed, maybe?
-
- // Execute this function if a usage error occurs.
- OnUsageError OnUsageErrorFunc
- // List of child commands
- Subcommands Commands
- // List of flags to parse
- Flags []Flag
- // Treat all flags as normal arguments if true
- SkipFlagParsing bool
- // Skip argument reordering which attempts to move flags before arguments,
- // but only works if all flags appear after all arguments. This behavior was
- // removed in version 2 since it only works under specific conditions, so we
- // backport here by exposing it as an option for compatibility.
- SkipArgReorder bool
- // Boolean to hide built-in help command
- HideHelp bool
- // Boolean to hide this command from help or completion
- Hidden bool
- // Boolean to enable short-option handling so user can combine several
- // single-character bool arguments into one
- // i.e. foobar -o -v -> foobar -ov
- UseShortOptionHandling bool
-
- // Full name of command for help, defaults to full command name, including parent commands.
- HelpName string
- commandNamePath []string
-
- // CustomHelpTemplate the text template for the command help topic.
- // cli.go uses text/template to render templates. You can
- // render custom help text by setting this variable.
- CustomHelpTemplate string
-}
-
-type CommandsByName []Command
-
-func (c CommandsByName) Len() int {
- return len(c)
-}
-
-func (c CommandsByName) Less(i, j int) bool {
- return lexicographicLess(c[i].Name, c[j].Name)
-}
-
-func (c CommandsByName) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
-
-// FullName returns the full name of the command.
-// For subcommands this ensures that parent commands are part of the command path
-func (c Command) FullName() string {
- if c.commandNamePath == nil {
- return c.Name
- }
- return strings.Join(c.commandNamePath, " ")
-}
-
-// Commands is a slice of Command
-type Commands []Command
-
-// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags
-func (c Command) Run(ctx *Context) (err error) {
- if len(c.Subcommands) > 0 {
- return c.startApp(ctx)
- }
-
- if !c.HideHelp && (HelpFlag != BoolFlag{}) {
- // append help to flags
- c.Flags = append(
- c.Flags,
- HelpFlag,
- )
- }
-
- set, err := c.parseFlags(ctx.Args().Tail())
-
- context := NewContext(ctx.App, set, ctx)
- context.Command = c
- if checkCommandCompletions(context, c.Name) {
- return nil
- }
-
- if err != nil {
- if c.OnUsageError != nil {
- err := c.OnUsageError(context, err, false)
- context.App.handleExitCoder(context, err)
- return err
- }
- fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error())
- fmt.Fprintln(context.App.Writer)
- ShowCommandHelp(context, c.Name)
- return err
- }
-
- if checkCommandHelp(context, c.Name) {
- return nil
- }
-
- if c.After != nil {
- defer func() {
- afterErr := c.After(context)
- if afterErr != nil {
- context.App.handleExitCoder(context, err)
- if err != nil {
- err = NewMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if c.Before != nil {
- err = c.Before(context)
- if err != nil {
- ShowCommandHelp(context, c.Name)
- context.App.handleExitCoder(context, err)
- return err
- }
- }
-
- if c.Action == nil {
- c.Action = helpSubcommand.Action
- }
-
- err = HandleAction(c.Action, context)
-
- if err != nil {
- context.App.handleExitCoder(context, err)
- }
- return err
-}
-
-func (c *Command) parseFlags(args Args) (*flag.FlagSet, error) {
- set, err := flagSet(c.Name, c.Flags)
- if err != nil {
- return nil, err
- }
- set.SetOutput(ioutil.Discard)
-
- if c.SkipFlagParsing {
- return set, set.Parse(append([]string{"--"}, args...))
- }
-
- if !c.SkipArgReorder {
- args = reorderArgs(args)
- }
-
-PARSE:
- err = set.Parse(args)
- if err != nil {
- if c.UseShortOptionHandling {
- // To enable short-option handling (e.g., "-it" vs "-i -t")
- // we have to iteratively catch parsing errors. This way
- // we achieve LR parsing without transforming any arguments.
- // Otherwise, there is no way we can discriminate combined
- // short options from common arguments that should be left
- // untouched.
- errStr := err.Error()
- trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: ")
- if errStr == trimmed {
- return nil, err
- }
- // regenerate the initial args with the split short opts
- newArgs := Args{}
- for i, arg := range args {
- if arg != trimmed {
- newArgs = append(newArgs, arg)
- continue
- }
- shortOpts := translateShortOptions(set, Args{trimmed})
- if len(shortOpts) == 1 {
- return nil, err
- }
- // add each short option and all remaining arguments
- newArgs = append(newArgs, shortOpts...)
- newArgs = append(newArgs, args[i+1:]...)
- args = newArgs
- // now reset the flagset parse again
- set, err = flagSet(c.Name, c.Flags)
- if err != nil {
- return nil, err
- }
- set.SetOutput(ioutil.Discard)
- goto PARSE
- }
- }
- return nil, err
- }
-
- err = normalizeFlags(c.Flags, set)
- if err != nil {
- return nil, err
- }
-
- return set, nil
-}
-
-// reorderArgs moves all flags before arguments as this is what flag expects
-func reorderArgs(args []string) []string {
- var nonflags, flags []string
-
- readFlagValue := false
- for i, arg := range args {
- if arg == "--" {
- nonflags = append(nonflags, args[i:]...)
- break
- }
-
- if readFlagValue && !strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") {
- readFlagValue = false
- flags = append(flags, arg)
- continue
- }
- readFlagValue = false
-
- if arg != "-" && strings.HasPrefix(arg, "-") {
- flags = append(flags, arg)
-
- readFlagValue = !strings.Contains(arg, "=")
- } else {
- nonflags = append(nonflags, arg)
- }
- }
-
- return append(flags, nonflags...)
-}
-
-func translateShortOptions(set *flag.FlagSet, flagArgs Args) []string {
-	allCharsFlags := func(s string) bool {
- for i := range s {
- f := set.Lookup(string(s[i]))
- if f == nil {
- return false
- }
- }
- return true
- }
-
- // separate combined flags
- var flagArgsSeparated []string
- for _, flagArg := range flagArgs {
-		if strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2 {
- if !allCharsFlags(flagArg[1:]) {
- flagArgsSeparated = append(flagArgsSeparated, flagArg)
- continue
- }
- for _, flagChar := range flagArg[1:] {
- flagArgsSeparated = append(flagArgsSeparated, "-"+string(flagChar))
- }
- } else {
- flagArgsSeparated = append(flagArgsSeparated, flagArg)
- }
- }
- return flagArgsSeparated
-}
-
-// Names returns the names including short names and aliases.
-func (c Command) Names() []string {
- names := []string{c.Name}
-
- if c.ShortName != "" {
- names = append(names, c.ShortName)
- }
-
- return append(names, c.Aliases...)
-}
-
-// HasName returns true if Command.Name, Command.ShortName, or one of its Aliases matches the given name
-func (c Command) HasName(name string) bool {
- for _, n := range c.Names() {
- if n == name {
- return true
- }
- }
- return false
-}
-
-func (c Command) startApp(ctx *Context) error {
- app := NewApp()
- app.Metadata = ctx.App.Metadata
- // set the name and usage
- app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
-	if c.HelpName == "" {
-		app.HelpName = app.Name
-	} else {
-		app.HelpName = c.HelpName
-	}
-
- app.Usage = c.Usage
- app.Description = c.Description
- app.ArgsUsage = c.ArgsUsage
-
- // set CommandNotFound
- app.CommandNotFound = ctx.App.CommandNotFound
- app.CustomAppHelpTemplate = c.CustomHelpTemplate
-
- // set the flags and commands
- app.Commands = c.Subcommands
- app.Flags = c.Flags
- app.HideHelp = c.HideHelp
-
- app.Version = ctx.App.Version
- app.HideVersion = ctx.App.HideVersion
- app.Compiled = ctx.App.Compiled
- app.Author = ctx.App.Author
- app.Email = ctx.App.Email
- app.Writer = ctx.App.Writer
- app.ErrWriter = ctx.App.ErrWriter
-
- app.categories = CommandCategories{}
- for _, command := range c.Subcommands {
- app.categories = app.categories.AddCommand(command.Category, command)
- }
-
- sort.Sort(app.categories)
-
- // bash completion
- app.EnableBashCompletion = ctx.App.EnableBashCompletion
- if c.BashComplete != nil {
- app.BashComplete = c.BashComplete
- }
-
- // set the actions
- app.Before = c.Before
- app.After = c.After
- if c.Action != nil {
- app.Action = c.Action
- } else {
- app.Action = helpSubcommand.Action
- }
- app.OnUsageError = c.OnUsageError
-
- for index, cc := range app.Commands {
- app.Commands[index].commandNamePath = []string{c.Name, cc.Name}
- }
-
- return app.RunAsSubcommand(ctx)
-}
-
-// VisibleFlags returns a slice of the Flags with Hidden=false
-func (c Command) VisibleFlags() []Flag {
- return visibleFlags(c.Flags)
-}
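
A sketch of how a cli.Command from the file above was typically declared and attached to an App; the "serve" command and its flags are hypothetical, not taken from this repository.

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:    "serve",
			Aliases: []string{"s"},
			Usage:   "start a server",
			Flags: []cli.Flag{
				cli.IntFlag{Name: "port, p", Value: 8080, Usage: "listen on `PORT`"},
			},
			// Command.Run (deleted above) builds a child Context from the
			// parsed flag set and passes it to this Action.
			Action: func(c *cli.Context) error {
				fmt.Println("listening on port", c.Int("port"))
				return nil
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}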
diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go
deleted file mode 100644
index 552ee7405..000000000
--- a/vendor/github.com/urfave/cli/context.go
+++ /dev/null
@@ -1,287 +0,0 @@
-package cli
-
-import (
- "errors"
- "flag"
- "os"
- "reflect"
- "strings"
- "syscall"
-)
-
-// Context is a type that is passed through to
-// each Handler action in a cli application. Context
-// can be used to retrieve context-specific Args and
-// parsed command-line options.
-type Context struct {
- App *App
- Command Command
- shellComplete bool
- flagSet *flag.FlagSet
- setFlags map[string]bool
- parentContext *Context
-}
-
-// NewContext creates a new context. For use when invoking an App or Command action.
-func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
- c := &Context{App: app, flagSet: set, parentContext: parentCtx}
-
- if parentCtx != nil {
- c.shellComplete = parentCtx.shellComplete
- }
-
- return c
-}
-
-// NumFlags returns the number of flags set
-func (c *Context) NumFlags() int {
- return c.flagSet.NFlag()
-}
-
-// Set sets a context flag to a value.
-func (c *Context) Set(name, value string) error {
- c.setFlags = nil
- return c.flagSet.Set(name, value)
-}
-
-// GlobalSet sets a context flag to a value on the global flagset
-func (c *Context) GlobalSet(name, value string) error {
- globalContext(c).setFlags = nil
- return globalContext(c).flagSet.Set(name, value)
-}
-
-// IsSet determines if the flag was actually set
-func (c *Context) IsSet(name string) bool {
- if c.setFlags == nil {
- c.setFlags = make(map[string]bool)
-
- c.flagSet.Visit(func(f *flag.Flag) {
- c.setFlags[f.Name] = true
- })
-
- c.flagSet.VisitAll(func(f *flag.Flag) {
- if _, ok := c.setFlags[f.Name]; ok {
- return
- }
- c.setFlags[f.Name] = false
- })
-
- // XXX hack to support IsSet for flags with EnvVar
- //
- // There isn't an easy way to do this with the current implementation since
- // whether a flag was set via an environment variable is very difficult to
- // determine here. Instead, we intend to introduce a backwards incompatible
- // change in version 2 to add `IsSet` to the Flag interface to push the
- // responsibility closer to where the information required to determine
- // whether a flag is set by non-standard means such as environment
- // variables is available.
- //
- // See https://github.com/urfave/cli/issues/294 for additional discussion
- flags := c.Command.Flags
- if c.Command.Name == "" { // cannot == Command{} since it contains slice types
- if c.App != nil {
- flags = c.App.Flags
- }
- }
- for _, f := range flags {
- eachName(f.GetName(), func(name string) {
- if isSet, ok := c.setFlags[name]; isSet || !ok {
- return
- }
-
- val := reflect.ValueOf(f)
- if val.Kind() == reflect.Ptr {
- val = val.Elem()
- }
-
- filePathValue := val.FieldByName("FilePath")
- if filePathValue.IsValid() {
- eachName(filePathValue.String(), func(filePath string) {
- if _, err := os.Stat(filePath); err == nil {
- c.setFlags[name] = true
- return
- }
- })
- }
-
- envVarValue := val.FieldByName("EnvVar")
- if envVarValue.IsValid() {
- eachName(envVarValue.String(), func(envVar string) {
- envVar = strings.TrimSpace(envVar)
- if _, ok := syscall.Getenv(envVar); ok {
- c.setFlags[name] = true
- return
- }
- })
- }
- })
- }
- }
-
- return c.setFlags[name]
-}
-
-// GlobalIsSet determines if the global flag was actually set
-func (c *Context) GlobalIsSet(name string) bool {
- ctx := c
- if ctx.parentContext != nil {
- ctx = ctx.parentContext
- }
-
- for ; ctx != nil; ctx = ctx.parentContext {
- if ctx.IsSet(name) {
- return true
- }
- }
- return false
-}
-
-// FlagNames returns a slice of flag names used in this context.
-func (c *Context) FlagNames() (names []string) {
- for _, flag := range c.Command.Flags {
- name := strings.Split(flag.GetName(), ",")[0]
- if name == "help" {
- continue
- }
- names = append(names, name)
- }
- return
-}
-
-// GlobalFlagNames returns a slice of global flag names used by the app.
-func (c *Context) GlobalFlagNames() (names []string) {
- for _, flag := range c.App.Flags {
- name := strings.Split(flag.GetName(), ",")[0]
- if name == "help" || name == "version" {
- continue
- }
- names = append(names, name)
- }
- return
-}
-
-// Parent returns the parent context, if any
-func (c *Context) Parent() *Context {
- return c.parentContext
-}
-
-// value returns the value of the flag corresponding to `name`
-func (c *Context) value(name string) interface{} {
- return c.flagSet.Lookup(name).Value.(flag.Getter).Get()
-}
-
-// Args contains the app's console arguments
-type Args []string
-
-// Args returns the command line arguments associated with the context.
-func (c *Context) Args() Args {
- args := Args(c.flagSet.Args())
- return args
-}
-
-// NArg returns the number of the command line arguments.
-func (c *Context) NArg() int {
- return len(c.Args())
-}
-
-// Get returns the nth argument, or else a blank string
-func (a Args) Get(n int) string {
- if len(a) > n {
- return a[n]
- }
- return ""
-}
-
-// First returns the first argument, or else a blank string
-func (a Args) First() string {
- return a.Get(0)
-}
-
-// Tail returns the rest of the arguments (not the first one)
-// or else an empty string slice
-func (a Args) Tail() []string {
- if len(a) >= 2 {
- return []string(a)[1:]
- }
- return []string{}
-}
-
-// Present checks if there are any arguments present
-func (a Args) Present() bool {
- return len(a) != 0
-}
-
-// Swap swaps arguments at the given indexes
-func (a Args) Swap(from, to int) error {
- if from >= len(a) || to >= len(a) {
- return errors.New("index out of range")
- }
- a[from], a[to] = a[to], a[from]
- return nil
-}
-
-func globalContext(ctx *Context) *Context {
- if ctx == nil {
- return nil
- }
-
- for {
- if ctx.parentContext == nil {
- return ctx
- }
- ctx = ctx.parentContext
- }
-}
-
-func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet {
- if ctx.parentContext != nil {
- ctx = ctx.parentContext
- }
- for ; ctx != nil; ctx = ctx.parentContext {
- if f := ctx.flagSet.Lookup(name); f != nil {
- return ctx.flagSet
- }
- }
- return nil
-}
-
-func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
- switch ff.Value.(type) {
- case *StringSlice:
- default:
- set.Set(name, ff.Value.String())
- }
-}
-
-func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
- visited := make(map[string]bool)
- set.Visit(func(f *flag.Flag) {
- visited[f.Name] = true
- })
- for _, f := range flags {
- parts := strings.Split(f.GetName(), ",")
- if len(parts) == 1 {
- continue
- }
- var ff *flag.Flag
- for _, name := range parts {
- name = strings.Trim(name, " ")
- if visited[name] {
- if ff != nil {
- return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
- }
- ff = set.Lookup(name)
- }
- }
- if ff == nil {
- continue
- }
- for _, name := range parts {
- name = strings.Trim(name, " ")
- if !visited[name] {
- copyFlag(name, ff, set)
- }
- }
- }
- return nil
-}
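
A sketch of how an Action reads parsed state through the Context type removed above; the flag and argument names are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "output, o", Usage: "write results to `FILE`"},
	}
	app.Action = func(c *cli.Context) error {
		// IsSet reports whether --output was passed (or supplied via EnvVar/FilePath).
		if c.IsSet("output") {
			fmt.Println("output:", c.String("output"))
		}
		// Args holds the positional arguments left over after flag parsing.
		if !c.Args().Present() {
			return cli.NewExitError("no input given", 1)
		}
		fmt.Println("first:", c.Args().First(), "rest:", c.Args().Tail())
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}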
diff --git a/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/urfave/cli/errors.go
deleted file mode 100644
index 562b2953c..000000000
--- a/vendor/github.com/urfave/cli/errors.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-)
-
-// OsExiter is the function used when the app exits. If not set defaults to os.Exit.
-var OsExiter = os.Exit
-
-// ErrWriter is used to write errors to the user. This can be anything
-// implementing the io.Writer interface and defaults to os.Stderr.
-var ErrWriter io.Writer = os.Stderr
-
-// MultiError is an error that wraps multiple errors.
-type MultiError struct {
- Errors []error
-}
-
-// NewMultiError creates a new MultiError. Pass in one or more errors.
-func NewMultiError(err ...error) MultiError {
- return MultiError{Errors: err}
-}
-
-// Error implements the error interface.
-func (m MultiError) Error() string {
- errs := make([]string, len(m.Errors))
- for i, err := range m.Errors {
- errs[i] = err.Error()
- }
-
- return strings.Join(errs, "\n")
-}
-
-type ErrorFormatter interface {
- Format(s fmt.State, verb rune)
-}
-
-// ExitCoder is the interface checked by `App` and `Command` for a custom exit
-// code
-type ExitCoder interface {
- error
- ExitCode() int
-}
-
-// ExitError fulfills both the builtin `error` interface and `ExitCoder`
-type ExitError struct {
- exitCode int
- message interface{}
-}
-
-// NewExitError makes a new *ExitError
-func NewExitError(message interface{}, exitCode int) *ExitError {
- return &ExitError{
- exitCode: exitCode,
- message: message,
- }
-}
-
-// Error returns the string message, fulfilling the interface required by
-// `error`
-func (ee *ExitError) Error() string {
- return fmt.Sprintf("%v", ee.message)
-}
-
-// ExitCode returns the exit code, fulfilling the interface required by
-// `ExitCoder`
-func (ee *ExitError) ExitCode() int {
- return ee.exitCode
-}
-
-// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if
-// so prints the error to stderr (if it is non-empty) and calls OsExiter with the
-// given exit code. If the given error is a MultiError, then this func is
-// called on all members of the Errors slice and calls OsExiter with the last exit code.
-func HandleExitCoder(err error) {
- if err == nil {
- return
- }
-
- if exitErr, ok := err.(ExitCoder); ok {
- if err.Error() != "" {
- if _, ok := exitErr.(ErrorFormatter); ok {
- fmt.Fprintf(ErrWriter, "%+v\n", err)
- } else {
- fmt.Fprintln(ErrWriter, err)
- }
- }
- OsExiter(exitErr.ExitCode())
- return
- }
-
- if multiErr, ok := err.(MultiError); ok {
- code := handleMultiError(multiErr)
- OsExiter(code)
- return
- }
-}
-
-func handleMultiError(multiErr MultiError) int {
- code := 1
- for _, merr := range multiErr.Errors {
- if multiErr2, ok := merr.(MultiError); ok {
- code = handleMultiError(multiErr2)
- } else {
- fmt.Fprintln(ErrWriter, merr)
- if exitErr, ok := merr.(ExitCoder); ok {
- code = exitErr.ExitCode()
- }
- }
- }
- return code
-}
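
A sketch of the exit-code plumbing defined above; the messages and the exit code 125 are arbitrary examples.

package main

import (
	"errors"

	"github.com/urfave/cli"
)

func main() {
	// NewExitError satisfies both the error and ExitCoder interfaces.
	failure := cli.NewExitError("something went wrong", 125)

	// MultiError wraps several errors; HandleExitCoder walks its members and
	// exits with the last ExitCoder's code (125 here).
	err := cli.NewMultiError(errors.New("warming up failed"), failure)

	// Prints each message to ErrWriter, then calls OsExiter(125).
	cli.HandleExitCoder(err)
}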
diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go
deleted file mode 100644
index b0cffc006..000000000
--- a/vendor/github.com/urfave/cli/flag.go
+++ /dev/null
@@ -1,786 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "syscall"
- "time"
-)
-
-const defaultPlaceholder = "value"
-
-// BashCompletionFlag enables bash-completion for all commands and subcommands
-var BashCompletionFlag Flag = BoolFlag{
- Name: "generate-bash-completion",
- Hidden: true,
-}
-
-// VersionFlag prints the version for the application
-var VersionFlag Flag = BoolFlag{
- Name: "version, v",
- Usage: "print the version",
-}
-
-// HelpFlag prints the help for all commands and subcommands
-// Set to the zero value (BoolFlag{}) to disable the flag -- the help
-// subcommand is kept unless HideHelp is set to true.
-var HelpFlag Flag = BoolFlag{
- Name: "help, h",
- Usage: "show help",
-}
-
-// FlagStringer converts a flag definition to a string. This is used by help
-// to display a flag.
-var FlagStringer FlagStringFunc = stringifyFlag
-
-// FlagNamePrefixer converts a full flag name and its placeholder into the help
-// message flag prefix. This is used by the default FlagStringer.
-var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames
-
-// FlagEnvHinter annotates flag help message with the environment variable
-// details. This is used by the default FlagStringer.
-var FlagEnvHinter FlagEnvHintFunc = withEnvHint
-
-// FlagFileHinter annotates flag help message with the file path
-// details. This is used by the default FlagStringer.
-var FlagFileHinter FlagFileHintFunc = withFileHint
-
-// FlagsByName is a slice of Flag.
-type FlagsByName []Flag
-
-func (f FlagsByName) Len() int {
- return len(f)
-}
-
-func (f FlagsByName) Less(i, j int) bool {
- return lexicographicLess(f[i].GetName(), f[j].GetName())
-}
-
-func (f FlagsByName) Swap(i, j int) {
- f[i], f[j] = f[j], f[i]
-}
-
-// Flag is a common interface related to parsing flags in cli.
-// For more advanced flag parsing techniques, it is recommended that
-// this interface be implemented.
-type Flag interface {
- fmt.Stringer
- // Apply Flag settings to the given flag set
- Apply(*flag.FlagSet)
- GetName() string
-}
-
-// errorableFlag is an interface that allows us to return errors during apply.
-// It allows flags defined in this library to return errors in a backwards-compatible fashion.
-// TODO remove in v2 and modify the existing Flag interface to return errors
-type errorableFlag interface {
- Flag
-
- ApplyWithError(*flag.FlagSet) error
-}
-
-func flagSet(name string, flags []Flag) (*flag.FlagSet, error) {
- set := flag.NewFlagSet(name, flag.ContinueOnError)
-
- for _, f := range flags {
- //TODO remove in v2 when errorableFlag is removed
- if ef, ok := f.(errorableFlag); ok {
- if err := ef.ApplyWithError(set); err != nil {
- return nil, err
- }
- } else {
- f.Apply(set)
- }
- }
- return set, nil
-}
-
-func eachName(longName string, fn func(string)) {
- parts := strings.Split(longName, ",")
- for _, name := range parts {
- name = strings.Trim(name, " ")
- fn(name)
- }
-}
-
-// Generic is a generic parseable type identified by a specific flag
-type Generic interface {
- Set(value string) error
- String() string
-}
-
-// Apply takes the flagset and calls Set on the generic flag with the value
-// provided by the user for parsing by the flag
-// Ignores parsing errors
-func (f GenericFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError takes the flagset and calls Set on the generic flag with the value
-// provided by the user for parsing by the flag
-func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error {
- val := f.Value
- if fileEnvVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- if err := val.Set(fileEnvVal); err != nil {
- return fmt.Errorf("could not parse %s as value for flag %s: %s", fileEnvVal, f.Name, err)
- }
- }
-
- eachName(f.Name, func(name string) {
- set.Var(f.Value, name, f.Usage)
- })
-
- return nil
-}
-
-// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter
-type StringSlice []string
-
-// Set appends the string value to the list of values
-func (f *StringSlice) Set(value string) error {
- *f = append(*f, value)
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (f *StringSlice) String() string {
- return fmt.Sprintf("%s", *f)
-}
-
-// Value returns the slice of strings set by this flag
-func (f *StringSlice) Value() []string {
- return *f
-}
-
-// Get returns the slice of strings set by this flag
-func (f *StringSlice) Get() interface{} {
- return *f
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f StringSliceFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- newVal := &StringSlice{}
- for _, s := range strings.Split(envVal, ",") {
- s = strings.TrimSpace(s)
- if err := newVal.Set(s); err != nil {
- return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err)
- }
- }
- if f.Value == nil {
- f.Value = newVal
- } else {
- *f.Value = *newVal
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Value == nil {
- f.Value = &StringSlice{}
- }
- set.Var(f.Value, name, f.Usage)
- })
-
- return nil
-}
-
-// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter
-type IntSlice []int
-
-// Set parses the value into an integer and appends it to the list of values
-func (f *IntSlice) Set(value string) error {
- tmp, err := strconv.Atoi(value)
- if err != nil {
- return err
- }
- *f = append(*f, tmp)
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (f *IntSlice) String() string {
- return fmt.Sprintf("%#v", *f)
-}
-
-// Value returns the slice of ints set by this flag
-func (f *IntSlice) Value() []int {
- return *f
-}
-
-// Get returns the slice of ints set by this flag
-func (f *IntSlice) Get() interface{} {
- return *f
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f IntSliceFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- newVal := &IntSlice{}
- for _, s := range strings.Split(envVal, ",") {
- s = strings.TrimSpace(s)
- if err := newVal.Set(s); err != nil {
- return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err)
- }
- }
- if f.Value == nil {
- f.Value = newVal
- } else {
- *f.Value = *newVal
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Value == nil {
- f.Value = &IntSlice{}
- }
- set.Var(f.Value, name, f.Usage)
- })
-
- return nil
-}
-
-// Int64Slice is an opaque type for []int to satisfy flag.Value and flag.Getter
-type Int64Slice []int64
-
-// Set parses the value into an integer and appends it to the list of values
-func (f *Int64Slice) Set(value string) error {
- tmp, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return err
- }
- *f = append(*f, tmp)
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (f *Int64Slice) String() string {
- return fmt.Sprintf("%#v", *f)
-}
-
-// Value returns the slice of ints set by this flag
-func (f *Int64Slice) Value() []int64 {
- return *f
-}
-
-// Get returns the slice of ints set by this flag
-func (f *Int64Slice) Get() interface{} {
- return *f
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f Int64SliceFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- newVal := &Int64Slice{}
- for _, s := range strings.Split(envVal, ",") {
- s = strings.TrimSpace(s)
- if err := newVal.Set(s); err != nil {
- return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err)
- }
- }
- if f.Value == nil {
- f.Value = newVal
- } else {
- *f.Value = *newVal
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Value == nil {
- f.Value = &Int64Slice{}
- }
- set.Var(f.Value, name, f.Usage)
- })
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f BoolFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error {
- val := false
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- if envVal == "" {
- val = false
- } else {
- envValBool, err := strconv.ParseBool(envVal)
- if err != nil {
- return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err)
- }
- val = envValBool
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.BoolVar(f.Destination, name, val, f.Usage)
- return
- }
- set.Bool(name, val, f.Usage)
- })
-
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f BoolTFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error {
- val := true
-
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- if envVal == "" {
- val = false
- } else {
- envValBool, err := strconv.ParseBool(envVal)
- if err != nil {
- return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err)
- }
- val = envValBool
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.BoolVar(f.Destination, name, val, f.Usage)
- return
- }
- set.Bool(name, val, f.Usage)
- })
-
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f StringFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f StringFlag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- f.Value = envVal
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.StringVar(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.String(name, f.Value, f.Usage)
- })
-
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f IntFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f IntFlag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- envValInt, err := strconv.ParseInt(envVal, 0, 64)
- if err != nil {
- return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err)
- }
- f.Value = int(envValInt)
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.IntVar(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Int(name, f.Value, f.Usage)
- })
-
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f Int64Flag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- envValInt, err := strconv.ParseInt(envVal, 0, 64)
- if err != nil {
- return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err)
- }
-
- f.Value = envValInt
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.Int64Var(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Int64(name, f.Value, f.Usage)
- })
-
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f UintFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f UintFlag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- envValInt, err := strconv.ParseUint(envVal, 0, 64)
- if err != nil {
- return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err)
- }
-
- f.Value = uint(envValInt)
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.UintVar(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Uint(name, f.Value, f.Usage)
- })
-
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f Uint64Flag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- envValInt, err := strconv.ParseUint(envVal, 0, 64)
- if err != nil {
- return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err)
- }
-
- f.Value = uint64(envValInt)
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.Uint64Var(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Uint64(name, f.Value, f.Usage)
- })
-
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f DurationFlag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
- envValDuration, err := time.ParseDuration(envVal)
- if err != nil {
- return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err)
- }
-
- f.Value = envValDuration
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.DurationVar(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Duration(name, f.Value, f.Usage)
- })
-
- return nil
-}
-
-// Apply populates the flag given the flag set and environment
-// Ignores errors
-func (f Float64Flag) Apply(set *flag.FlagSet) {
- f.ApplyWithError(set)
-}
-
-// ApplyWithError populates the flag given the flag set and environment
-func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error {
- if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok {
-		envValFloat, err := strconv.ParseFloat(envVal, 64)
- if err != nil {
- return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err)
- }
-
- f.Value = float64(envValFloat)
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.Float64Var(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Float64(name, f.Value, f.Usage)
- })
-
- return nil
-}
-
-func visibleFlags(fl []Flag) []Flag {
- visible := []Flag{}
- for _, flag := range fl {
- field := flagValue(flag).FieldByName("Hidden")
- if !field.IsValid() || !field.Bool() {
- visible = append(visible, flag)
- }
- }
- return visible
-}
-
-func prefixFor(name string) (prefix string) {
- if len(name) == 1 {
- prefix = "-"
- } else {
- prefix = "--"
- }
-
- return
-}
-
-// Returns the placeholder, if any, and the unquoted usage string.
-func unquoteUsage(usage string) (string, string) {
- for i := 0; i < len(usage); i++ {
- if usage[i] == '`' {
- for j := i + 1; j < len(usage); j++ {
- if usage[j] == '`' {
- name := usage[i+1 : j]
- usage = usage[:i] + name + usage[j+1:]
- return name, usage
- }
- }
- break
- }
- }
- return "", usage
-}
-
-func prefixedNames(fullName, placeholder string) string {
- var prefixed string
- parts := strings.Split(fullName, ",")
- for i, name := range parts {
- name = strings.Trim(name, " ")
- prefixed += prefixFor(name) + name
- if placeholder != "" {
- prefixed += " " + placeholder
- }
- if i < len(parts)-1 {
- prefixed += ", "
- }
- }
- return prefixed
-}
-
-func withEnvHint(envVar, str string) string {
- envText := ""
- if envVar != "" {
- prefix := "$"
- suffix := ""
- sep := ", $"
- if runtime.GOOS == "windows" {
- prefix = "%"
- suffix = "%"
- sep = "%, %"
- }
- envText = " [" + prefix + strings.Join(strings.Split(envVar, ","), sep) + suffix + "]"
- }
- return str + envText
-}
-
-func withFileHint(filePath, str string) string {
- fileText := ""
- if filePath != "" {
- fileText = fmt.Sprintf(" [%s]", filePath)
- }
- return str + fileText
-}
-
-func flagValue(f Flag) reflect.Value {
- fv := reflect.ValueOf(f)
- for fv.Kind() == reflect.Ptr {
- fv = reflect.Indirect(fv)
- }
- return fv
-}
-
-func stringifyFlag(f Flag) string {
- fv := flagValue(f)
-
- switch f.(type) {
- case IntSliceFlag:
- return FlagFileHinter(
- fv.FieldByName("FilePath").String(),
- FlagEnvHinter(
- fv.FieldByName("EnvVar").String(),
- stringifyIntSliceFlag(f.(IntSliceFlag)),
- ),
- )
- case Int64SliceFlag:
- return FlagFileHinter(
- fv.FieldByName("FilePath").String(),
- FlagEnvHinter(
- fv.FieldByName("EnvVar").String(),
- stringifyInt64SliceFlag(f.(Int64SliceFlag)),
- ),
- )
- case StringSliceFlag:
- return FlagFileHinter(
- fv.FieldByName("FilePath").String(),
- FlagEnvHinter(
- fv.FieldByName("EnvVar").String(),
- stringifyStringSliceFlag(f.(StringSliceFlag)),
- ),
- )
- }
-
- placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String())
-
- needsPlaceholder := false
- defaultValueString := ""
-
- if val := fv.FieldByName("Value"); val.IsValid() {
- needsPlaceholder = true
- defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface())
-
- if val.Kind() == reflect.String && val.String() != "" {
- defaultValueString = fmt.Sprintf(" (default: %q)", val.String())
- }
- }
-
- if defaultValueString == " (default: )" {
- defaultValueString = ""
- }
-
- if needsPlaceholder && placeholder == "" {
- placeholder = defaultPlaceholder
- }
-
- usageWithDefault := strings.TrimSpace(usage + defaultValueString)
-
- return FlagFileHinter(
- fv.FieldByName("FilePath").String(),
- FlagEnvHinter(
- fv.FieldByName("EnvVar").String(),
- FlagNamePrefixer(fv.FieldByName("Name").String(), placeholder)+"\t"+usageWithDefault,
- ),
- )
-}
-
-func stringifyIntSliceFlag(f IntSliceFlag) string {
- defaultVals := []string{}
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, i := range f.Value.Value() {
- defaultVals = append(defaultVals, strconv.Itoa(i))
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Name, defaultVals)
-}
-
-func stringifyInt64SliceFlag(f Int64SliceFlag) string {
- defaultVals := []string{}
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, i := range f.Value.Value() {
- defaultVals = append(defaultVals, strconv.FormatInt(i, 10))
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Name, defaultVals)
-}
-
-func stringifyStringSliceFlag(f StringSliceFlag) string {
- defaultVals := []string{}
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, s := range f.Value.Value() {
- if len(s) > 0 {
- defaultVals = append(defaultVals, strconv.Quote(s))
- }
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Name, defaultVals)
-}
-
-func stringifySliceFlag(usage, name string, defaultVals []string) string {
- placeholder, usage := unquoteUsage(usage)
- if placeholder == "" {
- placeholder = defaultPlaceholder
- }
-
- defaultVal := ""
- if len(defaultVals) > 0 {
- defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", "))
- }
-
- usageWithDefault := strings.TrimSpace(usage + defaultVal)
- return FlagNamePrefixer(name, placeholder) + "\t" + usageWithDefault
-}
-
-func flagFromFileEnv(filePath, envName string) (val string, ok bool) {
- for _, envVar := range strings.Split(envName, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal, ok := syscall.Getenv(envVar); ok {
- return envVal, true
- }
- }
- for _, fileVar := range strings.Split(filePath, ",") {
- if data, err := ioutil.ReadFile(fileVar); err == nil {
- return string(data), true
- }
- }
- return "", false
-}
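
A sketch of the environment-variable fallback implemented by flagFromFileEnv and StringSliceFlag.ApplyWithError above; the flag name and environment variable are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringSliceFlag{
			Name:   "registry",
			Usage:  "registry to search (may be repeated)",
			EnvVar: "MYAPP_REGISTRIES", // comma-separated values are split by ApplyWithError
		},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println(c.StringSlice("registry"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}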
diff --git a/vendor/github.com/urfave/cli/flag_generated.go b/vendor/github.com/urfave/cli/flag_generated.go
deleted file mode 100644
index 001576c8b..000000000
--- a/vendor/github.com/urfave/cli/flag_generated.go
+++ /dev/null
@@ -1,640 +0,0 @@
-package cli
-
-import (
- "flag"
- "strconv"
- "time"
-)
-
-// WARNING: This file is generated!
-
-// BoolFlag is a flag with type bool
-type BoolFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Destination *bool
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f BoolFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f BoolFlag) GetName() string {
- return f.Name
-}
-
-// Bool looks up the value of a local BoolFlag, returns
-// false if not found
-func (c *Context) Bool(name string) bool {
- return lookupBool(name, c.flagSet)
-}
-
-// GlobalBool looks up the value of a global BoolFlag, returns
-// false if not found
-func (c *Context) GlobalBool(name string) bool {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupBool(name, fs)
- }
- return false
-}
-
-func lookupBool(name string, set *flag.FlagSet) bool {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseBool(f.Value.String())
- if err != nil {
- return false
- }
- return parsed
- }
- return false
-}
-
-// BoolTFlag is a flag with type bool that is true by default
-type BoolTFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Destination *bool
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f BoolTFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f BoolTFlag) GetName() string {
- return f.Name
-}
-
-// BoolT looks up the value of a local BoolTFlag, returns
-// false if not found
-func (c *Context) BoolT(name string) bool {
- return lookupBoolT(name, c.flagSet)
-}
-
-// GlobalBoolT looks up the value of a global BoolTFlag, returns
-// false if not found
-func (c *Context) GlobalBoolT(name string) bool {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupBoolT(name, fs)
- }
- return false
-}
-
-func lookupBoolT(name string, set *flag.FlagSet) bool {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseBool(f.Value.String())
- if err != nil {
- return false
- }
- return parsed
- }
- return false
-}
-
-// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration)
-type DurationFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value time.Duration
- Destination *time.Duration
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f DurationFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f DurationFlag) GetName() string {
- return f.Name
-}
-
-// Duration looks up the value of a local DurationFlag, returns
-// 0 if not found
-func (c *Context) Duration(name string) time.Duration {
- return lookupDuration(name, c.flagSet)
-}
-
-// GlobalDuration looks up the value of a global DurationFlag, returns
-// 0 if not found
-func (c *Context) GlobalDuration(name string) time.Duration {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupDuration(name, fs)
- }
- return 0
-}
-
-func lookupDuration(name string, set *flag.FlagSet) time.Duration {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := time.ParseDuration(f.Value.String())
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
-
-// Float64Flag is a flag with type float64
-type Float64Flag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value float64
- Destination *float64
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f Float64Flag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f Float64Flag) GetName() string {
- return f.Name
-}
-
-// Float64 looks up the value of a local Float64Flag, returns
-// 0 if not found
-func (c *Context) Float64(name string) float64 {
- return lookupFloat64(name, c.flagSet)
-}
-
-// GlobalFloat64 looks up the value of a global Float64Flag, returns
-// 0 if not found
-func (c *Context) GlobalFloat64(name string) float64 {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupFloat64(name, fs)
- }
- return 0
-}
-
-func lookupFloat64(name string, set *flag.FlagSet) float64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseFloat(f.Value.String(), 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
-
-// GenericFlag is a flag with type Generic
-type GenericFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value Generic
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f GenericFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f GenericFlag) GetName() string {
- return f.Name
-}
-
-// Generic looks up the value of a local GenericFlag, returns
-// nil if not found
-func (c *Context) Generic(name string) interface{} {
- return lookupGeneric(name, c.flagSet)
-}
-
-// GlobalGeneric looks up the value of a global GenericFlag, returns
-// nil if not found
-func (c *Context) GlobalGeneric(name string) interface{} {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupGeneric(name, fs)
- }
- return nil
-}
-
-func lookupGeneric(name string, set *flag.FlagSet) interface{} {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value, error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
-
-// Int64Flag is a flag with type int64
-type Int64Flag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value int64
- Destination *int64
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f Int64Flag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f Int64Flag) GetName() string {
- return f.Name
-}
-
-// Int64 looks up the value of a local Int64Flag, returns
-// 0 if not found
-func (c *Context) Int64(name string) int64 {
- return lookupInt64(name, c.flagSet)
-}
-
-// GlobalInt64 looks up the value of a global Int64Flag, returns
-// 0 if not found
-func (c *Context) GlobalInt64(name string) int64 {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupInt64(name, fs)
- }
- return 0
-}
-
-func lookupInt64(name string, set *flag.FlagSet) int64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
-
-// IntFlag is a flag with type int
-type IntFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value int
- Destination *int
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f IntFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f IntFlag) GetName() string {
- return f.Name
-}
-
-// Int looks up the value of a local IntFlag, returns
-// 0 if not found
-func (c *Context) Int(name string) int {
- return lookupInt(name, c.flagSet)
-}
-
-// GlobalInt looks up the value of a global IntFlag, returns
-// 0 if not found
-func (c *Context) GlobalInt(name string) int {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupInt(name, fs)
- }
- return 0
-}
-
-func lookupInt(name string, set *flag.FlagSet) int {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return int(parsed)
- }
- return 0
-}
-
-// IntSliceFlag is a flag with type *IntSlice
-type IntSliceFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value *IntSlice
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f IntSliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f IntSliceFlag) GetName() string {
- return f.Name
-}
-
-// IntSlice looks up the value of a local IntSliceFlag, returns
-// nil if not found
-func (c *Context) IntSlice(name string) []int {
- return lookupIntSlice(name, c.flagSet)
-}
-
-// GlobalIntSlice looks up the value of a global IntSliceFlag, returns
-// nil if not found
-func (c *Context) GlobalIntSlice(name string) []int {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupIntSlice(name, fs)
- }
- return nil
-}
-
-func lookupIntSlice(name string, set *flag.FlagSet) []int {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := (f.Value.(*IntSlice)).Value(), error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
-
-// Int64SliceFlag is a flag with type *Int64Slice
-type Int64SliceFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value *Int64Slice
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f Int64SliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f Int64SliceFlag) GetName() string {
- return f.Name
-}
-
-// Int64Slice looks up the value of a local Int64SliceFlag, returns
-// nil if not found
-func (c *Context) Int64Slice(name string) []int64 {
- return lookupInt64Slice(name, c.flagSet)
-}
-
-// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns
-// nil if not found
-func (c *Context) GlobalInt64Slice(name string) []int64 {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupInt64Slice(name, fs)
- }
- return nil
-}
-
-func lookupInt64Slice(name string, set *flag.FlagSet) []int64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
-
-// StringFlag is a flag with type string
-type StringFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value string
- Destination *string
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f StringFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f StringFlag) GetName() string {
- return f.Name
-}
-
-// String looks up the value of a local StringFlag, returns
-// "" if not found
-func (c *Context) String(name string) string {
- return lookupString(name, c.flagSet)
-}
-
-// GlobalString looks up the value of a global StringFlag, returns
-// "" if not found
-func (c *Context) GlobalString(name string) string {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupString(name, fs)
- }
- return ""
-}
-
-func lookupString(name string, set *flag.FlagSet) string {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value.String(), error(nil)
- if err != nil {
- return ""
- }
- return parsed
- }
- return ""
-}
-
-// StringSliceFlag is a flag with type *StringSlice
-type StringSliceFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value *StringSlice
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f StringSliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f StringSliceFlag) GetName() string {
- return f.Name
-}
-
-// StringSlice looks up the value of a local StringSliceFlag, returns
-// nil if not found
-func (c *Context) StringSlice(name string) []string {
- return lookupStringSlice(name, c.flagSet)
-}
-
-// GlobalStringSlice looks up the value of a global StringSliceFlag, returns
-// nil if not found
-func (c *Context) GlobalStringSlice(name string) []string {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupStringSlice(name, fs)
- }
- return nil
-}
-
-func lookupStringSlice(name string, set *flag.FlagSet) []string {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := (f.Value.(*StringSlice)).Value(), error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
-
-// Uint64Flag is a flag with type uint64
-type Uint64Flag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value uint64
- Destination *uint64
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f Uint64Flag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f Uint64Flag) GetName() string {
- return f.Name
-}
-
-// Uint64 looks up the value of a local Uint64Flag, returns
-// 0 if not found
-func (c *Context) Uint64(name string) uint64 {
- return lookupUint64(name, c.flagSet)
-}
-
-// GlobalUint64 looks up the value of a global Uint64Flag, returns
-// 0 if not found
-func (c *Context) GlobalUint64(name string) uint64 {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupUint64(name, fs)
- }
- return 0
-}
-
-func lookupUint64(name string, set *flag.FlagSet) uint64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
-
-// UintFlag is a flag with type uint
-type UintFlag struct {
- Name string
- Usage string
- EnvVar string
- FilePath string
- Hidden bool
- Value uint
- Destination *uint
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f UintFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f UintFlag) GetName() string {
- return f.Name
-}
-
-// Uint looks up the value of a local UintFlag, returns
-// 0 if not found
-func (c *Context) Uint(name string) uint {
- return lookupUint(name, c.flagSet)
-}
-
-// GlobalUint looks up the value of a global UintFlag, returns
-// 0 if not found
-func (c *Context) GlobalUint(name string) uint {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupUint(name, fs)
- }
- return 0
-}
-
-func lookupUint(name string, set *flag.FlagSet) uint {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return uint(parsed)
- }
- return 0
-}
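
A sketch of the difference between the local and Global* lookups generated above: Bool and String consult the current command's flag set, while GlobalBool walks parent contexts via lookupGlobalFlagSet. The flag names are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{cli.BoolFlag{Name: "debug"}}
	app.Commands = []cli.Command{{
		Name:  "run",
		Flags: []cli.Flag{cli.StringFlag{Name: "name", Value: "demo"}},
		Action: func(c *cli.Context) error {
			fmt.Println("debug:", c.GlobalBool("debug")) // defined on the app
			fmt.Println("name:", c.String("name"))       // defined on this command
			return nil
		},
	}}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}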
diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go
deleted file mode 100644
index 0036b1130..000000000
--- a/vendor/github.com/urfave/cli/funcs.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package cli
-
-// BashCompleteFunc is an action to execute when the bash-completion flag is set
-type BashCompleteFunc func(*Context)
-
-// BeforeFunc is an action to execute before any subcommands are run, but after
-// the context is ready. If a non-nil error is returned, no subcommands are run.
-type BeforeFunc func(*Context) error
-
-// AfterFunc is an action to execute after any subcommands are run, but after the
-// subcommand has finished. It is run even if Action() panics.
-type AfterFunc func(*Context) error
-
-// ActionFunc is the action to execute when no subcommands are specified
-type ActionFunc func(*Context) error
-
-// CommandNotFoundFunc is executed if the proper command cannot be found
-type CommandNotFoundFunc func(*Context, string)
-
-// OnUsageErrorFunc is executed if a usage error occurs. This is useful for
-// displaying customized usage error messages. This function is able to replace
-// the original error messages. If this function is not set, the "Incorrect usage"
-// message is displayed and execution is interrupted.
-type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error
-
-// ExitErrHandlerFunc is executed if provided in order to handle ExitError values
-// returned by Actions and Before/After functions.
-type ExitErrHandlerFunc func(context *Context, err error)
-
-// FlagStringFunc is used by the help generation to display a flag, which is
-// expected to be a single line.
-type FlagStringFunc func(Flag) string
-
-// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix
-// text for a flag's full name.
-type FlagNamePrefixFunc func(fullName, placeholder string) string
-
-// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help
-// with the environment variable details.
-type FlagEnvHintFunc func(envVar, str string) string
-
-// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help
-// with the file path details.
-type FlagFileHintFunc func(filePath, str string) string
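
A sketch of wiring the hook types declared above onto an App; the setup and teardown bodies are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	// BeforeFunc: runs after the Context is ready and before the Action;
	// a non-nil error aborts the run.
	app.Before = func(c *cli.Context) error {
		fmt.Println("setting up")
		return nil
	}
	// AfterFunc: runs after the Action, even if it returned an error.
	app.After = func(c *cli.Context) error {
		fmt.Println("tearing down")
		return nil
	}
	app.Action = func(c *cli.Context) error { return nil }
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}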
diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go
deleted file mode 100644
index 65874fa2f..000000000
--- a/vendor/github.com/urfave/cli/help.go
+++ /dev/null
@@ -1,345 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
- "text/tabwriter"
- "text/template"
-)
-
-// AppHelpTemplate is the text template for the Default help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var AppHelpTemplate = `NAME:
- {{.Name}}{{if .Usage}} - {{.Usage}}{{end}}
-
-USAGE:
- {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
-
-VERSION:
- {{.Version}}{{end}}{{end}}{{if .Description}}
-
-DESCRIPTION:
- {{.Description}}{{end}}{{if len .Authors}}
-
-AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
- {{range $index, $author := .Authors}}{{if $index}}
- {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}
-
-COMMANDS:{{range .VisibleCategories}}{{if .Name}}
-
- {{.Name}}:{{end}}{{range .VisibleCommands}}
- {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
-
-GLOBAL OPTIONS:
- {{range $index, $option := .VisibleFlags}}{{if $index}}
- {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}}
-
-COPYRIGHT:
- {{.Copyright}}{{end}}
-`
-
-// CommandHelpTemplate is the text template for the command help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var CommandHelpTemplate = `NAME:
- {{.HelpName}} - {{.Usage}}
-
-USAGE:
- {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
-
-CATEGORY:
- {{.Category}}{{end}}{{if .Description}}
-
-DESCRIPTION:
- {{.Description}}{{end}}{{if .VisibleFlags}}
-
-OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}
-`
-
-// SubcommandHelpTemplate is the text template for the subcommand help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var SubcommandHelpTemplate = `NAME:
- {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}}
-
-USAGE:
- {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
-
-COMMANDS:{{range .VisibleCategories}}{{if .Name}}
- {{.Name}}:{{end}}{{range .VisibleCommands}}
- {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}
-{{end}}{{if .VisibleFlags}}
-OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}
-`
-
-var helpCommand = Command{
- Name: "help",
- Aliases: []string{"h"},
- Usage: "Shows a list of commands or help for one command",
- ArgsUsage: "[command]",
- Action: func(c *Context) error {
- args := c.Args()
- if args.Present() {
- return ShowCommandHelp(c, args.First())
- }
-
- ShowAppHelp(c)
- return nil
- },
-}
-
-var helpSubcommand = Command{
- Name: "help",
- Aliases: []string{"h"},
- Usage: "Shows a list of commands or help for one command",
- ArgsUsage: "[command]",
- Action: func(c *Context) error {
- args := c.Args()
- if args.Present() {
- return ShowCommandHelp(c, args.First())
- }
-
- return ShowSubcommandHelp(c)
- },
-}
-
-// Prints help for the App or Command
-type helpPrinter func(w io.Writer, templ string, data interface{})
-
-// Prints help for the App or Command with custom template function.
-type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{})
-
-// HelpPrinter is a function that writes the help output. If not set a default
-// is used. The function signature is:
-// func(w io.Writer, templ string, data interface{})
-var HelpPrinter helpPrinter = printHelp
-
-// HelpPrinterCustom is same as HelpPrinter but
-// takes a custom function for template function map.
-var HelpPrinterCustom helpPrinterCustom = printHelpCustom
-
-// VersionPrinter prints the version for the App
-var VersionPrinter = printVersion
-
-// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code.
-func ShowAppHelpAndExit(c *Context, exitCode int) {
- ShowAppHelp(c)
- os.Exit(exitCode)
-}
-
-// ShowAppHelp is an action that displays the help.
-func ShowAppHelp(c *Context) (err error) {
- if c.App.CustomAppHelpTemplate == "" {
- HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
- return
- }
- customAppData := func() map[string]interface{} {
- if c.App.ExtraInfo == nil {
- return nil
- }
- return map[string]interface{}{
- "ExtraInfo": c.App.ExtraInfo,
- }
- }
- HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData())
- return nil
-}
-
-// DefaultAppComplete prints the list of subcommands as the default app completion method
-func DefaultAppComplete(c *Context) {
- for _, command := range c.App.Commands {
- if command.Hidden {
- continue
- }
- if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" {
- for _, name := range command.Names() {
- fmt.Fprintf(c.App.Writer, "%s:%s\n", name, command.Usage)
- }
- } else {
- for _, name := range command.Names() {
- fmt.Fprintf(c.App.Writer, "%s\n", name)
- }
- }
- }
-}
-
-// ShowCommandHelpAndExit - exits with code after showing help
-func ShowCommandHelpAndExit(c *Context, command string, code int) {
- ShowCommandHelp(c, command)
- os.Exit(code)
-}
-
-// ShowCommandHelp prints help for the given command
-func ShowCommandHelp(ctx *Context, command string) error {
- // show the subcommand help for a command with subcommands
- if command == "" {
- HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
- return nil
- }
-
- for _, c := range ctx.App.Commands {
- if c.HasName(command) {
- if c.CustomHelpTemplate != "" {
- HelpPrinterCustom(ctx.App.Writer, c.CustomHelpTemplate, c, nil)
- } else {
- HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
- }
- return nil
- }
- }
-
- if ctx.App.CommandNotFound == nil {
- return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3)
- }
-
- ctx.App.CommandNotFound(ctx, command)
- return nil
-}
-
-// ShowSubcommandHelp prints help for the given subcommand
-func ShowSubcommandHelp(c *Context) error {
- return ShowCommandHelp(c, c.Command.Name)
-}
-
-// ShowVersion prints the version number of the App
-func ShowVersion(c *Context) {
- VersionPrinter(c)
-}
-
-func printVersion(c *Context) {
- fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
-}
-
-// ShowCompletions prints the lists of commands within a given context
-func ShowCompletions(c *Context) {
- a := c.App
- if a != nil && a.BashComplete != nil {
- a.BashComplete(c)
- }
-}
-
-// ShowCommandCompletions prints the custom completions for a given command
-func ShowCommandCompletions(ctx *Context, command string) {
- c := ctx.App.Command(command)
- if c != nil && c.BashComplete != nil {
- c.BashComplete(ctx)
- }
-}
-
-func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) {
- funcMap := template.FuncMap{
- "join": strings.Join,
- }
- if customFunc != nil {
- for key, value := range customFunc {
- funcMap[key] = value
- }
- }
-
- w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
- t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
- err := t.Execute(w, data)
- if err != nil {
- // If the writer is closed, t.Execute will fail, and there's nothing
- // we can do to recover.
- if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
- fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
- }
- return
- }
- w.Flush()
-}
-
-func printHelp(out io.Writer, templ string, data interface{}) {
- printHelpCustom(out, templ, data, nil)
-}
-
-func checkVersion(c *Context) bool {
- found := false
- if VersionFlag.GetName() != "" {
- eachName(VersionFlag.GetName(), func(name string) {
- if c.GlobalBool(name) || c.Bool(name) {
- found = true
- }
- })
- }
- return found
-}
-
-func checkHelp(c *Context) bool {
- found := false
- if HelpFlag.GetName() != "" {
- eachName(HelpFlag.GetName(), func(name string) {
- if c.GlobalBool(name) || c.Bool(name) {
- found = true
- }
- })
- }
- return found
-}
-
-func checkCommandHelp(c *Context, name string) bool {
- if c.Bool("h") || c.Bool("help") {
- ShowCommandHelp(c, name)
- return true
- }
-
- return false
-}
-
-func checkSubcommandHelp(c *Context) bool {
- if c.Bool("h") || c.Bool("help") {
- ShowSubcommandHelp(c)
- return true
- }
-
- return false
-}
-
-func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
- if !a.EnableBashCompletion {
- return false, arguments
- }
-
- pos := len(arguments) - 1
- lastArg := arguments[pos]
-
- if lastArg != "--"+BashCompletionFlag.GetName() {
- return false, arguments
- }
-
- return true, arguments[:pos]
-}
-
-func checkCompletions(c *Context) bool {
- if !c.shellComplete {
- return false
- }
-
- if args := c.Args(); args.Present() {
- name := args.First()
- if cmd := c.App.Command(name); cmd != nil {
- // let the command handle the completion
- return false
- }
- }
-
- ShowCompletions(c)
- return true
-}
-
-func checkCommandCompletions(c *Context, name string) bool {
- if !c.shellComplete {
- return false
- }
-
- ShowCommandCompletions(c, name)
- return true
-}
diff --git a/vendor/github.com/urfave/cli/sort.go b/vendor/github.com/urfave/cli/sort.go
deleted file mode 100644
index 23d1c2f77..000000000
--- a/vendor/github.com/urfave/cli/sort.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package cli
-
-import "unicode"
-
-// lexicographicLess compares strings alphabetically considering case.
-func lexicographicLess(i, j string) bool {
- iRunes := []rune(i)
- jRunes := []rune(j)
-
- lenShared := len(iRunes)
- if lenShared > len(jRunes) {
- lenShared = len(jRunes)
- }
-
- for index := 0; index < lenShared; index++ {
- ir := iRunes[index]
- jr := jRunes[index]
-
- if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr {
- return lir < ljr
- }
-
- if ir != jr {
- return ir < jr
- }
- }
-
- return i < j
-}
diff --git a/vendor/github.com/vbauerster/mpb/LICENSE b/vendor/github.com/vbauerster/mpb/LICENSE
new file mode 100644
index 000000000..5e68ed21e
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (C) 2016-2018 Vladimir Bauer
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/vbauerster/mpb/README.md b/vendor/github.com/vbauerster/mpb/README.md
new file mode 100644
index 000000000..9b760647e
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/README.md
@@ -0,0 +1,117 @@
+# Multi Progress Bar
+
+[![GoDoc](https://godoc.org/github.com/vbauerster/mpb?status.svg)](https://godoc.org/github.com/vbauerster/mpb)
+[![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb)
+[![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb)
+[![codecov](https://codecov.io/gh/vbauerster/mpb/branch/master/graph/badge.svg)](https://codecov.io/gh/vbauerster/mpb)
+
+**mpb** is a Go lib for rendering progress bars in terminal applications.
+
+## Features
+
+* __Multiple Bars__: Multiple progress bars are supported
+* __Dynamic Total__: [Set total](https://github.com/vbauerster/mpb/issues/9#issuecomment-344448984) while bar is running
+* __Dynamic Add/Remove__: Dynamically add or remove bars
+* __Cancellation__: Cancel whole rendering process
+* __Predefined Decorators__: Elapsed time, [ewma](https://github.com/VividCortex/ewma) based ETA, Percentage, Bytes counter
+* __Decorator's width sync__: Synchronized decorator's width among multiple bars
+
+## Installation
+
+```sh
+go get github.com/vbauerster/mpb
+```
+
+_Note:_ it is preferable to go get from github.com, rather than gopkg.in. See issue [#11](https://github.com/vbauerster/mpb/issues/11).
+
+## Usage
+
+#### [Rendering single bar](examples/singleBar/main.go)
+```go
+ p := mpb.New(
+ // override default (80) width
+ mpb.WithWidth(64),
+ // override default "[=>-]" format
+ mpb.WithFormat("╢▌▌░╟"),
+ // override default 120ms refresh rate
+ mpb.WithRefreshRate(180*time.Millisecond),
+ )
+
+ total := 100
+ name := "Single Bar:"
+ // adding a single bar
+ bar := p.AddBar(int64(total),
+ mpb.PrependDecorators(
+ // display our name with one space on the right
+ decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}),
+ // replace ETA decorator with "done" message, OnComplete event
+ decor.OnComplete(
+ // ETA decorator with ewma age of 60, and width reservation of 4
+ decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WC{W: 4}), "done",
+ ),
+ ),
+ mpb.AppendDecorators(decor.Percentage()),
+ )
+ // simulating some work
+ max := 100 * time.Millisecond
+ for i := 0; i < total; i++ {
+ start := time.Now()
+ time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10)
+ // ewma based decorators require work duration measurement
+ bar.IncrBy(1, time.Since(start))
+ }
+ // wait for our bar to complete and flush
+ p.Wait()
+```
+
+#### [Rendering multiple bars](examples/simple/main.go)
+```go
+ var wg sync.WaitGroup
+ p := mpb.New(mpb.WithWaitGroup(&wg))
+ total, numBars := 100, 3
+ wg.Add(numBars)
+
+ for i := 0; i < numBars; i++ {
+ name := fmt.Sprintf("Bar#%d:", i)
+ bar := p.AddBar(int64(total),
+ mpb.PrependDecorators(
+ // simple name decorator
+ decor.Name(name),
+ // decor.DSyncWidth bit enables column width synchronization
+ decor.Percentage(decor.WCSyncSpace),
+ ),
+ mpb.AppendDecorators(
+ // replace ETA decorator with "done" message, OnComplete event
+ decor.OnComplete(
+ // ETA decorator with ewma age of 60
+ decor.EwmaETA(decor.ET_STYLE_GO, 60), "done",
+ ),
+ ),
+ )
+ // simulating some work
+ go func() {
+ defer wg.Done()
+ max := 100 * time.Millisecond
+ for i := 0; i < total; i++ {
+ start := time.Now()
+ time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10)
+ // ewma based decorators require work duration measurement
+ bar.IncrBy(1, time.Since(start))
+ }
+ }()
+ }
+ // wait for all bars to complete and flush
+ p.Wait()
+```
+
+#### [Dynamic total](examples/dynTotal/main.go)
+
+![dynamic total](examples/gifs/godEMrCZmJkHYH1X9dN4Nm0U7.svg)
+
+#### [Complex example](examples/complex/main.go)
+
+![complex](examples/gifs/wHzf1M7sd7B3zVa2scBMnjqRf.svg)
+
+#### [Bytes counters](examples/io/single/main.go)
+
+![byte counters](examples/gifs/hIpTa3A5rQz65ssiVuRJu87X6.svg)
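The byte-counter and download examples in this README are only linked as GIFs. As a hedged sketch (the URL and error handling are placeholder assumptions, not part of the vendored code), wiring the `ProxyReader` and KibiByte counters added later in this diff together might look like this:

```go
package main

import (
	"io"
	"io/ioutil"
	"net/http"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	p := mpb.New(mpb.WithWidth(64))

	resp, err := http.Get("https://example.com/file.bin") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// ContentLength must be known for the percentage to be meaningful
	bar := p.AddBar(resp.ContentLength,
		mpb.PrependDecorators(
			decor.CountersKibiByte("% .1f / % .1f", decor.WCSyncWidth),
		),
		mpb.AppendDecorators(decor.Percentage(decor.WC{W: 5})),
	)

	// the proxy reader feeds every Read into the bar's counters
	reader := bar.ProxyReader(resp.Body)
	defer reader.Close()
	if _, err := io.Copy(ioutil.Discard, reader); err != nil {
		panic(err)
	}

	p.Wait()
}
```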
diff --git a/vendor/github.com/vbauerster/mpb/bar.go b/vendor/github.com/vbauerster/mpb/bar.go
new file mode 100644
index 000000000..5a506fc84
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/bar.go
@@ -0,0 +1,455 @@
+package mpb
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "github.com/vbauerster/mpb/decor"
+ "github.com/vbauerster/mpb/internal"
+)
+
+const (
+ rLeft = iota
+ rFill
+ rTip
+ rEmpty
+ rRight
+)
+
+const formatLen = 5
+
+type barRunes [formatLen]rune
+
+// Bar represents a progress Bar
+type Bar struct {
+ priority int
+ index int
+
+ runningBar *Bar
+ cacheState *bState
+ operateState chan func(*bState)
+ int64Ch chan int64
+ boolCh chan bool
+ frameReaderCh chan *frameReader
+ syncTableCh chan [][]chan int
+
+ // done is closed by Bar's goroutine, after cacheState is written
+ done chan struct{}
+ // shutdown is closed from master Progress goroutine only
+ shutdown chan struct{}
+}
+
+type (
+ bState struct {
+ id int
+ width int
+ total int64
+ current int64
+ runes barRunes
+ trimLeftSpace bool
+ trimRightSpace bool
+ toComplete bool
+ removeOnComplete bool
+ barClearOnComplete bool
+ completeFlushed bool
+ aDecorators []decor.Decorator
+ pDecorators []decor.Decorator
+ amountReceivers []decor.AmountReceiver
+ shutdownListeners []decor.ShutdownListener
+ refill *refill
+ bufP, bufB, bufA *bytes.Buffer
+ bufNL *bytes.Buffer
+ panicMsg string
+ newLineExtendFn func(io.Writer, *decor.Statistics)
+
+ // following options are assigned to the *Bar
+ priority int
+ runningBar *Bar
+ }
+ refill struct {
+ char rune
+ till int64
+ }
+ frameReader struct {
+ io.Reader
+ extendedLines int
+ toShutdown bool
+ removeOnComplete bool
+ }
+)
+
+func newBar(wg *sync.WaitGroup, id int, total int64, cancel <-chan struct{}, options ...BarOption) *Bar {
+ if total <= 0 {
+ total = time.Now().Unix()
+ }
+
+ s := &bState{
+ id: id,
+ priority: id,
+ total: total,
+ }
+
+ for _, opt := range options {
+ if opt != nil {
+ opt(s)
+ }
+ }
+
+ s.bufP = bytes.NewBuffer(make([]byte, 0, s.width))
+ s.bufB = bytes.NewBuffer(make([]byte, 0, s.width))
+ s.bufA = bytes.NewBuffer(make([]byte, 0, s.width))
+
+ b := &Bar{
+ priority: s.priority,
+ runningBar: s.runningBar,
+ operateState: make(chan func(*bState)),
+ int64Ch: make(chan int64),
+ boolCh: make(chan bool),
+ frameReaderCh: make(chan *frameReader, 1),
+ syncTableCh: make(chan [][]chan int),
+ done: make(chan struct{}),
+ shutdown: make(chan struct{}),
+ }
+
+ if b.runningBar != nil {
+ b.priority = b.runningBar.priority
+ }
+
+ if s.newLineExtendFn != nil {
+ s.bufNL = bytes.NewBuffer(make([]byte, 0, s.width))
+ }
+
+ go b.serve(wg, s, cancel)
+ return b
+}
+
+// RemoveAllPrependers removes all prepend functions.
+func (b *Bar) RemoveAllPrependers() {
+ select {
+ case b.operateState <- func(s *bState) { s.pDecorators = nil }:
+ case <-b.done:
+ }
+}
+
+// RemoveAllAppenders removes all append functions.
+func (b *Bar) RemoveAllAppenders() {
+ select {
+ case b.operateState <- func(s *bState) { s.aDecorators = nil }:
+ case <-b.done:
+ }
+}
+
+// ProxyReader wraps r with metrics required for progress tracking.
+func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
+ if r == nil {
+ panic("expect io.Reader, got nil")
+ }
+ rc, ok := r.(io.ReadCloser)
+ if !ok {
+ rc = ioutil.NopCloser(r)
+ }
+ return &proxyReader{rc, b, time.Now()}
+}
+
+// ID returns the id of the bar.
+func (b *Bar) ID() int {
+ select {
+ case b.operateState <- func(s *bState) { b.int64Ch <- int64(s.id) }:
+ return int(<-b.int64Ch)
+ case <-b.done:
+ return b.cacheState.id
+ }
+}
+
+// Current returns bar's current number, in other words sum of all increments.
+func (b *Bar) Current() int64 {
+ select {
+ case b.operateState <- func(s *bState) { b.int64Ch <- s.current }:
+ return <-b.int64Ch
+ case <-b.done:
+ return b.cacheState.current
+ }
+}
+
+// SetTotal sets total dynamically.
+// Set final to true when the total is known; it will trigger the bar complete event.
+func (b *Bar) SetTotal(total int64, final bool) bool {
+ select {
+ case b.operateState <- func(s *bState) {
+ if total > 0 {
+ s.total = total
+ }
+ if final {
+ s.current = s.total
+ s.toComplete = true
+ }
+ }:
+ return true
+ case <-b.done:
+ return false
+ }
+}
+
+// SetRefill sets fill rune to r, up until n.
+func (b *Bar) SetRefill(n int, r rune) {
+ if n <= 0 {
+ return
+ }
+ b.operateState <- func(s *bState) {
+ s.refill = &refill{r, int64(n)}
+ }
+}
+
+// RefillBy is deprecated, use SetRefill
+func (b *Bar) RefillBy(n int, r rune) {
+ b.SetRefill(n, r)
+}
+
+// Increment is a shorthand for b.IncrBy(1).
+func (b *Bar) Increment() {
+ b.IncrBy(1)
+}
+
+// IncrBy increments the progress bar by n.
+// wdd is an optional work duration, i.e. time.Since(start),
+// which is expected to be provided if any ewma based decorator is used.
+func (b *Bar) IncrBy(n int, wdd ...time.Duration) {
+ select {
+ case b.operateState <- func(s *bState) {
+ s.current += int64(n)
+ if s.current >= s.total {
+ s.current = s.total
+ s.toComplete = true
+ }
+ for _, ar := range s.amountReceivers {
+ ar.NextAmount(n, wdd...)
+ }
+ }:
+ case <-b.done:
+ }
+}
+
+// Completed reports whether the bar is in completed state.
+func (b *Bar) Completed() bool {
+	// omit select here, because the primary usage of the method is as a loop
+	// condition, like for !bar.Completed() {...}
+	// so when toComplete=true it is called once (at which time the bar is still alive),
+	// the loop then quits and the method is not supposed to be called afterwards.
+ return <-b.boolCh
+}
+
+func (b *Bar) wSyncTable() [][]chan int {
+ select {
+ case b.operateState <- func(s *bState) { b.syncTableCh <- s.wSyncTable() }:
+ return <-b.syncTableCh
+ case <-b.done:
+ return b.cacheState.wSyncTable()
+ }
+}
+
+func (b *Bar) serve(wg *sync.WaitGroup, s *bState, cancel <-chan struct{}) {
+ defer wg.Done()
+ for {
+ select {
+ case op := <-b.operateState:
+ op(s)
+ case b.boolCh <- s.toComplete:
+ case <-cancel:
+ s.toComplete = true
+ cancel = nil
+ case <-b.shutdown:
+ b.cacheState = s
+ close(b.done)
+ for _, sl := range s.shutdownListeners {
+ sl.Shutdown()
+ }
+ return
+ }
+ }
+}
+
+func (b *Bar) render(debugOut io.Writer, tw int) {
+ select {
+ case b.operateState <- func(s *bState) {
+ defer func() {
+ // recovering if user defined decorator panics for example
+ if p := recover(); p != nil {
+ s.panicMsg = fmt.Sprintf("panic: %v", p)
+ fmt.Fprintf(debugOut, "%s %s bar id %02d %v\n", "[mpb]", time.Now(), s.id, s.panicMsg)
+ b.frameReaderCh <- &frameReader{
+ Reader: strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", tw), s.panicMsg)),
+ toShutdown: true,
+ }
+ }
+ }()
+ r := s.draw(tw)
+ var extendedLines int
+ if s.newLineExtendFn != nil {
+ s.bufNL.Reset()
+ s.newLineExtendFn(s.bufNL, newStatistics(s))
+ extendedLines = countLines(s.bufNL.Bytes())
+ r = io.MultiReader(r, s.bufNL)
+ }
+ b.frameReaderCh <- &frameReader{
+ Reader: r,
+ extendedLines: extendedLines,
+ toShutdown: s.toComplete && !s.completeFlushed,
+ removeOnComplete: s.removeOnComplete,
+ }
+ s.completeFlushed = s.toComplete
+ }:
+ case <-b.done:
+ s := b.cacheState
+ r := s.draw(tw)
+ var extendedLines int
+ if s.newLineExtendFn != nil {
+ s.bufNL.Reset()
+ s.newLineExtendFn(s.bufNL, newStatistics(s))
+ extendedLines = countLines(s.bufNL.Bytes())
+ r = io.MultiReader(r, s.bufNL)
+ }
+ b.frameReaderCh <- &frameReader{
+ Reader: r,
+ extendedLines: extendedLines,
+ }
+ }
+}
+
+func (s *bState) draw(termWidth int) io.Reader {
+ defer s.bufA.WriteByte('\n')
+
+ if s.panicMsg != "" {
+ return strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", termWidth), s.panicMsg))
+ }
+
+ stat := newStatistics(s)
+
+ for _, d := range s.pDecorators {
+ s.bufP.WriteString(d.Decor(stat))
+ }
+
+ for _, d := range s.aDecorators {
+ s.bufA.WriteString(d.Decor(stat))
+ }
+
+ prependCount := utf8.RuneCount(s.bufP.Bytes())
+ appendCount := utf8.RuneCount(s.bufA.Bytes())
+
+ if s.barClearOnComplete && s.completeFlushed {
+ return io.MultiReader(s.bufP, s.bufA)
+ }
+
+ s.fillBar(s.width)
+ barCount := utf8.RuneCount(s.bufB.Bytes())
+ totalCount := prependCount + barCount + appendCount
+ if spaceCount := 0; totalCount > termWidth {
+ if !s.trimLeftSpace {
+ spaceCount++
+ }
+ if !s.trimRightSpace {
+ spaceCount++
+ }
+ s.fillBar(termWidth - prependCount - appendCount - spaceCount)
+ }
+
+ return io.MultiReader(s.bufP, s.bufB, s.bufA)
+}
+
+func (s *bState) fillBar(width int) {
+ defer func() {
+ s.bufB.WriteRune(s.runes[rRight])
+ if !s.trimRightSpace {
+ s.bufB.WriteByte(' ')
+ }
+ }()
+
+ s.bufB.Reset()
+ if !s.trimLeftSpace {
+ s.bufB.WriteByte(' ')
+ }
+ s.bufB.WriteRune(s.runes[rLeft])
+ if width <= 2 {
+ return
+ }
+
+ // bar s.width without leftEnd and rightEnd runes
+ barWidth := width - 2
+
+ completedWidth := internal.Percentage(s.total, s.current, int64(barWidth))
+
+ if s.refill != nil {
+ till := internal.Percentage(s.total, s.refill.till, int64(barWidth))
+ // append refill rune
+ var i int64
+ for i = 0; i < till; i++ {
+ s.bufB.WriteRune(s.refill.char)
+ }
+ for i = till; i < completedWidth; i++ {
+ s.bufB.WriteRune(s.runes[rFill])
+ }
+ } else {
+ var i int64
+ for i = 0; i < completedWidth; i++ {
+ s.bufB.WriteRune(s.runes[rFill])
+ }
+ }
+
+ if completedWidth < int64(barWidth) && completedWidth > 0 {
+ _, size := utf8.DecodeLastRune(s.bufB.Bytes())
+ s.bufB.Truncate(s.bufB.Len() - size)
+ s.bufB.WriteRune(s.runes[rTip])
+ }
+
+ for i := completedWidth; i < int64(barWidth); i++ {
+ s.bufB.WriteRune(s.runes[rEmpty])
+ }
+}
+
+func (s *bState) wSyncTable() [][]chan int {
+ columns := make([]chan int, 0, len(s.pDecorators)+len(s.aDecorators))
+ var pCount int
+ for _, d := range s.pDecorators {
+ if ok, ch := d.Syncable(); ok {
+ columns = append(columns, ch)
+ pCount++
+ }
+ }
+ var aCount int
+ for _, d := range s.aDecorators {
+ if ok, ch := d.Syncable(); ok {
+ columns = append(columns, ch)
+ aCount++
+ }
+ }
+ table := make([][]chan int, 2)
+ table[0] = columns[0:pCount]
+ table[1] = columns[pCount : pCount+aCount : pCount+aCount]
+ return table
+}
+
+func newStatistics(s *bState) *decor.Statistics {
+ return &decor.Statistics{
+ ID: s.id,
+ Completed: s.completeFlushed,
+ Total: s.total,
+ Current: s.current,
+ }
+}
+
+func strToBarRunes(format string) (array barRunes) {
+ for i, n := 0, 0; len(format) > 0; i++ {
+ array[i], n = utf8.DecodeRuneInString(format)
+ format = format[n:]
+ }
+ return
+}
+
+func countLines(b []byte) int {
+ return bytes.Count(b, []byte("\n"))
+}
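SetTotal above is what powers the "dynamic total" feature mentioned in the README. A minimal, self-contained sketch (chunk sizes are simulated with random numbers and sleeps) of growing the total while the bar is running:

```go
package main

import (
	"math/rand"
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	p := mpb.New()
	// a total <= 0 is replaced by a placeholder inside newBar until SetTotal is called
	bar := p.AddBar(0,
		mpb.AppendDecorators(decor.CountersNoUnit("%d / %d", decor.WCSyncWidth)),
	)

	var total int64
	for i := 0; i < 20; i++ {
		n := int64(rand.Intn(64) + 1) // pretend a chunk of unknown size arrived
		total += n
		// final=false keeps the bar running; stay a little ahead of current
		bar.SetTotal(total+128, false)
		bar.IncrBy(int(n))
		time.Sleep(20 * time.Millisecond)
	}
	// final=true snaps current to total and triggers the complete event
	bar.SetTotal(total, true)
	p.Wait()
}
```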
diff --git a/vendor/github.com/vbauerster/mpb/bar_option.go b/vendor/github.com/vbauerster/mpb/bar_option.go
new file mode 100644
index 000000000..e33bce4da
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/bar_option.go
@@ -0,0 +1,124 @@
+package mpb
+
+import (
+ "io"
+
+ "github.com/vbauerster/mpb/decor"
+)
+
+// BarOption is a function option which changes the default behavior of a bar,
+// if passed to p.AddBar(int64, ...BarOption)
+type BarOption func(*bState)
+
+// AppendDecorators lets you inject decorators on the bar's right side
+func AppendDecorators(appenders ...decor.Decorator) BarOption {
+ return func(s *bState) {
+ for _, decorator := range appenders {
+ if ar, ok := decorator.(decor.AmountReceiver); ok {
+ s.amountReceivers = append(s.amountReceivers, ar)
+ }
+ if sl, ok := decorator.(decor.ShutdownListener); ok {
+ s.shutdownListeners = append(s.shutdownListeners, sl)
+ }
+ s.aDecorators = append(s.aDecorators, decorator)
+ }
+ }
+}
+
+// PrependDecorators lets you inject decorators on the bar's left side
+func PrependDecorators(prependers ...decor.Decorator) BarOption {
+ return func(s *bState) {
+ for _, decorator := range prependers {
+ if ar, ok := decorator.(decor.AmountReceiver); ok {
+ s.amountReceivers = append(s.amountReceivers, ar)
+ }
+ if sl, ok := decorator.(decor.ShutdownListener); ok {
+ s.shutdownListeners = append(s.shutdownListeners, sl)
+ }
+ s.pDecorators = append(s.pDecorators, decorator)
+ }
+ }
+}
+
+// BarTrimLeft trims left side space of the bar
+func BarTrimLeft() BarOption {
+ return func(s *bState) {
+ s.trimLeftSpace = true
+ }
+}
+
+// BarTrimRight trims right space of the bar
+func BarTrimRight() BarOption {
+ return func(s *bState) {
+ s.trimRightSpace = true
+ }
+}
+
+// BarTrim trims both left and right spaces of the bar
+func BarTrim() BarOption {
+ return func(s *bState) {
+ s.trimLeftSpace = true
+ s.trimRightSpace = true
+ }
+}
+
+// BarID overwrites internal bar id
+func BarID(id int) BarOption {
+ return func(s *bState) {
+ s.id = id
+ }
+}
+
+// BarRemoveOnComplete is a flag; if set, the whole bar line will be removed on the complete event.
+// If both BarRemoveOnComplete and BarClearOnComplete are set, the bar section gets cleared first
+// and then the whole bar line gets removed completely.
+func BarRemoveOnComplete() BarOption {
+ return func(s *bState) {
+ s.removeOnComplete = true
+ }
+}
+
+// BarReplaceOnComplete indicates a delayed bar start, after the `runningBar` is complete.
+// To achieve the bar replacement effect, `runningBar` should have its `BarRemoveOnComplete` option set.
+func BarReplaceOnComplete(runningBar *Bar) BarOption {
+ return func(s *bState) {
+ s.runningBar = runningBar
+ }
+}
+
+// BarClearOnComplete is a flag; if set, the bar section will be cleared on the complete event.
+// If you need to remove a whole bar line, refer to BarRemoveOnComplete.
+func BarClearOnComplete() BarOption {
+ return func(s *bState) {
+ s.barClearOnComplete = true
+ }
+}
+
+// BarPriority sets bar's priority.
+// Zero is highest priority, i.e. bar will be on top.
+// If `BarReplaceOnComplete` option is supplied, this option is ignored.
+func BarPriority(priority int) BarOption {
+ return func(s *bState) {
+ s.priority = priority
+ }
+}
+
+// BarNewLineExtend takes a user defined efn, which gets called on each render cycle.
+// Anything written to efn's provided writer will appear on a new line of the respective bar.
+func BarNewLineExtend(efn func(io.Writer, *decor.Statistics)) BarOption {
+ return func(s *bState) {
+ s.newLineExtendFn = efn
+ }
+}
+
+func barWidth(w int) BarOption {
+ return func(s *bState) {
+ s.width = w
+ }
+}
+
+func barFormat(format string) BarOption {
+ return func(s *bState) {
+ s.runes = strToBarRunes(format)
+ }
+}
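The interplay between BarRemoveOnComplete and BarReplaceOnComplete documented above is easiest to see in a two-stage sequence. A rough sketch (timings and totals are arbitrary, and the rendering behavior is as described by the option comments):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	p := mpb.New()

	preparing := p.AddBar(100,
		mpb.BarRemoveOnComplete(), // required for the replacement effect
		mpb.PrependDecorators(decor.Name("preparing:")),
	)
	downloading := p.AddBar(100,
		mpb.BarReplaceOnComplete(preparing), // delayed start, inherits preparing's position
		mpb.PrependDecorators(decor.Name("downloading:")),
		mpb.AppendDecorators(decor.Percentage()),
	)

	for i := 0; i < 100; i++ {
		preparing.Increment()
		time.Sleep(5 * time.Millisecond)
	}
	for i := 0; i < 100; i++ {
		downloading.Increment()
		time.Sleep(5 * time.Millisecond)
	}
	p.Wait()
}
```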
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/cwriter/writer.go
new file mode 100644
index 000000000..0b1470d4c
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/cwriter/writer.go
@@ -0,0 +1,78 @@
+package cwriter
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ isatty "github.com/mattn/go-isatty"
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+// ESC is the ASCII code for escape character
+const ESC = 27
+
+var NotATTY = errors.New("not a terminal")
+
+var (
+ cursorUp = fmt.Sprintf("%c[%dA", ESC, 1)
+ clearLine = fmt.Sprintf("%c[2K\r", ESC)
+ clearCursorAndLine = cursorUp + clearLine
+)
+
+// Writer is a buffered writer that updates the terminal.
+// The contents of writer will be flushed when Flush is called.
+type Writer struct {
+ out io.Writer
+ buf bytes.Buffer
+ isTerminal bool
+ fd int
+ lineCount int
+}
+
+// New returns a new Writer with defaults
+func New(out io.Writer) *Writer {
+ w := &Writer{out: out}
+ if f, ok := out.(*os.File); ok {
+ fd := f.Fd()
+ w.isTerminal = isatty.IsTerminal(fd)
+ w.fd = int(fd)
+ }
+ return w
+}
+
+// Flush flushes the underlying buffer
+func (w *Writer) Flush(lineCount int) error {
+ err := w.clearLines()
+ w.lineCount = lineCount
+ // WriteTo takes care of w.buf.Reset
+ if _, e := w.buf.WriteTo(w.out); err == nil {
+ err = e
+ }
+ return err
+}
+
+// Write appends the contents of p to the underlying buffer
+func (w *Writer) Write(p []byte) (n int, err error) {
+ return w.buf.Write(p)
+}
+
+// WriteString writes string to the underlying buffer
+func (w *Writer) WriteString(s string) (n int, err error) {
+ return w.buf.WriteString(s)
+}
+
+// ReadFrom reads from the provided io.Reader and writes to the underlying buffer.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ return w.buf.ReadFrom(r)
+}
+
+func (w *Writer) GetWidth() (int, error) {
+ if w.isTerminal {
+ tw, _, err := terminal.GetSize(w.fd)
+ return tw, err
+ }
+ return -1, NotATTY
+}
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go
new file mode 100644
index 000000000..05e31c480
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package cwriter
+
+import (
+ "io"
+ "strings"
+)
+
+func (w *Writer) clearLines() error {
+ _, err := io.WriteString(w.out, strings.Repeat(clearCursorAndLine, w.lineCount))
+ return err
+}
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go
new file mode 100644
index 000000000..dad7f50b2
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go
@@ -0,0 +1,77 @@
+// +build windows
+
+package cwriter
+
+import (
+ "io"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/mattn/go-isatty"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+ procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+)
+
+type (
+ short int16
+ word uint16
+ dword uint32
+
+ coord struct {
+ x short
+ y short
+ }
+ smallRect struct {
+ left short
+ top short
+ right short
+ bottom short
+ }
+ consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes word
+ window smallRect
+ maximumWindowSize coord
+ }
+)
+
+// FdWriter is a writer with a file descriptor.
+type FdWriter interface {
+ io.Writer
+ Fd() uintptr
+}
+
+func (w *Writer) clearLines() error {
+ f, ok := w.out.(FdWriter)
+ if ok && !isatty.IsTerminal(f.Fd()) {
+ _, err := io.WriteString(w.out, strings.Repeat(clearCursorAndLine, w.lineCount))
+ return err
+ }
+ fd := f.Fd()
+ var info consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(&info)))
+
+ for i := 0; i < w.lineCount; i++ {
+ // move the cursor up
+ info.cursorPosition.y--
+ procSetConsoleCursorPosition.Call(fd, uintptr(*(*int32)(unsafe.Pointer(&info.cursorPosition))))
+ // clear the line
+ cursor := coord{
+ x: info.window.left,
+ y: info.window.top + info.cursorPosition.y,
+ }
+ var count, w dword
+ count = dword(info.size.x)
+ procFillConsoleOutputCharacter.Call(fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w)))
+ }
+ return nil
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/counters.go b/vendor/github.com/vbauerster/mpb/decor/counters.go
new file mode 100644
index 000000000..e4161dc4b
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/counters.go
@@ -0,0 +1,206 @@
+package decor
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+const (
+ _ = iota
+ KiB = 1 << (iota * 10)
+ MiB
+ GiB
+ TiB
+)
+
+const (
+ KB = 1000
+ MB = KB * 1000
+ GB = MB * 1000
+ TB = GB * 1000
+)
+
+const (
+ _ = iota
+ UnitKiB
+ UnitKB
+)
+
+type CounterKiB int64
+
+func (c CounterKiB) Format(st fmt.State, verb rune) {
+ prec, ok := st.Precision()
+
+ if verb == 'd' || !ok {
+ prec = 0
+ }
+ if verb == 'f' && !ok {
+ prec = 6
+ }
+	// retain old behavior if the s verb is used
+ if verb == 's' {
+ prec = 1
+ }
+
+ var res, unit string
+ switch {
+ case c >= TiB:
+ unit = "TiB"
+ res = strconv.FormatFloat(float64(c)/TiB, 'f', prec, 64)
+ case c >= GiB:
+ unit = "GiB"
+ res = strconv.FormatFloat(float64(c)/GiB, 'f', prec, 64)
+ case c >= MiB:
+ unit = "MiB"
+ res = strconv.FormatFloat(float64(c)/MiB, 'f', prec, 64)
+ case c >= KiB:
+ unit = "KiB"
+ res = strconv.FormatFloat(float64(c)/KiB, 'f', prec, 64)
+ default:
+ unit = "b"
+ res = strconv.FormatInt(int64(c), 10)
+ }
+
+ if st.Flag(' ') {
+ res += " "
+ }
+ res += unit
+
+ if w, ok := st.Width(); ok {
+ if len(res) < w {
+ pad := strings.Repeat(" ", w-len(res))
+ if st.Flag(int('-')) {
+ res += pad
+ } else {
+ res = pad + res
+ }
+ }
+ }
+
+ io.WriteString(st, res)
+}
+
+type CounterKB int64
+
+func (c CounterKB) Format(st fmt.State, verb rune) {
+ prec, ok := st.Precision()
+
+ if verb == 'd' || !ok {
+ prec = 0
+ }
+ if verb == 'f' && !ok {
+ prec = 6
+ }
+	// retain old behavior if the s verb is used
+ if verb == 's' {
+ prec = 1
+ }
+
+ var res, unit string
+ switch {
+ case c >= TB:
+ unit = "TB"
+ res = strconv.FormatFloat(float64(c)/TB, 'f', prec, 64)
+ case c >= GB:
+ unit = "GB"
+ res = strconv.FormatFloat(float64(c)/GB, 'f', prec, 64)
+ case c >= MB:
+ unit = "MB"
+ res = strconv.FormatFloat(float64(c)/MB, 'f', prec, 64)
+ case c >= KB:
+ unit = "kB"
+ res = strconv.FormatFloat(float64(c)/KB, 'f', prec, 64)
+ default:
+ unit = "b"
+ res = strconv.FormatInt(int64(c), 10)
+ }
+
+ if st.Flag(' ') {
+ res += " "
+ }
+ res += unit
+
+ if w, ok := st.Width(); ok {
+ if len(res) < w {
+ pad := strings.Repeat(" ", w-len(res))
+ if st.Flag(int('-')) {
+ res += pad
+ } else {
+ res = pad + res
+ }
+ }
+ }
+
+ io.WriteString(st, res)
+}
+
+// CountersNoUnit is a wrapper around Counters with no unit param.
+func CountersNoUnit(pairFormat string, wcc ...WC) Decorator {
+ return Counters(0, pairFormat, wcc...)
+}
+
+// CountersKibiByte is a wrapper around Counters with predefined unit UnitKiB (bytes/1024).
+func CountersKibiByte(pairFormat string, wcc ...WC) Decorator {
+ return Counters(UnitKiB, pairFormat, wcc...)
+}
+
+// CountersKiloByte is a wrapper around Counters with predefined unit UnitKB (bytes/1000).
+func CountersKiloByte(pairFormat string, wcc ...WC) Decorator {
+ return Counters(UnitKB, pairFormat, wcc...)
+}
+
+// Counters decorator with dynamic unit measure adjustment.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `pairFormat` printf compatible verbs for current and total, like "%f" or "%d"
+//
+// `wcc` optional WC config
+//
+// pairFormat example if UnitKB is chosen:
+//
+// "%.1f / %.1f" = "1.0MB / 12.0MB" or "% .1f / % .1f" = "1.0 MB / 12.0 MB"
+func Counters(unit int, pairFormat string, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ wc.Init()
+ d := &countersDecorator{
+ WC: wc,
+ unit: unit,
+ pairFormat: pairFormat,
+ }
+ return d
+}
+
+type countersDecorator struct {
+ WC
+ unit int
+ pairFormat string
+ completeMsg *string
+}
+
+func (d *countersDecorator) Decor(st *Statistics) string {
+ if st.Completed && d.completeMsg != nil {
+ return d.FormatMsg(*d.completeMsg)
+ }
+
+ var str string
+ switch d.unit {
+ case UnitKiB:
+ str = fmt.Sprintf(d.pairFormat, CounterKiB(st.Current), CounterKiB(st.Total))
+ case UnitKB:
+ str = fmt.Sprintf(d.pairFormat, CounterKB(st.Current), CounterKB(st.Total))
+ default:
+ str = fmt.Sprintf(d.pairFormat, st.Current, st.Total)
+ }
+
+ return d.FormatMsg(str)
+}
+
+func (d *countersDecorator) OnCompleteMessage(msg string) {
+ d.completeMsg = &msg
+}
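The precision rules in the two Format methods above are subtle; the expected outputs in the comments below are worked out by hand from that code and are not taken from the library's own examples:

```go
package main

import (
	"fmt"

	"github.com/vbauerster/mpb/decor"
)

func main() {
	c := decor.CounterKiB(2048)
	fmt.Printf("%d\n", c)    // "2KiB"     - %d forces precision 0
	fmt.Printf("%.1f\n", c)  // "2.0KiB"   - an explicit precision is kept
	fmt.Printf("% .1f\n", c) // "2.0 KiB"  - the space flag inserts a gap before the unit
	fmt.Printf("%8.1f\n", c) // "  2.0KiB" - a width pads on the left
}
```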
diff --git a/vendor/github.com/vbauerster/mpb/decor/decorator.go b/vendor/github.com/vbauerster/mpb/decor/decorator.go
new file mode 100644
index 000000000..6aaf6c830
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/decorator.go
@@ -0,0 +1,144 @@
+package decor
+
+import (
+ "fmt"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+	// DidentRight bit specifies the indentation direction.
+ // |foo |b | With DidentRight
+ // | foo| b| Without DidentRight
+ DidentRight = 1 << iota
+
+ // DextraSpace bit adds extra space, makes sense with DSyncWidth only.
+ // When DidentRight bit set, the space will be added to the right,
+ // otherwise to the left.
+ DextraSpace
+
+ // DSyncWidth bit enables same column width synchronization.
+ // Effective with multiple bars only.
+ DSyncWidth
+
+ // DSyncWidthR is shortcut for DSyncWidth|DidentRight
+ DSyncWidthR = DSyncWidth | DidentRight
+
+ // DSyncSpace is shortcut for DSyncWidth|DextraSpace
+ DSyncSpace = DSyncWidth | DextraSpace
+
+ // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight
+ DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight
+)
+
+const (
+ ET_STYLE_GO = iota
+ ET_STYLE_HHMMSS
+ ET_STYLE_HHMM
+ ET_STYLE_MMSS
+)
+
+// Statistics is a struct, which gets passed to a Decorator.
+type Statistics struct {
+ ID int
+ Completed bool
+ Total int64
+ Current int64
+}
+
+// Decorator interface.
+// A decorator must implement this interface in order to be used with the mpb library.
+type Decorator interface {
+ Decor(*Statistics) string
+ Syncable
+}
+
+// Syncable interface.
+// All decorators implement this interface implicitly.
+// Its Syncable method exposes width sync channel, if sync is enabled.
+type Syncable interface {
+ Syncable() (bool, chan int)
+}
+
+// OnCompleteMessenger interface.
+// Decorators implementing this interface are supposed to return the provided string on the complete event.
+type OnCompleteMessenger interface {
+ OnCompleteMessage(string)
+}
+
+// AmountReceiver interface.
+// If a decorator needs to receive the increment amount,
+// this is the right interface to implement.
+type AmountReceiver interface {
+ NextAmount(int, ...time.Duration)
+}
+
+// ShutdownListener interface.
+// If a decorator needs to be notified once upon the bar shutdown event,
+// this is the right interface to implement.
+type ShutdownListener interface {
+ Shutdown()
+}
+
+// Global convenience shortcuts
+var (
+ WCSyncWidth = WC{C: DSyncWidth}
+ WCSyncWidthR = WC{C: DSyncWidthR}
+ WCSyncSpace = WC{C: DSyncSpace}
+ WCSyncSpaceR = WC{C: DSyncSpaceR}
+)
+
+// WC is a struct with two public fields W and C, both of int type.
+// W represents width and C represents bit set of width related config.
+type WC struct {
+ W int
+ C int
+ format string
+ wsync chan int
+}
+
+// FormatMsg formats final message according to WC.W and WC.C.
+// Should be called by any Decorator implementation.
+func (wc WC) FormatMsg(msg string) string {
+ if (wc.C & DSyncWidth) != 0 {
+ wc.wsync <- utf8.RuneCountInString(msg)
+ max := <-wc.wsync
+ if max == 0 {
+ max = wc.W
+ }
+ if (wc.C & DextraSpace) != 0 {
+ max++
+ }
+ return fmt.Sprintf(fmt.Sprintf(wc.format, max), msg)
+ }
+ return fmt.Sprintf(fmt.Sprintf(wc.format, wc.W), msg)
+}
+
+// Init initializes width related config.
+func (wc *WC) Init() {
+ wc.format = "%%"
+ if (wc.C & DidentRight) != 0 {
+ wc.format += "-"
+ }
+ wc.format += "%ds"
+ if (wc.C & DSyncWidth) != 0 {
+ wc.wsync = make(chan int)
+ }
+}
+
+func (wc *WC) Syncable() (bool, chan int) {
+ return (wc.C & DSyncWidth) != 0, wc.wsync
+}
+
+// OnComplete returns a decorator which wraps the provided decorator, with the sole
+// purpose of displaying the provided message on the complete event.
+//
+// `decorator` Decorator to wrap
+//
+// `message` message to display on complete event
+func OnComplete(decorator Decorator, message string) Decorator {
+ if d, ok := decorator.(OnCompleteMessenger); ok {
+ d.OnCompleteMessage(message)
+ }
+ return decorator
+}
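A custom decorator only needs Decor plus Syncable, and it can get the latter for free by embedding WC, exactly like the built-in decorators do. A minimal sketch (currentDecorator and NewCurrent are made-up names for illustration, not part of the package):

```go
package main

import (
	"fmt"
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

// currentDecorator renders the bar's current value; embedding decor.WC
// provides the Syncable method and width handling via FormatMsg.
type currentDecorator struct {
	decor.WC
	completeMsg *string
}

func NewCurrent(wcc ...decor.WC) decor.Decorator {
	var wc decor.WC
	for _, widthConf := range wcc {
		wc = widthConf
	}
	wc.Init()
	return &currentDecorator{WC: wc}
}

func (d *currentDecorator) Decor(st *decor.Statistics) string {
	if st.Completed && d.completeMsg != nil {
		return d.FormatMsg(*d.completeMsg)
	}
	return d.FormatMsg(fmt.Sprintf("%d", st.Current))
}

// OnCompleteMessage makes the decorator usable with decor.OnComplete.
func (d *currentDecorator) OnCompleteMessage(msg string) {
	d.completeMsg = &msg
}

func main() {
	p := mpb.New()
	bar := p.AddBar(100,
		mpb.AppendDecorators(decor.OnComplete(NewCurrent(decor.WC{W: 4}), "done")),
	)
	for !bar.Completed() {
		bar.Increment()
		time.Sleep(10 * time.Millisecond)
	}
	p.Wait()
}
```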
diff --git a/vendor/github.com/vbauerster/mpb/decor/doc.go b/vendor/github.com/vbauerster/mpb/decor/doc.go
new file mode 100644
index 000000000..561a8677c
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/doc.go
@@ -0,0 +1,25 @@
+// Copyright (C) 2016-2018 Vladimir Bauer
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ Package decor contains common decorators used by "github.com/vbauerster/mpb" package.
+
+ Some decorators returned by this package might have a closure state. It is ok to use
+ decorators concurrently, unless you share the same decorator among multiple
+ *mpb.Bar instances. To avoid data races, create new decorator per *mpb.Bar instance.
+
+ Don't:
+
+ p := mpb.New()
+ name := decor.Name("bar")
+ p.AddBar(100, mpb.AppendDecorators(name))
+ p.AddBar(100, mpb.AppendDecorators(name))
+
+ Do:
+
+ p := mpb.New()
+ p.AddBar(100, mpb.AppendDecorators(decor.Name("bar1")))
+ p.AddBar(100, mpb.AppendDecorators(decor.Name("bar2")))
+*/
+package decor
diff --git a/vendor/github.com/vbauerster/mpb/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/decor/elapsed.go
new file mode 100644
index 000000000..649d40a30
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/elapsed.go
@@ -0,0 +1,68 @@
+package decor
+
+import (
+ "fmt"
+ "time"
+)
+
+// Elapsed returns elapsed time decorator.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `wcc` optional WC config
+func Elapsed(style int, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ wc.Init()
+ d := &elapsedDecorator{
+ WC: wc,
+ style: style,
+ startTime: time.Now(),
+ }
+ return d
+}
+
+type elapsedDecorator struct {
+ WC
+ style int
+ startTime time.Time
+ msg string
+ completeMsg *string
+}
+
+func (d *elapsedDecorator) Decor(st *Statistics) string {
+ if st.Completed {
+ if d.completeMsg != nil {
+ return d.FormatMsg(*d.completeMsg)
+ }
+ return d.FormatMsg(d.msg)
+ }
+
+ timeElapsed := time.Since(d.startTime)
+ hours := int64((timeElapsed / time.Hour) % 60)
+ minutes := int64((timeElapsed / time.Minute) % 60)
+ seconds := int64((timeElapsed / time.Second) % 60)
+
+ switch d.style {
+ case ET_STYLE_GO:
+ d.msg = fmt.Sprint(time.Duration(timeElapsed.Seconds()) * time.Second)
+ case ET_STYLE_HHMMSS:
+ d.msg = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ case ET_STYLE_HHMM:
+ d.msg = fmt.Sprintf("%02d:%02d", hours, minutes)
+ case ET_STYLE_MMSS:
+ if hours > 0 {
+ d.msg = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ } else {
+ d.msg = fmt.Sprintf("%02d:%02d", minutes, seconds)
+ }
+ }
+
+ return d.FormatMsg(d.msg)
+}
+
+func (d *elapsedDecorator) OnCompleteMessage(msg string) {
+ d.completeMsg = &msg
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/eta.go b/vendor/github.com/vbauerster/mpb/decor/eta.go
new file mode 100644
index 000000000..44a1f03ea
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/eta.go
@@ -0,0 +1,207 @@
+package decor
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/VividCortex/ewma"
+ "github.com/vbauerster/mpb/internal"
+)
+
+type TimeNormalizer func(time.Duration) time.Duration
+
+// EwmaETA exponential-weighted-moving-average based ETA decorator.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `age` is the previous N samples to average over.
+//
+// `wcc` optional WC config
+func EwmaETA(style int, age float64, wcc ...WC) Decorator {
+ return MovingAverageETA(style, ewma.NewMovingAverage(age), NopNormalizer(), wcc...)
+}
+
+// MovingAverageETA decorator relies on MovingAverage implementation to calculate its average.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `average` available implementations of MovingAverage [ewma.MovingAverage|NewMedian|NewMedianEwma]
+//
+// `normalizer` available implementations are [NopNormalizer|FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
+//
+// `wcc` optional WC config
+func MovingAverageETA(style int, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ wc.Init()
+ d := &movingAverageETA{
+ WC: wc,
+ style: style,
+ average: average,
+ normalizer: normalizer,
+ }
+ return d
+}
+
+type movingAverageETA struct {
+ WC
+ style int
+ average ewma.MovingAverage
+ completeMsg *string
+ normalizer TimeNormalizer
+}
+
+func (d *movingAverageETA) Decor(st *Statistics) string {
+ if st.Completed && d.completeMsg != nil {
+ return d.FormatMsg(*d.completeMsg)
+ }
+
+ v := internal.Round(d.average.Value())
+ remaining := d.normalizer(time.Duration((st.Total - st.Current) * int64(v)))
+ hours := int64((remaining / time.Hour) % 60)
+ minutes := int64((remaining / time.Minute) % 60)
+ seconds := int64((remaining / time.Second) % 60)
+
+ var str string
+ switch d.style {
+ case ET_STYLE_GO:
+ str = fmt.Sprint(time.Duration(remaining.Seconds()) * time.Second)
+ case ET_STYLE_HHMMSS:
+ str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ case ET_STYLE_HHMM:
+ str = fmt.Sprintf("%02d:%02d", hours, minutes)
+ case ET_STYLE_MMSS:
+ if hours > 0 {
+ str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ } else {
+ str = fmt.Sprintf("%02d:%02d", minutes, seconds)
+ }
+ }
+
+ return d.FormatMsg(str)
+}
+
+func (d *movingAverageETA) NextAmount(n int, wdd ...time.Duration) {
+ var workDuration time.Duration
+ for _, wd := range wdd {
+ workDuration = wd
+ }
+ lastItemEstimate := float64(workDuration) / float64(n)
+ if math.IsInf(lastItemEstimate, 0) || math.IsNaN(lastItemEstimate) {
+ return
+ }
+ d.average.Add(lastItemEstimate)
+}
+
+func (d *movingAverageETA) OnCompleteMessage(msg string) {
+ d.completeMsg = &msg
+}
+
+// AverageETA decorator.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `wcc` optional WC config
+func AverageETA(style int, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ wc.Init()
+ d := &averageETA{
+ WC: wc,
+ style: style,
+ startTime: time.Now(),
+ }
+ return d
+}
+
+type averageETA struct {
+ WC
+ style int
+ startTime time.Time
+ completeMsg *string
+}
+
+func (d *averageETA) Decor(st *Statistics) string {
+ if st.Completed && d.completeMsg != nil {
+ return d.FormatMsg(*d.completeMsg)
+ }
+
+ var str string
+ timeElapsed := time.Since(d.startTime)
+ v := internal.Round(float64(timeElapsed) / float64(st.Current))
+ if math.IsInf(v, 0) || math.IsNaN(v) {
+ v = 0
+ }
+ remaining := time.Duration((st.Total - st.Current) * int64(v))
+ hours := int64((remaining / time.Hour) % 60)
+ minutes := int64((remaining / time.Minute) % 60)
+ seconds := int64((remaining / time.Second) % 60)
+
+ switch d.style {
+ case ET_STYLE_GO:
+ str = fmt.Sprint(time.Duration(remaining.Seconds()) * time.Second)
+ case ET_STYLE_HHMMSS:
+ str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ case ET_STYLE_HHMM:
+ str = fmt.Sprintf("%02d:%02d", hours, minutes)
+ case ET_STYLE_MMSS:
+ if hours > 0 {
+ str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ } else {
+ str = fmt.Sprintf("%02d:%02d", minutes, seconds)
+ }
+ }
+
+ return d.FormatMsg(str)
+}
+
+func (d *averageETA) OnCompleteMessage(msg string) {
+ d.completeMsg = &msg
+}
+
+func MaxTolerateTimeNormalizer(maxTolerate time.Duration) TimeNormalizer {
+ var normalized time.Duration
+ var lastCall time.Time
+ return func(remaining time.Duration) time.Duration {
+ if diff := normalized - remaining; diff <= 0 || diff > maxTolerate || remaining < maxTolerate/2 {
+ normalized = remaining
+ lastCall = time.Now()
+ return remaining
+ }
+ normalized -= time.Since(lastCall)
+ lastCall = time.Now()
+ return normalized
+ }
+}
+
+func FixedIntervalTimeNormalizer(updInterval int) TimeNormalizer {
+ var normalized time.Duration
+ var lastCall time.Time
+ var count int
+ return func(remaining time.Duration) time.Duration {
+ if count == 0 || remaining <= time.Duration(15*time.Second) {
+ count = updInterval
+ normalized = remaining
+ lastCall = time.Now()
+ return remaining
+ }
+ count--
+ normalized -= time.Since(lastCall)
+ lastCall = time.Now()
+ if normalized > 0 {
+ return normalized
+ }
+ return remaining
+ }
+}
+
+func NopNormalizer() TimeNormalizer {
+ return func(remaining time.Duration) time.Duration {
+ return remaining
+ }
+}
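The ETA variants and normalizers above compose; a hedged sketch under the assumption that the workload is simulated with sleeps: the ETA is fed by a median-smoothed ewma and the normalizer re-reads the raw estimate only every 10th call, otherwise counting down smoothly.

```go
package main

import (
	"math/rand"
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	p := mpb.New()
	total := 200
	bar := p.AddBar(int64(total),
		mpb.PrependDecorators(decor.Name("working:")),
		mpb.AppendDecorators(
			decor.MovingAverageETA(
				decor.ET_STYLE_MMSS,
				decor.NewMedianEwma(),
				decor.FixedIntervalTimeNormalizer(10),
				decor.WC{W: 6},
			),
		),
	)
	for i := 0; i < total; i++ {
		start := time.Now()
		time.Sleep(time.Duration(rand.Intn(20)+5) * time.Millisecond)
		// ewma/median based decorators need the per-step work duration
		bar.IncrBy(1, time.Since(start))
	}
	p.Wait()
}
```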
diff --git a/vendor/github.com/vbauerster/mpb/decor/moving-average.go b/vendor/github.com/vbauerster/mpb/decor/moving-average.go
new file mode 100644
index 000000000..f9596a27f
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/moving-average.go
@@ -0,0 +1,66 @@
+package decor
+
+import (
+ "sort"
+
+ "github.com/VividCortex/ewma"
+)
+
+// MovingAverage is the interface that computes a moving average over a time-
+// series stream of numbers. The average may be over a window or exponentially
+// decaying.
+type MovingAverage interface {
+ Add(float64)
+ Value() float64
+ Set(float64)
+}
+
+type medianWindow [3]float64
+
+func (s *medianWindow) Len() int { return len(s) }
+func (s *medianWindow) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s *medianWindow) Less(i, j int) bool { return s[i] < s[j] }
+
+func (s *medianWindow) Add(value float64) {
+ s[0], s[1] = s[1], s[2]
+ s[2] = value
+}
+
+func (s *medianWindow) Value() float64 {
+ tmp := *s
+ sort.Sort(&tmp)
+ return tmp[1]
+}
+
+func (s *medianWindow) Set(value float64) {
+ for i := 0; i < len(s); i++ {
+ s[i] = value
+ }
+}
+
+// NewMedian is a fixed, last-3-samples median MovingAverage.
+func NewMedian() MovingAverage {
+ return new(medianWindow)
+}
+
+type medianEwma struct {
+ count uint
+ median MovingAverage
+ MovingAverage
+}
+
+func (s *medianEwma) Add(v float64) {
+ s.median.Add(v)
+ if s.count >= 2 {
+ s.MovingAverage.Add(s.median.Value())
+ }
+ s.count++
+}
+
+// NewMedianEwma is an ewma based MovingAverage which gets its values from a median MovingAverage.
+func NewMedianEwma(age ...float64) MovingAverage {
+ return &medianEwma{
+ MovingAverage: ewma.NewMovingAverage(age...),
+ median: NewMedian(),
+ }
+}
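A small illustration of why the 3-sample median window is useful as an ewma front-end: a single spiky sample does not move the reported value (the expected output is worked out by hand from the code above).

```go
package main

import (
	"fmt"

	"github.com/vbauerster/mpb/decor"
)

func main() {
	m := decor.NewMedian()
	m.Add(10)
	m.Add(12)
	m.Add(500) // outlier
	fmt.Println(m.Value()) // 12, the median of {10, 12, 500}
}
```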
diff --git a/vendor/github.com/vbauerster/mpb/decor/name.go b/vendor/github.com/vbauerster/mpb/decor/name.go
new file mode 100644
index 000000000..a5a5d1469
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/name.go
@@ -0,0 +1,45 @@
+package decor
+
+// StaticName returns name decorator.
+//
+// `name` string to display
+//
+// `wcc` optional WC config
+func StaticName(name string, wcc ...WC) Decorator {
+ return Name(name, wcc...)
+}
+
+// Name returns name decorator.
+//
+// `name` string to display
+//
+// `wcc` optional WC config
+func Name(name string, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ wc.Init()
+ d := &nameDecorator{
+ WC: wc,
+ msg: name,
+ }
+ return d
+}
+
+type nameDecorator struct {
+ WC
+ msg string
+ complete *string
+}
+
+func (d *nameDecorator) Decor(st *Statistics) string {
+ if st.Completed && d.complete != nil {
+ return d.FormatMsg(*d.complete)
+ }
+ return d.FormatMsg(d.msg)
+}
+
+func (d *nameDecorator) OnCompleteMessage(msg string) {
+ d.complete = &msg
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/percentage.go b/vendor/github.com/vbauerster/mpb/decor/percentage.go
new file mode 100644
index 000000000..078fbcf89
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/percentage.go
@@ -0,0 +1,39 @@
+package decor
+
+import (
+ "fmt"
+
+ "github.com/vbauerster/mpb/internal"
+)
+
+// Percentage returns percentage decorator.
+//
+// `wcc` optional WC config
+func Percentage(wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ wc.Init()
+ d := &percentageDecorator{
+ WC: wc,
+ }
+ return d
+}
+
+type percentageDecorator struct {
+ WC
+ completeMsg *string
+}
+
+func (d *percentageDecorator) Decor(st *Statistics) string {
+ if st.Completed && d.completeMsg != nil {
+ return d.FormatMsg(*d.completeMsg)
+ }
+ str := fmt.Sprintf("%d %%", internal.Percentage(st.Total, st.Current, 100))
+ return d.FormatMsg(str)
+}
+
+func (d *percentageDecorator) OnCompleteMessage(msg string) {
+ d.completeMsg = &msg
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/speed.go b/vendor/github.com/vbauerster/mpb/decor/speed.go
new file mode 100644
index 000000000..395e5d04d
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/speed.go
@@ -0,0 +1,270 @@
+package decor
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/VividCortex/ewma"
+)
+
+type SpeedKiB float64
+
+func (s SpeedKiB) Format(st fmt.State, verb rune) {
+ prec, ok := st.Precision()
+
+ if verb == 'd' || !ok {
+ prec = 0
+ }
+ if verb == 'f' && !ok {
+ prec = 6
+ }
+	// retain old behavior if the s verb is used
+ if verb == 's' {
+ prec = 1
+ }
+
+ var res, unit string
+ switch {
+ case s >= TiB:
+ unit = "TiB/s"
+ res = strconv.FormatFloat(float64(s)/TiB, 'f', prec, 64)
+ case s >= GiB:
+ unit = "GiB/s"
+ res = strconv.FormatFloat(float64(s)/GiB, 'f', prec, 64)
+ case s >= MiB:
+ unit = "MiB/s"
+ res = strconv.FormatFloat(float64(s)/MiB, 'f', prec, 64)
+ case s >= KiB:
+ unit = "KiB/s"
+ res = strconv.FormatFloat(float64(s)/KiB, 'f', prec, 64)
+ default:
+ unit = "b/s"
+ res = strconv.FormatInt(int64(s), 10)
+ }
+
+ if st.Flag(' ') {
+ res += " "
+ }
+ res += unit
+
+ if w, ok := st.Width(); ok {
+ if len(res) < w {
+ pad := strings.Repeat(" ", w-len(res))
+ if st.Flag(int('-')) {
+ res += pad
+ } else {
+ res = pad + res
+ }
+ }
+ }
+
+ io.WriteString(st, res)
+}
+
+type SpeedKB float64
+
+func (s SpeedKB) Format(st fmt.State, verb rune) {
+ prec, ok := st.Precision()
+
+ if verb == 'd' || !ok {
+ prec = 0
+ }
+ if verb == 'f' && !ok {
+ prec = 6
+ }
+	// retain old behavior if the s verb is used
+ if verb == 's' {
+ prec = 1
+ }
+
+ var res, unit string
+ switch {
+ case s >= TB:
+ unit = "TB/s"
+ res = strconv.FormatFloat(float64(s)/TB, 'f', prec, 64)
+ case s >= GB:
+ unit = "GB/s"
+ res = strconv.FormatFloat(float64(s)/GB, 'f', prec, 64)
+ case s >= MB:
+ unit = "MB/s"
+ res = strconv.FormatFloat(float64(s)/MB, 'f', prec, 64)
+ case s >= KB:
+ unit = "kB/s"
+ res = strconv.FormatFloat(float64(s)/KB, 'f', prec, 64)
+ default:
+ unit = "b/s"
+ res = strconv.FormatInt(int64(s), 10)
+ }
+
+ if st.Flag(' ') {
+ res += " "
+ }
+ res += unit
+
+ if w, ok := st.Width(); ok {
+ if len(res) < w {
+ pad := strings.Repeat(" ", w-len(res))
+ if st.Flag(int('-')) {
+ res += pad
+ } else {
+ res = pad + res
+ }
+ }
+ }
+
+ io.WriteString(st, res)
+}
+
+// EwmaSpeed exponential-weighted-moving-average based speed decorator,
+// with dynamic unit measure adjustment.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `unitFormat` printf compatible verb for value, like "%f" or "%d"
+//
+// `average` MovingAverage implementation
+//
+// `wcc` optional WC config
+//
+// unitFormat example if UnitKiB is chosen:
+//
+// "%.1f" = "1.0MiB/s" or "% .1f" = "1.0 MiB/s"
+func EwmaSpeed(unit int, unitFormat string, age float64, wcc ...WC) Decorator {
+ return MovingAverageSpeed(unit, unitFormat, ewma.NewMovingAverage(age), wcc...)
+}
+
+// MovingAverageSpeed decorator relies on MovingAverage implementation to calculate its average.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `unitFormat` printf compatible verb for value, like "%f" or "%d"
+//
+// `average` MovingAverage implementation
+//
+// `wcc` optional WC config
+func MovingAverageSpeed(unit int, unitFormat string, average MovingAverage, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ wc.Init()
+ d := &movingAverageSpeed{
+ WC: wc,
+ unit: unit,
+ unitFormat: unitFormat,
+ average: average,
+ }
+ return d
+}
+
+type movingAverageSpeed struct {
+ WC
+ unit int
+ unitFormat string
+ average ewma.MovingAverage
+ msg string
+ completeMsg *string
+}
+
+func (d *movingAverageSpeed) Decor(st *Statistics) string {
+ if st.Completed {
+ if d.completeMsg != nil {
+ return d.FormatMsg(*d.completeMsg)
+ }
+ return d.FormatMsg(d.msg)
+ }
+
+ speed := d.average.Value()
+ switch d.unit {
+ case UnitKiB:
+ d.msg = fmt.Sprintf(d.unitFormat, SpeedKiB(speed))
+ case UnitKB:
+ d.msg = fmt.Sprintf(d.unitFormat, SpeedKB(speed))
+ default:
+ d.msg = fmt.Sprintf(d.unitFormat, speed)
+ }
+
+ return d.FormatMsg(d.msg)
+}
+
+func (s *movingAverageSpeed) NextAmount(n int, wdd ...time.Duration) {
+ var workDuration time.Duration
+ for _, wd := range wdd {
+ workDuration = wd
+ }
+ speed := float64(n) / workDuration.Seconds() / 1000
+ if math.IsInf(speed, 0) || math.IsNaN(speed) {
+ return
+ }
+ s.average.Add(speed)
+}
+
+func (d *movingAverageSpeed) OnCompleteMessage(msg string) {
+ d.completeMsg = &msg
+}
+
+// AverageSpeed decorator with dynamic unit measure adjustment.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `unitFormat` printf compatible verb for value, like "%f" or "%d"
+//
+// `wcc` optional WC config
+//
+// unitFormat example if UnitKiB is chosen:
+//
+// "%.1f" = "1.0MiB/s" or "% .1f" = "1.0 MiB/s"
+func AverageSpeed(unit int, unitFormat string, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ wc.Init()
+ d := &averageSpeed{
+ WC: wc,
+ unit: unit,
+ unitFormat: unitFormat,
+ startTime: time.Now(),
+ }
+ return d
+}
+
+type averageSpeed struct {
+ WC
+ unit int
+ unitFormat string
+ startTime time.Time
+ msg string
+ completeMsg *string
+}
+
+func (d *averageSpeed) Decor(st *Statistics) string {
+ if st.Completed {
+ if d.completeMsg != nil {
+ return d.FormatMsg(*d.completeMsg)
+ }
+ return d.FormatMsg(d.msg)
+ }
+
+ timeElapsed := time.Since(d.startTime)
+ speed := float64(st.Current) / timeElapsed.Seconds()
+
+ switch d.unit {
+ case UnitKiB:
+ d.msg = fmt.Sprintf(d.unitFormat, SpeedKiB(speed))
+ case UnitKB:
+ d.msg = fmt.Sprintf(d.unitFormat, SpeedKB(speed))
+ default:
+ d.msg = fmt.Sprintf(d.unitFormat, speed)
+ }
+
+ return d.FormatMsg(d.msg)
+}
+
+func (d *averageSpeed) OnCompleteMessage(msg string) {
+ d.completeMsg = &msg
+}
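For orientation, a hedged sketch of how the two fmt.Formatter speed types added above behave, assuming the hunk belongs to the decor package at github.com/vbauerster/mpb/decor (the KiB/KB constants themselves are defined elsewhere in that package):

```go
package main

import (
	"fmt"

	"github.com/vbauerster/mpb/decor"
)

func main() {
	// Binary (IEC) units, one digit of precision.
	fmt.Printf("%.1f\n", decor.SpeedKiB(2*1024*1024)) // 2.0MiB/s
	// The ' ' flag inserts a space before the unit.
	fmt.Printf("% .1f\n", decor.SpeedKiB(2*1024*1024)) // 2.0 MiB/s
	// Decimal (SI) units via SpeedKB.
	fmt.Printf("%.2f\n", decor.SpeedKB(1500000)) // 1.50MB/s
}
```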
diff --git a/vendor/github.com/vbauerster/mpb/doc.go b/vendor/github.com/vbauerster/mpb/doc.go
new file mode 100644
index 000000000..16245956a
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/doc.go
@@ -0,0 +1,6 @@
+// Copyright (C) 2016-2018 Vladimir Bauer
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mpb is a library for rendering progress bars in terminal applications.
+package mpb
diff --git a/vendor/github.com/vbauerster/mpb/internal/percentage.go b/vendor/github.com/vbauerster/mpb/internal/percentage.go
new file mode 100644
index 000000000..3c8defb7d
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/internal/percentage.go
@@ -0,0 +1,10 @@
+package internal
+
+// Percentage is a helper function to calculate a percentage.
+func Percentage(total, current, width int64) int64 {
+ if total <= 0 {
+ return 0
+ }
+ p := float64(width*current) / float64(total)
+ return int64(Round(p))
+}
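As a sanity check on the scaling math, a hypothetical table-driven test (it would have to live inside the internal package, since that package cannot be imported from outside the mpb module):

```go
package internal

import "testing"

func TestPercentageSpotChecks(t *testing.T) {
	cases := []struct{ total, current, width, want int64 }{
		{400, 100, 80, 20}, // 80*100/400 = 20
		{3, 2, 100, 67},    // 100*2/3 = 66.67, rounded half away from zero
		{0, 5, 80, 0},      // non-positive total short-circuits to 0
	}
	for _, c := range cases {
		if got := Percentage(c.total, c.current, c.width); got != c.want {
			t.Errorf("Percentage(%d, %d, %d) = %d, want %d",
				c.total, c.current, c.width, got, c.want)
		}
	}
}
```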
diff --git a/vendor/github.com/vbauerster/mpb/internal/round.go b/vendor/github.com/vbauerster/mpb/internal/round.go
new file mode 100644
index 000000000..c54a789d2
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/internal/round.go
@@ -0,0 +1,49 @@
+package internal
+
+import "math"
+
+const (
+ uvone = 0x3FF0000000000000
+ mask = 0x7FF
+ shift = 64 - 11 - 1
+ bias = 1023
+ signMask = 1 << 63
+ fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+// Round(±0) = ±0
+// Round(±Inf) = ±Inf
+// Round(NaN) = NaN
+func Round(x float64) float64 {
+ // Round is a faster implementation of:
+ //
+ // func Round(x float64) float64 {
+ // t := Trunc(x)
+ // if Abs(x-t) >= 0.5 {
+ // return t + Copysign(1, x)
+ // }
+ // return t
+ // }
+ bits := math.Float64bits(x)
+ e := uint(bits>>shift) & mask
+ if e < bias {
+ // Round abs(x) < 1 including denormals.
+ bits &= signMask // +-0
+ if e == bias-1 {
+ bits |= uvone // +-1
+ }
+ } else if e < bias+shift {
+ // Round any abs(x) >= 1 containing a fractional component [0,1).
+ //
+ // Numbers with larger exponents are returned unchanged since they
+ // must be either an integer, infinity, or NaN.
+ const half = 1 << (shift - 1)
+ e -= bias
+ bits += half >> e
+ bits &^= fracMask >> e
+ }
+ return math.Float64frombits(bits)
+}
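The half-away-from-zero behavior described in the comment can be spot-checked the same way (again a hypothetical in-package test, not part of the vendored tree):

```go
package internal

import (
	"math"
	"testing"
)

func TestRoundSpotChecks(t *testing.T) {
	if got := Round(2.5); got != 3 {
		t.Errorf("Round(2.5) = %v, want 3", got)
	}
	if got := Round(-2.5); got != -3 {
		t.Errorf("Round(-2.5) = %v, want -3", got)
	}
	if got := Round(2.4); got != 2 {
		t.Errorf("Round(2.4) = %v, want 2", got)
	}
	if !math.IsNaN(Round(math.NaN())) {
		t.Error("Round(NaN) should be NaN")
	}
}
```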
diff --git a/vendor/github.com/vbauerster/mpb/options.go b/vendor/github.com/vbauerster/mpb/options.go
new file mode 100644
index 000000000..05d2ecf1f
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/options.go
@@ -0,0 +1,95 @@
+package mpb
+
+import (
+ "io"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "github.com/vbauerster/mpb/cwriter"
+)
+
+// ProgressOption is a function option which changes the default behavior of the
+// progress pool, if passed to mpb.New(...ProgressOption).
+type ProgressOption func(*pState)
+
+// WithWaitGroup provides a means to have a single join point.
+// If a *sync.WaitGroup is provided, you can safely call just p.Wait()
+// without calling Wait() on the provided *sync.WaitGroup.
+// Makes sense when there is more than one bar to render.
+func WithWaitGroup(wg *sync.WaitGroup) ProgressOption {
+ return func(s *pState) {
+ s.uwg = wg
+ }
+}
+
+// WithWidth overrides default width 80
+func WithWidth(w int) ProgressOption {
+ return func(s *pState) {
+ if w >= 0 {
+ s.width = w
+ }
+ }
+}
+
+// WithFormat overrides default bar format "[=>-]"
+func WithFormat(format string) ProgressOption {
+ return func(s *pState) {
+ if utf8.RuneCountInString(format) == formatLen {
+ s.format = format
+ }
+ }
+}
+
+// WithRefreshRate overrides default 120ms refresh rate
+func WithRefreshRate(d time.Duration) ProgressOption {
+ return func(s *pState) {
+ if d < 10*time.Millisecond {
+ return
+ }
+ s.rr = d
+ }
+}
+
+// WithManualRefresh disables internal auto refresh time.Ticker.
+// Refresh will occur upon receiving a value from the provided ch.
+func WithManualRefresh(ch <-chan time.Time) ProgressOption {
+ return func(s *pState) {
+ s.manualRefreshCh = ch
+ }
+}
+
+// WithCancel provides your cancel channel,
+// which you plan to close at some point.
+func WithCancel(ch <-chan struct{}) ProgressOption {
+ return func(s *pState) {
+ s.cancel = ch
+ }
+}
+
+// WithShutdownNotifier sets a channel that will be closed after all bars have been rendered.
+func WithShutdownNotifier(ch chan struct{}) ProgressOption {
+ return func(s *pState) {
+ s.shutdownNotifier = ch
+ }
+}
+
+// WithOutput overrides default output os.Stdout
+func WithOutput(w io.Writer) ProgressOption {
+ return func(s *pState) {
+ if w == nil {
+ return
+ }
+ s.cw = cwriter.New(w)
+ }
+}
+
+// WithDebugOutput sets debug output.
+func WithDebugOutput(w io.Writer) ProgressOption {
+ return func(s *pState) {
+ if w == nil {
+ return
+ }
+ s.debugOut = w
+ }
+}
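A minimal sketch of how these functional options combine at the call site; mpb.New and p.Wait come from progress.go later in this diff, everything else here is illustrative:

```go
package main

import (
	"os"
	"sync"
	"time"

	"github.com/vbauerster/mpb"
)

func main() {
	var wg sync.WaitGroup

	p := mpb.New(
		mpb.WithWaitGroup(&wg),                   // p.Wait() will also wait on wg
		mpb.WithWidth(64),                        // override the default width of 80
		mpb.WithRefreshRate(80*time.Millisecond), // values below 10ms are ignored
		mpb.WithOutput(os.Stderr),                // render to stderr instead of stdout
	)

	// ... add bars with p.AddBar and do the work ...

	p.Wait()
}
```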
diff --git a/vendor/github.com/vbauerster/mpb/options_go1.7.go b/vendor/github.com/vbauerster/mpb/options_go1.7.go
new file mode 100644
index 000000000..ca9a5bad8
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/options_go1.7.go
@@ -0,0 +1,15 @@
+//+build go1.7
+
+package mpb
+
+import "context"
+
+// WithContext uses the provided context for cancellation purposes.
+func WithContext(ctx context.Context) ProgressOption {
+ return func(s *pState) {
+ if ctx == nil {
+ panic("ctx must not be nil")
+ }
+ s.cancel = ctx.Done()
+ }
+}
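On Go 1.7+ the same cancellation can be driven by a context; a hedged sketch:

```go
package main

import (
	"context"
	"time"

	"github.com/vbauerster/mpb"
)

func main() {
	// The context's Done channel is wired into the pool's cancel channel.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	p := mpb.New(mpb.WithContext(ctx))

	// ... add bars and do the work ...

	p.Wait()
}
```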
diff --git a/vendor/github.com/vbauerster/mpb/priority_queue.go b/vendor/github.com/vbauerster/mpb/priority_queue.go
new file mode 100644
index 000000000..7bc588c29
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/priority_queue.go
@@ -0,0 +1,40 @@
+package mpb
+
+import "container/heap"
+
+// A priorityQueue implements heap.Interface
+type priorityQueue []*Bar
+
+func (pq priorityQueue) Len() int { return len(pq) }
+
+func (pq priorityQueue) Less(i, j int) bool {
+ return pq[i].priority < pq[j].priority
+}
+
+func (pq priorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+func (pq *priorityQueue) Push(x interface{}) {
+ n := len(*pq)
+ bar := x.(*Bar)
+ bar.index = n
+ *pq = append(*pq, bar)
+}
+
+func (pq *priorityQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ bar := old[n-1]
+ bar.index = -1 // for safety
+ *pq = old[0 : n-1]
+ return bar
+}
+
+// update modifies the priority of a Bar in the queue.
+func (pq *priorityQueue) update(bar *Bar, priority int) {
+ bar.priority = priority
+ heap.Fix(pq, bar.index)
+}
diff --git a/vendor/github.com/vbauerster/mpb/progress.go b/vendor/github.com/vbauerster/mpb/progress.go
new file mode 100644
index 000000000..d95fe45b7
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/progress.go
@@ -0,0 +1,251 @@
+package mpb
+
+import (
+ "container/heap"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/vbauerster/mpb/cwriter"
+)
+
+const (
+ // default RefreshRate
+ prr = 120 * time.Millisecond
+ // default width
+ pwidth = 80
+ // default format
+ pformat = "[=>-]"
+)
+
+// Progress represents the container that renders Progress bars
+type Progress struct {
+ wg *sync.WaitGroup
+ uwg *sync.WaitGroup
+ operateState chan func(*pState)
+ done chan struct{}
+}
+
+type pState struct {
+ bHeap *priorityQueue
+ shutdownPending []*Bar
+ heapUpdated bool
+ zeroWait bool
+ idCounter int
+ width int
+ format string
+ rr time.Duration
+ cw *cwriter.Writer
+ pMatrix map[int][]chan int
+ aMatrix map[int][]chan int
+
+ // following are provided by user
+ uwg *sync.WaitGroup
+ manualRefreshCh <-chan time.Time
+ cancel <-chan struct{}
+ shutdownNotifier chan struct{}
+ waitBars map[*Bar]*Bar
+ debugOut io.Writer
+}
+
+// New creates a new Progress instance, which orchestrates the bar rendering process.
+// Accepts mpb.ProgressOption funcs for customization.
+func New(options ...ProgressOption) *Progress {
+ pq := make(priorityQueue, 0)
+ heap.Init(&pq)
+ s := &pState{
+ bHeap: &pq,
+ width: pwidth,
+ format: pformat,
+ cw: cwriter.New(os.Stdout),
+ rr: prr,
+ waitBars: make(map[*Bar]*Bar),
+ debugOut: ioutil.Discard,
+ }
+
+ for _, opt := range options {
+ if opt != nil {
+ opt(s)
+ }
+ }
+
+ p := &Progress{
+ uwg: s.uwg,
+ wg: new(sync.WaitGroup),
+ operateState: make(chan func(*pState)),
+ done: make(chan struct{}),
+ }
+ go p.serve(s)
+ return p
+}
+
+// AddBar creates a new progress bar and adds it to the container.
+func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
+ p.wg.Add(1)
+ result := make(chan *Bar)
+ select {
+ case p.operateState <- func(s *pState) {
+ options = append(options, barWidth(s.width), barFormat(s.format))
+ b := newBar(p.wg, s.idCounter, total, s.cancel, options...)
+ if b.runningBar != nil {
+ s.waitBars[b.runningBar] = b
+ } else {
+ heap.Push(s.bHeap, b)
+ s.heapUpdated = true
+ }
+ s.idCounter++
+ result <- b
+ }:
+ return <-result
+ case <-p.done:
+ p.wg.Done()
+ return nil
+ }
+}
+
+// Abort is only effective while the bar is in progress:
+// it removes the bar immediately, without waiting for its completion.
+// If the bar has already completed, there is nothing to abort.
+// If you need to remove a bar after completion, use the BarRemoveOnComplete BarOption.
+func (p *Progress) Abort(b *Bar, remove bool) {
+ select {
+ case p.operateState <- func(s *pState) {
+ if b.index < 0 {
+ return
+ }
+ if remove {
+ s.heapUpdated = heap.Remove(s.bHeap, b.index) != nil
+ }
+ s.shutdownPending = append(s.shutdownPending, b)
+ }:
+ case <-p.done:
+ }
+}
+
+// UpdateBarPriority provides a way to change a bar's order position.
+// Zero is the highest priority, i.e. the bar will be on top.
+func (p *Progress) UpdateBarPriority(b *Bar, priority int) {
+ select {
+ case p.operateState <- func(s *pState) { s.bHeap.update(b, priority) }:
+ case <-p.done:
+ }
+}
+
+// BarCount returns the number of bars.
+func (p *Progress) BarCount() int {
+ result := make(chan int, 1)
+ select {
+ case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }:
+ return <-result
+ case <-p.done:
+ return 0
+ }
+}
+
+// Wait first waits for the user-provided *sync.WaitGroup, if any,
+// then waits for all bars to complete and finally shuts down the master goroutine.
+// After this method has been called, there is no way to reuse the *Progress instance.
+func (p *Progress) Wait() {
+ if p.uwg != nil {
+ p.uwg.Wait()
+ }
+
+ p.wg.Wait()
+
+ select {
+ case p.operateState <- func(s *pState) { s.zeroWait = true }:
+ <-p.done
+ case <-p.done:
+ }
+}
+
+func (s *pState) updateSyncMatrix() {
+ s.pMatrix = make(map[int][]chan int)
+ s.aMatrix = make(map[int][]chan int)
+ for i := 0; i < s.bHeap.Len(); i++ {
+ bar := (*s.bHeap)[i]
+ table := bar.wSyncTable()
+ pRow, aRow := table[0], table[1]
+
+ for i, ch := range pRow {
+ s.pMatrix[i] = append(s.pMatrix[i], ch)
+ }
+
+ for i, ch := range aRow {
+ s.aMatrix[i] = append(s.aMatrix[i], ch)
+ }
+ }
+}
+
+func (s *pState) render(tw int) {
+ if s.heapUpdated {
+ s.updateSyncMatrix()
+ s.heapUpdated = false
+ }
+ syncWidth(s.pMatrix)
+ syncWidth(s.aMatrix)
+
+ for i := 0; i < s.bHeap.Len(); i++ {
+ bar := (*s.bHeap)[i]
+ go bar.render(s.debugOut, tw)
+ }
+
+ if err := s.flush(s.bHeap.Len()); err != nil {
+ fmt.Fprintf(s.debugOut, "%s %s %v\n", "[mpb]", time.Now(), err)
+ }
+}
+
+func (s *pState) flush(lineCount int) error {
+ for s.bHeap.Len() > 0 {
+ bar := heap.Pop(s.bHeap).(*Bar)
+ frameReader := <-bar.frameReaderCh
+ defer func() {
+ if frameReader.toShutdown {
+ // shutdown at next flush, in other words decrement underlying WaitGroup
+ // only after the bar with completed state has been flushed.
+ // this ensures no bar ends up with less than 100% rendered.
+ s.shutdownPending = append(s.shutdownPending, bar)
+ if replacementBar, ok := s.waitBars[bar]; ok {
+ heap.Push(s.bHeap, replacementBar)
+ s.heapUpdated = true
+ delete(s.waitBars, bar)
+ }
+ if frameReader.removeOnComplete {
+ s.heapUpdated = true
+ return
+ }
+ }
+ heap.Push(s.bHeap, bar)
+ }()
+ s.cw.ReadFrom(frameReader)
+ lineCount += frameReader.extendedLines
+ }
+
+ for i := len(s.shutdownPending) - 1; i >= 0; i-- {
+ close(s.shutdownPending[i].shutdown)
+ s.shutdownPending = s.shutdownPending[:i]
+ }
+
+ return s.cw.Flush(lineCount)
+}
+
+func syncWidth(matrix map[int][]chan int) {
+ for _, column := range matrix {
+ column := column
+ go func() {
+ var maxWidth int
+ for _, ch := range column {
+ w := <-ch
+ if w > maxWidth {
+ maxWidth = w
+ }
+ }
+ for _, ch := range column {
+ ch <- maxWidth
+ }
+ }()
+ }
+}
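Putting the pieces of this file together, a hedged end-to-end sketch (bar.IncrBy is defined in bar.go, outside this hunk, but its call shape is visible in proxyreader.go below; decorators are omitted for brevity):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
)

func main() {
	p := mpb.New() // defaults: stdout, width 80, "[=>-]" format, 120ms refresh

	total := int64(100)
	bar := p.AddBar(total)

	start := time.Now()
	for i := int64(0); i < total; i++ {
		time.Sleep(10 * time.Millisecond) // simulated work
		bar.IncrBy(1, time.Since(start))  // advance the bar, reporting the work duration
		start = time.Now()
	}

	p.Wait() // waits for all bars to complete, then shuts down the serve goroutine
}
```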
diff --git a/vendor/github.com/vbauerster/mpb/progress_posix.go b/vendor/github.com/vbauerster/mpb/progress_posix.go
new file mode 100644
index 000000000..545245a42
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/progress_posix.go
@@ -0,0 +1,70 @@
+// +build !windows
+
+package mpb
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+)
+
+func (p *Progress) serve(s *pState) {
+
+ var ticker *time.Ticker
+ var refreshCh <-chan time.Time
+ var winch chan os.Signal
+ var resumeTimer *time.Timer
+ var resumeEvent <-chan time.Time
+ winchIdleDur := s.rr * 2
+
+ if s.manualRefreshCh == nil {
+ ticker = time.NewTicker(s.rr)
+ refreshCh = ticker.C
+ winch = make(chan os.Signal, 2)
+ signal.Notify(winch, syscall.SIGWINCH)
+ } else {
+ refreshCh = s.manualRefreshCh
+ }
+
+ for {
+ select {
+ case op := <-p.operateState:
+ op(s)
+ case <-refreshCh:
+ if s.zeroWait {
+ if s.manualRefreshCh == nil {
+ signal.Stop(winch)
+ ticker.Stop()
+ }
+ if s.shutdownNotifier != nil {
+ close(s.shutdownNotifier)
+ }
+ close(p.done)
+ return
+ }
+ tw, err := s.cw.GetWidth()
+ if err != nil {
+ tw = s.width
+ }
+ s.render(tw)
+ case <-winch:
+ tw, err := s.cw.GetWidth()
+ if err != nil {
+ tw = s.width
+ }
+ s.render(tw - tw/8)
+ if resumeTimer != nil && resumeTimer.Reset(winchIdleDur) {
+ break
+ }
+ ticker.Stop()
+ resumeTimer = time.NewTimer(winchIdleDur)
+ resumeEvent = resumeTimer.C
+ case <-resumeEvent:
+ ticker = time.NewTicker(s.rr)
+ refreshCh = ticker.C
+ resumeEvent = nil
+ resumeTimer = nil
+ }
+ }
+}
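When WithManualRefresh is used, the select loop above installs neither the ticker nor the SIGWINCH handler; rendering is driven entirely by the caller. A hedged sketch that pairs it with WithShutdownNotifier so the driver goroutine knows when to stop (bar.IncrBy again comes from bar.go, outside this hunk):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
)

func main() {
	refresh := make(chan time.Time)
	shutdown := make(chan struct{})

	p := mpb.New(
		mpb.WithManualRefresh(refresh),
		mpb.WithShutdownNotifier(shutdown), // closed by the pool when it shuts down
	)

	// Drive rendering from our own ticker until the pool shuts down.
	go func() {
		tick := time.NewTicker(50 * time.Millisecond)
		defer tick.Stop()
		for {
			select {
			case t := <-tick.C:
				select {
				case refresh <- t: // render one frame
				case <-shutdown:
					return
				}
			case <-shutdown:
				return
			}
		}
	}()

	bar := p.AddBar(10)
	for i := 0; i < 10; i++ {
		time.Sleep(20 * time.Millisecond)
		bar.IncrBy(1, 20*time.Millisecond)
	}

	p.Wait()
}
```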
diff --git a/vendor/github.com/vbauerster/mpb/progress_windows.go b/vendor/github.com/vbauerster/mpb/progress_windows.go
new file mode 100644
index 000000000..cab03d36c
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/progress_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package mpb
+
+import (
+ "time"
+)
+
+func (p *Progress) serve(s *pState) {
+
+ var ticker *time.Ticker
+ var refreshCh <-chan time.Time
+
+ if s.manualRefreshCh == nil {
+ ticker = time.NewTicker(s.rr)
+ refreshCh = ticker.C
+ } else {
+ refreshCh = s.manualRefreshCh
+ }
+
+ for {
+ select {
+ case op := <-p.operateState:
+ op(s)
+ case <-refreshCh:
+ if s.zeroWait {
+ if s.manualRefreshCh == nil {
+ ticker.Stop()
+ }
+ if s.shutdownNotifier != nil {
+ close(s.shutdownNotifier)
+ }
+ close(p.done)
+ return
+ }
+ tw, err := s.cw.GetWidth()
+ if err != nil {
+ tw = s.width
+ }
+ s.render(tw)
+ }
+ }
+}
diff --git a/vendor/github.com/vbauerster/mpb/proxyreader.go b/vendor/github.com/vbauerster/mpb/proxyreader.go
new file mode 100644
index 000000000..d2692ccf4
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/proxyreader.go
@@ -0,0 +1,22 @@
+package mpb
+
+import (
+ "io"
+ "time"
+)
+
+// proxyReader is an io.Reader wrapper that proxies read byte counts to the bar.
+type proxyReader struct {
+ io.ReadCloser
+ bar *Bar
+ iT time.Time
+}
+
+func (pr *proxyReader) Read(p []byte) (n int, err error) {
+ n, err = pr.ReadCloser.Read(p)
+ if n > 0 {
+ pr.bar.IncrBy(n, time.Since(pr.iT))
+ pr.iT = time.Now()
+ }
+ return
+}
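The same pattern can be reproduced from user code: wrap any io.Reader so that bytes read advance a bar. A hedged sketch (the exported constructor for this proxy lives in bar.go, outside this hunk, so a hand-rolled wrapper is shown instead):

```go
package example

import (
	"io"
	"time"

	"github.com/vbauerster/mpb"
)

// countingReader mirrors mpb's unexported proxyReader: every successful
// Read reports the byte count and elapsed time to the bar via IncrBy.
type countingReader struct {
	io.Reader
	bar *mpb.Bar
	t   time.Time
}

func (cr *countingReader) Read(p []byte) (n int, err error) {
	n, err = cr.Reader.Read(p)
	if n > 0 {
		cr.bar.IncrBy(n, time.Since(cr.t))
		cr.t = time.Now()
	}
	return n, err
}

// copyWithProgress copies src to dst while advancing bar.
func copyWithProgress(dst io.Writer, src io.Reader, bar *mpb.Bar) (int64, error) {
	return io.Copy(dst, &countingReader{Reader: src, bar: bar, t: time.Now()})
}
```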
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE b/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE
deleted file mode 100644
index 511970333..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
-Copyright (c) 2012-2015, Sergey Cherepanov
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/README.md b/vendor/gopkg.in/cheggaaa/pb.v1/README.md
deleted file mode 100644
index 1295df70e..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/README.md
+++ /dev/null
@@ -1,177 +0,0 @@
-# Terminal progress bar for Go
-
-Simple progress bar for console programs.
-
-Please check the new version https://github.com/cheggaaa/pb/tree/v2 (currently, it's beta)
-
-## Installation
-
-```
-go get gopkg.in/cheggaaa/pb.v1
-```
-
-## Usage
-
-```Go
-package main
-
-import (
- "gopkg.in/cheggaaa/pb.v1"
- "time"
-)
-
-func main() {
- count := 100000
- bar := pb.StartNew(count)
- for i := 0; i < count; i++ {
- bar.Increment()
- time.Sleep(time.Millisecond)
- }
- bar.FinishPrint("The End!")
-}
-
-```
-
-Result will be like this:
-
-```
-> go run test.go
-37158 / 100000 [================>_______________________________] 37.16% 1m11s
-```
-
-## Customization
-
-```Go
-// create bar
-bar := pb.New(count)
-
-// refresh info every second (default 200ms)
-bar.SetRefreshRate(time.Second)
-
-// show percents (by default already true)
-bar.ShowPercent = true
-
-// show bar (by default already true)
-bar.ShowBar = true
-
-// no counters
-bar.ShowCounters = false
-
-// show "time left"
-bar.ShowTimeLeft = true
-
-// show average speed
-bar.ShowSpeed = true
-
-// sets the width of the progress bar
-bar.SetWidth(80)
-
-// sets the width of the progress bar, but if terminal size smaller will be ignored
-bar.SetMaxWidth(80)
-
-// convert output to readable format (like KB, MB)
-bar.SetUnits(pb.U_BYTES)
-
-// and start
-bar.Start()
-```
-
-## Progress bar for IO Operations
-
-```go
-// create and start bar
-bar := pb.New(myDataLen).SetUnits(pb.U_BYTES)
-bar.Start()
-
-// my io.Reader
-r := myReader
-
-// my io.Writer
-w := myWriter
-
-// create proxy reader
-reader := bar.NewProxyReader(r)
-
-// and copy from pb reader
-io.Copy(w, reader)
-
-```
-
-```go
-// create and start bar
-bar := pb.New(myDataLen).SetUnits(pb.U_BYTES)
-bar.Start()
-
-// my io.Reader
-r := myReader
-
-// my io.Writer
-w := myWriter
-
-// create multi writer
-writer := io.MultiWriter(w, bar)
-
-// and copy
-io.Copy(writer, r)
-
-bar.Finish()
-```
-
-## Custom Progress Bar Look-and-feel
-
-```go
-bar.Format("<.- >")
-```
-
-## Multiple Progress Bars (experimental and unstable)
-
-Do not print to terminal while pool is active.
-
-```go
-package main
-
-import (
- "math/rand"
- "sync"
- "time"
-
- "gopkg.in/cheggaaa/pb.v1"
-)
-
-func main() {
- // create bars
- first := pb.New(200).Prefix("First ")
- second := pb.New(200).Prefix("Second ")
- third := pb.New(200).Prefix("Third ")
- // start pool
- pool, err := pb.StartPool(first, second, third)
- if err != nil {
- panic(err)
- }
- // update bars
- wg := new(sync.WaitGroup)
- for _, bar := range []*pb.ProgressBar{first, second, third} {
- wg.Add(1)
- go func(cb *pb.ProgressBar) {
- for n := 0; n < 200; n++ {
- cb.Increment()
- time.Sleep(time.Millisecond * time.Duration(rand.Intn(100)))
- }
- cb.Finish()
- wg.Done()
- }(bar)
- }
- wg.Wait()
- // close pool
- pool.Stop()
-}
-```
-
-The result will be as follows:
-
-```
-$ go run example/multiple.go
-First 34 / 200 [=========>---------------------------------------------] 17.00% 00m08s
-Second 42 / 200 [===========>------------------------------------------] 21.00% 00m06s
-Third 36 / 200 [=========>---------------------------------------------] 18.00% 00m08s
-```
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/format.go b/vendor/gopkg.in/cheggaaa/pb.v1/format.go
deleted file mode 100644
index 8bb8a7a1d..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/format.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package pb
-
-import (
- "fmt"
- "time"
-)
-
-type Units int
-
-const (
- // U_NO are default units, they represent a simple value and are not formatted at all.
- U_NO Units = iota
- // U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...)
- U_BYTES
- // U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...)
- U_BYTES_DEC
- // U_DURATION units are formatted in a human readable way (3h14m15s)
- U_DURATION
-)
-
-const (
- KiB = 1024
- MiB = 1048576
- GiB = 1073741824
- TiB = 1099511627776
-
- KB = 1e3
- MB = 1e6
- GB = 1e9
- TB = 1e12
-)
-
-func Format(i int64) *formatter {
- return &formatter{n: i}
-}
-
-type formatter struct {
- n int64
- unit Units
- width int
- perSec bool
-}
-
-func (f *formatter) To(unit Units) *formatter {
- f.unit = unit
- return f
-}
-
-func (f *formatter) Width(width int) *formatter {
- f.width = width
- return f
-}
-
-func (f *formatter) PerSec() *formatter {
- f.perSec = true
- return f
-}
-
-func (f *formatter) String() (out string) {
- switch f.unit {
- case U_BYTES:
- out = formatBytes(f.n)
- case U_BYTES_DEC:
- out = formatBytesDec(f.n)
- case U_DURATION:
- out = formatDuration(f.n)
- default:
- out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n)
- }
- if f.perSec {
- out += "/s"
- }
- return
-}
-
-// Convert bytes to human readable string. Like 2 MiB, 64.2 KiB, 52 B
-func formatBytes(i int64) (result string) {
- switch {
- case i >= TiB:
- result = fmt.Sprintf("%.02f TiB", float64(i)/TiB)
- case i >= GiB:
- result = fmt.Sprintf("%.02f GiB", float64(i)/GiB)
- case i >= MiB:
- result = fmt.Sprintf("%.02f MiB", float64(i)/MiB)
- case i >= KiB:
- result = fmt.Sprintf("%.02f KiB", float64(i)/KiB)
- default:
- result = fmt.Sprintf("%d B", i)
- }
- return
-}
-
-// Convert bytes to base-10 human readable string. Like 2 MB, 64.2 KB, 52 B
-func formatBytesDec(i int64) (result string) {
- switch {
- case i >= TB:
- result = fmt.Sprintf("%.02f TB", float64(i)/TB)
- case i >= GB:
- result = fmt.Sprintf("%.02f GB", float64(i)/GB)
- case i >= MB:
- result = fmt.Sprintf("%.02f MB", float64(i)/MB)
- case i >= KB:
- result = fmt.Sprintf("%.02f KB", float64(i)/KB)
- default:
- result = fmt.Sprintf("%d B", i)
- }
- return
-}
-
-func formatDuration(n int64) (result string) {
- d := time.Duration(n)
- if d > time.Hour*24 {
- result = fmt.Sprintf("%dd", d/24/time.Hour)
- d -= (d / time.Hour / 24) * (time.Hour * 24)
- }
- if d > time.Hour {
- result = fmt.Sprintf("%s%dh", result, d/time.Hour)
- d -= d / time.Hour * time.Hour
- }
- m := d / time.Minute
- d -= m * time.Minute
- s := d / time.Second
- result = fmt.Sprintf("%s%02dm%02ds", result, m, s)
- return
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb.go
deleted file mode 100644
index e03291b89..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go
+++ /dev/null
@@ -1,503 +0,0 @@
-// Simple console progress bars
-package pb
-
-import (
- "fmt"
- "io"
- "math"
- "strings"
- "sync"
- "sync/atomic"
- "time"
- "unicode/utf8"
-)
-
-// Current version
-const Version = "1.0.27"
-
-const (
- // Default refresh rate - 200ms
- DEFAULT_REFRESH_RATE = time.Millisecond * 200
- FORMAT = "[=>-]"
-)
-
-// DEPRECATED
-// variables for backward compatibility, from now do not work
-// use pb.Format and pb.SetRefreshRate
-var (
- DefaultRefreshRate = DEFAULT_REFRESH_RATE
- BarStart, BarEnd, Empty, Current, CurrentN string
-)
-
-// Create new progress bar object
-func New(total int) *ProgressBar {
- return New64(int64(total))
-}
-
-// Create new progress bar object using int64 as total
-func New64(total int64) *ProgressBar {
- pb := &ProgressBar{
- Total: total,
- RefreshRate: DEFAULT_REFRESH_RATE,
- ShowPercent: true,
- ShowCounters: true,
- ShowBar: true,
- ShowTimeLeft: true,
- ShowElapsedTime: false,
- ShowFinalTime: true,
- Units: U_NO,
- ManualUpdate: false,
- finish: make(chan struct{}),
- }
- return pb.Format(FORMAT)
-}
-
-// Create new object and start
-func StartNew(total int) *ProgressBar {
- return New(total).Start()
-}
-
-// Callback for custom output
-// For example:
-// bar.Callback = func(s string) {
-// mySuperPrint(s)
-// }
-//
-type Callback func(out string)
-
-type ProgressBar struct {
- current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
- previous int64
-
- Total int64
- RefreshRate time.Duration
- ShowPercent, ShowCounters bool
- ShowSpeed, ShowTimeLeft, ShowBar bool
- ShowFinalTime, ShowElapsedTime bool
- Output io.Writer
- Callback Callback
- NotPrint bool
- Units Units
- Width int
- ForceWidth bool
- ManualUpdate bool
- AutoStat bool
-
- // Default width for the time box.
- UnitsWidth int
- TimeBoxWidth int
-
- finishOnce sync.Once //Guards isFinish
- finish chan struct{}
- isFinish bool
-
- startTime time.Time
- startValue int64
-
- changeTime time.Time
-
- prefix, postfix string
-
- mu sync.Mutex
- lastPrint string
-
- BarStart string
- BarEnd string
- Empty string
- Current string
- CurrentN string
-
- AlwaysUpdate bool
-}
-
-// Start print
-func (pb *ProgressBar) Start() *ProgressBar {
- pb.startTime = time.Now()
- pb.startValue = atomic.LoadInt64(&pb.current)
- if atomic.LoadInt64(&pb.Total) == 0 {
- pb.ShowTimeLeft = false
- pb.ShowPercent = false
- pb.AutoStat = false
- }
- if !pb.ManualUpdate {
- pb.Update() // Initial printing of the bar before running the bar refresher.
- go pb.refresher()
- }
- return pb
-}
-
-// Increment current value
-func (pb *ProgressBar) Increment() int {
- return pb.Add(1)
-}
-
-// Get current value
-func (pb *ProgressBar) Get() int64 {
- c := atomic.LoadInt64(&pb.current)
- return c
-}
-
-// Set current value
-func (pb *ProgressBar) Set(current int) *ProgressBar {
- return pb.Set64(int64(current))
-}
-
-// Set64 sets the current value as int64
-func (pb *ProgressBar) Set64(current int64) *ProgressBar {
- atomic.StoreInt64(&pb.current, current)
- return pb
-}
-
-// Add to current value
-func (pb *ProgressBar) Add(add int) int {
- return int(pb.Add64(int64(add)))
-}
-
-func (pb *ProgressBar) Add64(add int64) int64 {
- return atomic.AddInt64(&pb.current, add)
-}
-
-// Set prefix string
-func (pb *ProgressBar) Prefix(prefix string) *ProgressBar {
- pb.mu.Lock()
- defer pb.mu.Unlock()
- pb.prefix = prefix
- return pb
-}
-
-// Set postfix string
-func (pb *ProgressBar) Postfix(postfix string) *ProgressBar {
- pb.mu.Lock()
- defer pb.mu.Unlock()
- pb.postfix = postfix
- return pb
-}
-
-// Set custom format for bar
-// Example: bar.Format("[=>_]")
-// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter
-func (pb *ProgressBar) Format(format string) *ProgressBar {
- var formatEntries []string
- if utf8.RuneCountInString(format) == 5 {
- formatEntries = strings.Split(format, "")
- } else {
- formatEntries = strings.Split(format, "\x00")
- }
- if len(formatEntries) == 5 {
- pb.BarStart = formatEntries[0]
- pb.BarEnd = formatEntries[4]
- pb.Empty = formatEntries[3]
- pb.Current = formatEntries[1]
- pb.CurrentN = formatEntries[2]
- }
- return pb
-}
-
-// Set bar refresh rate
-func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar {
- pb.RefreshRate = rate
- return pb
-}
-
-// Set units
-// bar.SetUnits(U_NO) - by default
-// bar.SetUnits(U_BYTES) - for Mb, Kb, etc
-func (pb *ProgressBar) SetUnits(units Units) *ProgressBar {
- pb.Units = units
- return pb
-}
-
-// Set max width, if width is bigger than terminal width, will be ignored
-func (pb *ProgressBar) SetMaxWidth(width int) *ProgressBar {
- pb.Width = width
- pb.ForceWidth = false
- return pb
-}
-
-// Set bar width
-func (pb *ProgressBar) SetWidth(width int) *ProgressBar {
- pb.Width = width
- pb.ForceWidth = true
- return pb
-}
-
-// End print
-func (pb *ProgressBar) Finish() {
- //Protect multiple calls
- pb.finishOnce.Do(func() {
- close(pb.finish)
- pb.write(atomic.LoadInt64(&pb.Total), atomic.LoadInt64(&pb.current))
- pb.mu.Lock()
- defer pb.mu.Unlock()
- switch {
- case pb.Output != nil:
- fmt.Fprintln(pb.Output)
- case !pb.NotPrint:
- fmt.Println()
- }
- pb.isFinish = true
- })
-}
-
-// IsFinished return boolean
-func (pb *ProgressBar) IsFinished() bool {
- pb.mu.Lock()
- defer pb.mu.Unlock()
- return pb.isFinish
-}
-
-// End print and write string 'str'
-func (pb *ProgressBar) FinishPrint(str string) {
- pb.Finish()
- if pb.Output != nil {
- fmt.Fprintln(pb.Output, str)
- } else {
- fmt.Println(str)
- }
-}
-
-// implement io.Writer
-func (pb *ProgressBar) Write(p []byte) (n int, err error) {
- n = len(p)
- pb.Add(n)
- return
-}
-
-// implement io.Reader
-func (pb *ProgressBar) Read(p []byte) (n int, err error) {
- n = len(p)
- pb.Add(n)
- return
-}
-
-// Create new proxy reader over bar
-// Takes io.Reader or io.ReadCloser
-func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader {
- return &Reader{r, pb}
-}
-
-func (pb *ProgressBar) write(total, current int64) {
- pb.mu.Lock()
- defer pb.mu.Unlock()
- width := pb.GetWidth()
-
- var percentBox, countersBox, timeLeftBox, timeSpentBox, speedBox, barBox, end, out string
-
- // percents
- if pb.ShowPercent {
- var percent float64
- if total > 0 {
- percent = float64(current) / (float64(total) / float64(100))
- } else {
- percent = float64(current) / float64(100)
- }
- percentBox = fmt.Sprintf(" %6.02f%%", percent)
- }
-
- // counters
- if pb.ShowCounters {
- current := Format(current).To(pb.Units).Width(pb.UnitsWidth)
- if total > 0 {
- totalS := Format(total).To(pb.Units).Width(pb.UnitsWidth)
- countersBox = fmt.Sprintf(" %s / %s ", current, totalS)
- } else {
- countersBox = fmt.Sprintf(" %s / ? ", current)
- }
- }
-
- // time left
- currentFromStart := current - pb.startValue
- fromStart := time.Now().Sub(pb.startTime)
- lastChangeTime := pb.changeTime
- fromChange := lastChangeTime.Sub(pb.startTime)
-
- if pb.ShowElapsedTime {
- timeSpentBox = fmt.Sprintf(" %s ", (fromStart/time.Second)*time.Second)
- }
-
- select {
- case <-pb.finish:
- if pb.ShowFinalTime {
- var left time.Duration
- left = (fromStart / time.Second) * time.Second
- timeLeftBox = fmt.Sprintf(" %s", left.String())
- }
- default:
- if pb.ShowTimeLeft && currentFromStart > 0 {
- perEntry := fromChange / time.Duration(currentFromStart)
- var left time.Duration
- if total > 0 {
- left = time.Duration(total-currentFromStart) * perEntry
- left -= time.Since(lastChangeTime)
- left = (left / time.Second) * time.Second
- } else {
- left = time.Duration(currentFromStart) * perEntry
- left = (left / time.Second) * time.Second
- }
- if left > 0 {
- timeLeft := Format(int64(left)).To(U_DURATION).String()
- timeLeftBox = fmt.Sprintf(" %s", timeLeft)
- }
- }
- }
-
- if len(timeLeftBox) < pb.TimeBoxWidth {
- timeLeftBox = fmt.Sprintf("%s%s", strings.Repeat(" ", pb.TimeBoxWidth-len(timeLeftBox)), timeLeftBox)
- }
-
- // speed
- if pb.ShowSpeed && currentFromStart > 0 {
- fromStart := time.Now().Sub(pb.startTime)
- speed := float64(currentFromStart) / (float64(fromStart) / float64(time.Second))
- speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String()
- }
-
- barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeSpentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix)
- // bar
- if pb.ShowBar {
- size := width - barWidth
- if size > 0 {
- if total > 0 {
- curSize := int(math.Ceil((float64(current) / float64(total)) * float64(size)))
- emptySize := size - curSize
- barBox = pb.BarStart
- if emptySize < 0 {
- emptySize = 0
- }
- if curSize > size {
- curSize = size
- }
-
- cursorLen := escapeAwareRuneCountInString(pb.Current)
- if emptySize <= 0 {
- barBox += strings.Repeat(pb.Current, curSize/cursorLen)
- } else if curSize > 0 {
- cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN)
- cursorRepetitions := (curSize - cursorEndLen) / cursorLen
- barBox += strings.Repeat(pb.Current, cursorRepetitions)
- barBox += pb.CurrentN
- }
-
- emptyLen := escapeAwareRuneCountInString(pb.Empty)
- barBox += strings.Repeat(pb.Empty, emptySize/emptyLen)
- barBox += pb.BarEnd
- } else {
- pos := size - int(current)%int(size)
- barBox = pb.BarStart
- if pos-1 > 0 {
- barBox += strings.Repeat(pb.Empty, pos-1)
- }
- barBox += pb.Current
- if size-pos-1 > 0 {
- barBox += strings.Repeat(pb.Empty, size-pos-1)
- }
- barBox += pb.BarEnd
- }
- }
- }
-
- // check len
- out = pb.prefix + timeSpentBox + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix
-
- if cl := escapeAwareRuneCountInString(out); cl < width {
- end = strings.Repeat(" ", width-cl)
- }
-
- // and print!
- pb.lastPrint = out + end
- isFinish := pb.isFinish
-
- switch {
- case isFinish:
- return
- case pb.Output != nil:
- fmt.Fprint(pb.Output, "\r"+out+end)
- case pb.Callback != nil:
- pb.Callback(out + end)
- case !pb.NotPrint:
- fmt.Print("\r" + out + end)
- }
-}
-
-// GetTerminalWidth - returns terminal width for all platforms.
-func GetTerminalWidth() (int, error) {
- return terminalWidth()
-}
-
-func (pb *ProgressBar) GetWidth() int {
- if pb.ForceWidth {
- return pb.Width
- }
-
- width := pb.Width
- termWidth, _ := terminalWidth()
- if width == 0 || termWidth <= width {
- width = termWidth
- }
-
- return width
-}
-
-// Write the current state of the progressbar
-func (pb *ProgressBar) Update() {
- c := atomic.LoadInt64(&pb.current)
- p := atomic.LoadInt64(&pb.previous)
- t := atomic.LoadInt64(&pb.Total)
- if p != c {
- pb.mu.Lock()
- pb.changeTime = time.Now()
- pb.mu.Unlock()
- atomic.StoreInt64(&pb.previous, c)
- }
- pb.write(t, c)
- if pb.AutoStat {
- if c == 0 {
- pb.startTime = time.Now()
- pb.startValue = 0
- } else if c >= t && pb.isFinish != true {
- pb.Finish()
- }
- }
-}
-
-// String return the last bar print
-func (pb *ProgressBar) String() string {
- pb.mu.Lock()
- defer pb.mu.Unlock()
- return pb.lastPrint
-}
-
-// SetTotal atomically sets new total count
-func (pb *ProgressBar) SetTotal(total int) *ProgressBar {
- return pb.SetTotal64(int64(total))
-}
-
-// SetTotal64 atomically sets new total count
-func (pb *ProgressBar) SetTotal64(total int64) *ProgressBar {
- atomic.StoreInt64(&pb.Total, total)
- return pb
-}
-
-// Reset bar and set new total count
-// Does effect only on finished bar
-func (pb *ProgressBar) Reset(total int) *ProgressBar {
- pb.mu.Lock()
- defer pb.mu.Unlock()
- if pb.isFinish {
- pb.SetTotal(total).Set(0)
- atomic.StoreInt64(&pb.previous, 0)
- }
- return pb
-}
-
-// Internal loop for refreshing the progressbar
-func (pb *ProgressBar) refresher() {
- for {
- select {
- case <-pb.finish:
- return
- case <-time.After(pb.RefreshRate):
- pb.Update()
- }
- }
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go
deleted file mode 100644
index 17168f39a..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build appengine js
-
-package pb
-
-import "errors"
-
-// terminalWidth returns width of the terminal, which is not supported
-// and should always failed on appengine classic which is a sandboxed PaaS.
-func terminalWidth() (int, error) {
- return 0, errors.New("Not supported")
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go
deleted file mode 100644
index 9595e8236..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// +build windows
-
-package pb
-
-import (
- "errors"
- "fmt"
- "os"
- "sync"
- "syscall"
- "unsafe"
-)
-
-var tty = os.Stdin
-
-var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
- // GetConsoleScreenBufferInfo retrieves information about the
- // specified console screen buffer.
- // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
-
- // GetConsoleMode retrieves the current input mode of a console's
- // input buffer or the current output mode of a console screen buffer.
- // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
- getConsoleMode = kernel32.NewProc("GetConsoleMode")
-
- // SetConsoleMode sets the input mode of a console's input buffer
- // or the output mode of a console screen buffer.
- // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
- setConsoleMode = kernel32.NewProc("SetConsoleMode")
-
- // SetConsoleCursorPosition sets the cursor position in the
- // specified console screen buffer.
- // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
- setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
-)
-
-type (
- // Defines the coordinates of the upper left and lower right corners
- // of a rectangle.
- // See
- // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx
- smallRect struct {
- Left, Top, Right, Bottom int16
- }
-
- // Defines the coordinates of a character cell in a console screen
- // buffer. The origin of the coordinate system (0,0) is at the top, left cell
- // of the buffer.
- // See
- // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx
- coordinates struct {
- X, Y int16
- }
-
- word int16
-
- // Contains information about a console screen buffer.
- // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx
- consoleScreenBufferInfo struct {
- dwSize coordinates
- dwCursorPosition coordinates
- wAttributes word
- srWindow smallRect
- dwMaximumWindowSize coordinates
- }
-)
-
-// terminalWidth returns width of the terminal.
-func terminalWidth() (width int, err error) {
- var info consoleScreenBufferInfo
- _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
- if e != 0 {
- return 0, error(e)
- }
- return int(info.dwSize.X) - 1, nil
-}
-
-func getCursorPos() (pos coordinates, err error) {
- var info consoleScreenBufferInfo
- _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
- if e != 0 {
- return info.dwCursorPosition, error(e)
- }
- return info.dwCursorPosition, nil
-}
-
-func setCursorPos(pos coordinates) error {
- _, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0)
- if e != 0 {
- return error(e)
- }
- return nil
-}
-
-var ErrPoolWasStarted = errors.New("Bar pool was started")
-
-var echoLocked bool
-var echoLockMutex sync.Mutex
-
-var oldState word
-
-func lockEcho() (shutdownCh chan struct{}, err error) {
- echoLockMutex.Lock()
- defer echoLockMutex.Unlock()
- if echoLocked {
- err = ErrPoolWasStarted
- return
- }
- echoLocked = true
-
- if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 {
- err = fmt.Errorf("Can't get terminal settings: %v", e)
- return
- }
-
- newState := oldState
- const ENABLE_ECHO_INPUT = 0x0004
- const ENABLE_LINE_INPUT = 0x0002
- newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT))
- if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 {
- err = fmt.Errorf("Can't set terminal settings: %v", e)
- return
- }
-
- shutdownCh = make(chan struct{})
- return
-}
-
-func unlockEcho() (err error) {
- echoLockMutex.Lock()
- defer echoLockMutex.Unlock()
- if !echoLocked {
- return
- }
- echoLocked = false
- if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 {
- err = fmt.Errorf("Can't set terminal settings")
- }
- return
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go
deleted file mode 100644
index af4251760..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// +build linux darwin freebsd netbsd openbsd solaris dragonfly
-// +build !appengine !js
-
-package pb
-
-import (
- "errors"
- "fmt"
- "os"
- "os/signal"
- "sync"
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-var ErrPoolWasStarted = errors.New("Bar pool was started")
-
-var (
- echoLockMutex sync.Mutex
- origTermStatePtr *unix.Termios
- tty *os.File
- istty bool
-)
-
-func init() {
- echoLockMutex.Lock()
- defer echoLockMutex.Unlock()
-
- var err error
- tty, err = os.Open("/dev/tty")
- istty = true
- if err != nil {
- tty = os.Stdin
- istty = false
- }
-}
-
-// terminalWidth returns width of the terminal.
-func terminalWidth() (int, error) {
- if !istty {
- return 0, errors.New("Not Supported")
- }
- echoLockMutex.Lock()
- defer echoLockMutex.Unlock()
-
- fd := int(tty.Fd())
-
- ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
- if err != nil {
- return 0, err
- }
-
- return int(ws.Col), nil
-}
-
-func lockEcho() (shutdownCh chan struct{}, err error) {
- echoLockMutex.Lock()
- defer echoLockMutex.Unlock()
- if istty {
- if origTermStatePtr != nil {
- return shutdownCh, ErrPoolWasStarted
- }
-
- fd := int(tty.Fd())
-
- origTermStatePtr, err = unix.IoctlGetTermios(fd, ioctlReadTermios)
- if err != nil {
- return nil, fmt.Errorf("Can't get terminal settings: %v", err)
- }
-
- oldTermios := *origTermStatePtr
- newTermios := oldTermios
- newTermios.Lflag &^= syscall.ECHO
- newTermios.Lflag |= syscall.ICANON | syscall.ISIG
- newTermios.Iflag |= syscall.ICRNL
- if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil {
- return nil, fmt.Errorf("Can't set terminal settings: %v", err)
- }
-
- }
- shutdownCh = make(chan struct{})
- go catchTerminate(shutdownCh)
- return
-}
-
-func unlockEcho() error {
- echoLockMutex.Lock()
- defer echoLockMutex.Unlock()
- if istty {
- if origTermStatePtr == nil {
- return nil
- }
-
- fd := int(tty.Fd())
-
- if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, origTermStatePtr); err != nil {
- return fmt.Errorf("Can't set terminal settings: %v", err)
- }
-
- }
- origTermStatePtr = nil
-
- return nil
-}
-
-// listen exit signals and restore terminal state
-func catchTerminate(shutdownCh chan struct{}) {
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL)
- defer signal.Stop(sig)
- select {
- case <-shutdownCh:
- unlockEcho()
- case <-sig:
- unlockEcho()
- }
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool.go
deleted file mode 100644
index 392e7599c..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// +build linux darwin freebsd netbsd openbsd solaris dragonfly windows
-
-package pb
-
-import (
- "io"
- "sync"
- "time"
-)
-
-// Create and start new pool with given bars
-// You need call pool.Stop() after work
-func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) {
- pool = new(Pool)
- if err = pool.Start(); err != nil {
- return
- }
- pool.Add(pbs...)
- return
-}
-
-// NewPool initialises a pool with progress bars, but
-// doesn't start it. You need to call Start manually
-func NewPool(pbs ...*ProgressBar) (pool *Pool) {
- pool = new(Pool)
- pool.Add(pbs...)
- return
-}
-
-type Pool struct {
- Output io.Writer
- RefreshRate time.Duration
- bars []*ProgressBar
- lastBarsCount int
- shutdownCh chan struct{}
- workerCh chan struct{}
- m sync.Mutex
- finishOnce sync.Once
-}
-
-// Add progress bars.
-func (p *Pool) Add(pbs ...*ProgressBar) {
- p.m.Lock()
- defer p.m.Unlock()
- for _, bar := range pbs {
- bar.ManualUpdate = true
- bar.NotPrint = true
- bar.Start()
- p.bars = append(p.bars, bar)
- }
-}
-
-func (p *Pool) Start() (err error) {
- p.RefreshRate = DefaultRefreshRate
- p.shutdownCh, err = lockEcho()
- if err != nil {
- return
- }
- p.workerCh = make(chan struct{})
- go p.writer()
- return
-}
-
-func (p *Pool) writer() {
- var first = true
- defer func() {
- if first == false {
- p.print(false)
- } else {
- p.print(true)
- p.print(false)
- }
- close(p.workerCh)
- }()
-
- for {
- select {
- case <-time.After(p.RefreshRate):
- if p.print(first) {
- p.print(false)
- return
- }
- first = false
- case <-p.shutdownCh:
- return
- }
- }
-}
-
-// Restore terminal state and close pool
-func (p *Pool) Stop() error {
- p.finishOnce.Do(func() {
- if p.shutdownCh != nil {
- close(p.shutdownCh)
- }
- })
-
- // Wait for the worker to complete
- select {
- case <-p.workerCh:
- }
-
- return unlockEcho()
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go
deleted file mode 100644
index 63598d378..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build windows
-
-package pb
-
-import (
- "fmt"
- "log"
-)
-
-func (p *Pool) print(first bool) bool {
- p.m.Lock()
- defer p.m.Unlock()
- var out string
- if !first {
- coords, err := getCursorPos()
- if err != nil {
- log.Panic(err)
- }
- coords.Y -= int16(p.lastBarsCount)
- if coords.Y < 0 {
- coords.Y = 0
- }
- coords.X = 0
-
- err = setCursorPos(coords)
- if err != nil {
- log.Panic(err)
- }
- }
- isFinished := true
- for _, bar := range p.bars {
- if !bar.IsFinished() {
- isFinished = false
- }
- bar.Update()
- out += fmt.Sprintf("\r%s\n", bar.String())
- }
- if p.Output != nil {
- fmt.Fprint(p.Output, out)
- } else {
- fmt.Print(out)
- }
- p.lastBarsCount = len(p.bars)
- return isFinished
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go
deleted file mode 100644
index a8ae14d2f..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build linux darwin freebsd netbsd openbsd solaris dragonfly
-
-package pb
-
-import "fmt"
-
-func (p *Pool) print(first bool) bool {
- p.m.Lock()
- defer p.m.Unlock()
- var out string
- if !first {
- out = fmt.Sprintf("\033[%dA", p.lastBarsCount)
- }
- isFinished := true
- for _, bar := range p.bars {
- if !bar.IsFinished() {
- isFinished = false
- }
- bar.Update()
- out += fmt.Sprintf("\r%s\n", bar.String())
- }
- if p.Output != nil {
- fmt.Fprint(p.Output, out)
- } else {
- fmt.Print(out)
- }
- p.lastBarsCount = len(p.bars)
- return isFinished
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/reader.go b/vendor/gopkg.in/cheggaaa/pb.v1/reader.go
deleted file mode 100644
index 9f3148b54..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/reader.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package pb
-
-import (
- "io"
-)
-
-// It's proxy reader, implement io.Reader
-type Reader struct {
- io.Reader
- bar *ProgressBar
-}
-
-func (r *Reader) Read(p []byte) (n int, err error) {
- n, err = r.Reader.Read(p)
- r.bar.Add(n)
- return
-}
-
-// Close the reader when it implements io.Closer
-func (r *Reader) Close() (err error) {
- if closer, ok := r.Reader.(io.Closer); ok {
- return closer.Close()
- }
- return
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go b/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go
deleted file mode 100644
index c617c55ec..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package pb
-
-import (
- "github.com/mattn/go-runewidth"
- "regexp"
-)
-
-// Finds the control character sequences (like colors)
-var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")
-
-func escapeAwareRuneCountInString(s string) int {
- n := runewidth.StringWidth(s)
- for _, sm := range ctrlFinder.FindAllString(s, -1) {
- n -= runewidth.StringWidth(sm)
- }
- return n
-}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go
deleted file mode 100644
index 517ea8ed7..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build darwin freebsd netbsd openbsd dragonfly
-// +build !appengine
-
-package pb
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-const ioctlWriteTermios = syscall.TIOCSETA
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go
deleted file mode 100644
index b10f61859..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux solaris
-// +build !appengine
-
-package pb
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TCGETS
-const ioctlWriteTermios = unix.TCSETS
diff --git a/vendor/gopkg.in/fsnotify.v1/.editorconfig b/vendor/gopkg.in/fsnotify.v1/.editorconfig
new file mode 100644
index 000000000..ba49e3c23
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.editorconfig
@@ -0,0 +1,5 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
diff --git a/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md b/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..4ad1aed8f
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,11 @@
+Before reporting an issue, please ensure you are using the latest release of fsnotify.
+
+### Which operating system (GOOS) and version are you using?
+
+Linux: lsb_release -a
+macOS: sw_vers
+Windows: systeminfo | findstr /B /C:OS
+
+### Please describe the issue that occurred.
+
+### Are you able to reproduce the issue? Please provide steps to reproduce and a code sample if possible.
diff --git a/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md b/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..64ddf7cef
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,8 @@
+#### What does this pull request do?
+
+
+#### Where should the reviewer start?
+
+
+#### How should this be manually tested?
+
diff --git a/vendor/gopkg.in/fsnotify.v1/.gitignore b/vendor/gopkg.in/fsnotify.v1/.gitignore
new file mode 100644
index 000000000..4cd0cbaf4
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/gopkg.in/fsnotify.v1/.travis.yml b/vendor/gopkg.in/fsnotify.v1/.travis.yml
new file mode 100644
index 000000000..981d1bb81
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.travis.yml
@@ -0,0 +1,30 @@
+sudo: false
+language: go
+
+go:
+ - 1.8.x
+ - 1.9.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+before_script:
+ - go get -u github.com/golang/lint/golint
+
+script:
+ - go test -v --race ./...
+
+after_script:
+ - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - go vet ./...
+
+os:
+ - linux
+ - osx
+
+notifications:
+ email: false
diff --git a/vendor/gopkg.in/fsnotify.v1/AUTHORS b/vendor/gopkg.in/fsnotify.v1/AUTHORS
new file mode 100644
index 000000000..5ab5d41c5
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/AUTHORS
@@ -0,0 +1,52 @@
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L <aaron@bettercoder.net>
+Adrien Bustany <adrien@bustany.org>
+Amit Krishnan <amit.krishnan@oracle.com>
+Anmol Sethi <me@anmol.io>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Caleb Spare <cespare@gmail.com>
+Case Nelson <case@teammating.com>
+Chris Howey <chris@howey.me> <howeyc@gmail.com>
+Christoffer Buchholz <christoffer.buchholz@gmail.com>
+Daniel Wagner-Hall <dawagner@gmail.com>
+Dave Cheney <dave@cheney.net>
+Evan Phoenix <evan@fallingsnow.net>
+Francisco Souza <f@souza.cc>
+Hari haran <hariharan.uno@gmail.com>
+John C Barstow
+Kelvin Fo <vmirage@gmail.com>
+Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
+Matt Layher <mdlayher@gmail.com>
+Nathan Youngman <git@nathany.com>
+Nickolai Zeldovich <nickolai@csail.mit.edu>
+Patrick <patrick@dropbox.com>
+Paul Hammond <paul@paulhammond.org>
+Pawel Knap <pawelknap88@gmail.com>
+Pieter Droogendijk <pieter@binky.org.uk>
+Pursuit92 <JoshChase@techpursuit.net>
+Riku Voipio <riku.voipio@linaro.org>
+Rob Figueiredo <robfig@gmail.com>
+Rodrigo Chiossi <rodrigochiossi@gmail.com>
+Slawek Ligus <root@ooz.ie>
+Soge Zhang <zhssoge@gmail.com>
+Tiffany Jernigan <tiffany.jernigan@intel.com>
+Tilak Sharma <tilaks@google.com>
+Tom Payne <twpayne@gmail.com>
+Travis Cline <travis.cline@gmail.com>
+Tudor Golubenco <tudor.g@gmail.com>
+Vahe Khachikyan <vahe@live.ca>
+Yukang <moorekang@gmail.com>
+bronze1man <bronze1man@gmail.com>
+debrando <denis.brandolini@gmail.com>
+henrikedwards <henrik.edwards@gmail.com>
+铁哥 <guotie.9@gmail.com>
diff --git a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
new file mode 100644
index 000000000..be4d7ea2c
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
@@ -0,0 +1,317 @@
+# Changelog
+
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+  * fewer mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watching a directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
new file mode 100644
index 000000000..828a60b24
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
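Editorial aside, not part of the upstream CONTRIBUTING.md: the build tags mentioned above are the `// +build` constraint lines that appear on the platform-specific files later in this patch. A minimal sketch of the pattern:

```go
// +build linux

// A file guarded this way is compiled into the package only on Linux;
// each backend (inotify, kqueue, ReadDirectoryChangesW) is selected
// with the same mechanism.
package fsnotify
```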
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Set up [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also set up just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once set up, you can run the test suite on a given OS with a single command: `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/gopkg.in/fsnotify.v1/README.md b/vendor/gopkg.in/fsnotify.v1/README.md
index 3c891e349..399320741 100644
--- a/vendor/gopkg.in/fsnotify.v1/README.md
+++ b/vendor/gopkg.in/fsnotify.v1/README.md
@@ -8,14 +8,14 @@ fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather
go get -u golang.org/x/sys/...
```
-Cross platform: Windows, Linux, BSD and OS X.
+Cross platform: Windows, Linux, BSD and macOS.
|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
-|kqueue |BSD, OS X, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
-|FSEvents |OS X |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
@@ -23,7 +23,7 @@ Cross platform: Windows, Linux, BSD and OS X.
\* Android and iOS are untested.
-Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) for usage. Consult the [Wiki](https://github.com/fsnotify/fsnotify/wiki) for the FAQ and further information.
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
@@ -41,6 +41,35 @@ Please refer to [CONTRIBUTING][] before opening an issue or pull request.
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit; reaching this limit results in a "no space left on device" error.
+* BSD / macOS: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc"; reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
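Editorial aside, not part of the upstream README: a minimal Go sketch of checking the Linux limit mentioned above (the path comes straight from the FAQ entry; raising the limit itself is done with sysctl and requires root):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	// Read the per-user inotify watch limit referenced in the FAQ above.
	data, err := ioutil.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		fmt.Println("could not read limit:", err) // e.g. on non-Linux systems
		return
	}
	fmt.Println("max_user_watches:", strings.TrimSpace(string(data)))
}
```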
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
diff --git a/vendor/gopkg.in/fsnotify.v1/example_test.go b/vendor/gopkg.in/fsnotify.v1/example_test.go
new file mode 100644
index 000000000..700502cb3
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/example_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify_test
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func ExampleNewWatcher() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event := <-watcher.Events:
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err := <-watcher.Errors:
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify.go b/vendor/gopkg.in/fsnotify.v1/fsnotify.go
index e7f55fee7..190bf0de5 100644
--- a/vendor/gopkg.in/fsnotify.v1/fsnotify.go
+++ b/vendor/gopkg.in/fsnotify.v1/fsnotify.go
@@ -9,6 +9,7 @@ package fsnotify
import (
"bytes"
+ "errors"
"fmt"
)
@@ -60,3 +61,6 @@ func (op Op) String() string {
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
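Editorial aside, not part of the patch: `ErrEventOverflow` above is what the Linux backend now sends on `Watcher.Errors` when the kernel reports `IN_Q_OVERFLOW` (see the inotify.go hunk below). A minimal consumer sketch, assuming the usual `github.com/fsnotify/fsnotify` import path and a hypothetical `/tmp/foo` watch target:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add("/tmp/foo"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event := <-watcher.Events:
			log.Println("event:", event)
		case err := <-watcher.Errors:
			if err == fsnotify.ErrEventOverflow {
				// Events were dropped by the kernel queue; rescanning the
				// watched paths is the only way to resynchronize.
				log.Println("queue overflow, rescan needed")
				continue
			}
			log.Println("error:", err)
		}
	}
}
```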
diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go b/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go
new file mode 100644
index 000000000..f9771d9df
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go
@@ -0,0 +1,70 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify
+
+import (
+ "os"
+ "testing"
+ "time"
+)
+
+func TestEventStringWithValue(t *testing.T) {
+ for opMask, expectedString := range map[Op]string{
+ Chmod | Create: `"/usr/someFile": CREATE|CHMOD`,
+ Rename: `"/usr/someFile": RENAME`,
+ Remove: `"/usr/someFile": REMOVE`,
+ Write | Chmod: `"/usr/someFile": WRITE|CHMOD`,
+ } {
+ event := Event{Name: "/usr/someFile", Op: opMask}
+ if event.String() != expectedString {
+ t.Fatalf("Expected %s, got: %v", expectedString, event.String())
+ }
+
+ }
+}
+
+func TestEventOpStringWithValue(t *testing.T) {
+ expectedOpString := "WRITE|CHMOD"
+ event := Event{Name: "someFile", Op: Write | Chmod}
+ if event.Op.String() != expectedOpString {
+ t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
+ }
+}
+
+func TestEventOpStringWithNoValue(t *testing.T) {
+ expectedOpString := ""
+ event := Event{Name: "testFile", Op: 0}
+ if event.Op.String() != expectedOpString {
+ t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
+ }
+}
+
+// TestWatcherClose tests that the goroutine started by creating the watcher can be
+// signalled to return at any time, even if there is no goroutine listening on the events
+// or errors channels.
+func TestWatcherClose(t *testing.T) {
+ t.Parallel()
+
+ name := tempMkFile(t, "")
+ w := newWatcher(t)
+ err := w.Add(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = os.Remove(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Allow the watcher to receive the event.
+ time.Sleep(time.Millisecond * 100)
+
+ err = w.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify.go b/vendor/gopkg.in/fsnotify.v1/inotify.go
index f3b74c51f..d9fd1b88a 100644
--- a/vendor/gopkg.in/fsnotify.v1/inotify.go
+++ b/vendor/gopkg.in/fsnotify.v1/inotify.go
@@ -24,7 +24,6 @@ type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
- cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
@@ -56,7 +55,6 @@ func NewWatcher() (*Watcher, error) {
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
- w.cv = sync.NewCond(&w.mu)
go w.readEvents()
return w, nil
@@ -103,21 +101,23 @@ func (w *Watcher) Add(name string) error {
var flags uint32 = agnosticEvents
w.mu.Lock()
- watchEntry, found := w.watches[name]
- w.mu.Unlock()
- if found {
- watchEntry.flags |= flags
- flags |= unix.IN_MASK_ADD
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
- w.mu.Lock()
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- w.mu.Unlock()
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
return nil
}
@@ -135,6 +135,13 @@ func (w *Watcher) Remove(name string) error {
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
+
+	// We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error; we then need to clean up our internal state to ensure it matches
+	// inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
 	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
@@ -152,13 +159,6 @@ func (w *Watcher) Remove(name string) error {
return errno
}
- // wait until ignoreLinux() deleting maps
- exists := true
- for exists {
- w.cv.Wait()
- _, exists = w.watches[name]
- }
-
return nil
}
@@ -245,13 +245,31 @@ func (w *Watcher) readEvents() {
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill the
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
- name := w.paths[int(raw.Wd)]
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
w.mu.Unlock()
+
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
@@ -262,7 +280,7 @@ func (w *Watcher) readEvents() {
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
- if !event.ignoreLinux(w, raw.Wd, mask) {
+ if !event.ignoreLinux(mask) {
select {
case w.Events <- event:
case <-w.done:
@@ -279,15 +297,9 @@ func (w *Watcher) readEvents() {
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
-func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
+func (e *Event) ignoreLinux(mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
- w.mu.Lock()
- defer w.mu.Unlock()
- name := w.paths[int(wd)]
- delete(w.paths, int(wd))
- delete(w.watches, name)
- w.cv.Broadcast()
return true
}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
new file mode 100644
index 000000000..26623efef
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
@@ -0,0 +1,229 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+type testFd [2]int
+
+func makeTestFd(t *testing.T) testFd {
+ var tfd testFd
+ errno := unix.Pipe(tfd[:])
+ if errno != nil {
+ t.Fatalf("Failed to create pipe: %v", errno)
+ }
+ return tfd
+}
+
+func (tfd testFd) fd() int {
+ return tfd[0]
+}
+
+func (tfd testFd) closeWrite(t *testing.T) {
+ errno := unix.Close(tfd[1])
+ if errno != nil {
+ t.Fatalf("Failed to close write end of pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) put(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := unix.Write(tfd[1], buf)
+ if errno != nil {
+ t.Fatalf("Failed to write to pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) get(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := unix.Read(tfd[0], buf)
+ if errno != nil {
+ t.Fatalf("Failed to read from pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) close() {
+ unix.Close(tfd[1])
+ unix.Close(tfd[0])
+}
+
+func makePoller(t *testing.T) (testFd, *fdPoller) {
+ tfd := makeTestFd(t)
+ poller, err := newFdPoller(tfd.fd())
+ if err != nil {
+ t.Fatalf("Failed to create poller: %v", err)
+ }
+ return tfd, poller
+}
+
+func TestPollerWithBadFd(t *testing.T) {
+ _, err := newFdPoller(-1)
+ if err != unix.EBADF {
+ t.Fatalf("Expected EBADF, got: %v", err)
+ }
+}
+
+func TestPollerWithData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+ tfd.get(t)
+}
+
+func TestPollerWithWakeup(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerWithClose(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.closeWrite(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+}
+
+func TestPollerWithWakeupAndData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+
+ // both data and wakeup
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ // data is still in the buffer, wakeup is cleared
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ tfd.get(t)
+ // data is gone, only wakeup now
+ err = poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerConcurrent(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ oks := make(chan bool)
+ live := make(chan bool)
+ defer close(live)
+ go func() {
+ defer close(oks)
+ for {
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ oks <- ok
+ if !<-live {
+ return
+ }
+ }
+ }()
+
+ // Try a write
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.put(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+ live <- true
+
+ // Try a wakeup
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ if <-oks {
+ t.Fatalf("expected false")
+ }
+ live <- true
+
+ // Try a close
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.closeWrite(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_test.go b/vendor/gopkg.in/fsnotify.v1/inotify_test.go
new file mode 100644
index 000000000..54f3f00eb
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/inotify_test.go
@@ -0,0 +1,449 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestInotifyCloseRightAway(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Close immediately; it won't even reach the first unix.Read.
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLater(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Wait until readEvents has reached unix.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+ w.Add(testDir)
+
+ // Wait until readEvents has reached unix.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseAfterRead(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add .")
+ }
+
+ // Generate an event.
+ os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
+
+ // Wait for readEvents to read the event, then close the watcher.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func isWatcherReallyClosed(t *testing.T, w *Watcher) {
+ select {
+ case err, ok := <-w.Errors:
+ if ok {
+ t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
+ }
+ default:
+ t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
+ }
+
+ select {
+ case _, ok := <-w.Events:
+ if ok {
+ t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
+ }
+ default:
+ t.Fatalf("w.Events would have blocked; readEvents is still alive!")
+ }
+}
+
+func TestInotifyCloseCreate(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+ h, err := os.Create(filepath.Join(testDir, "testfile"))
+ if err != nil {
+ t.Fatalf("Failed to create file in testdir: %v", err)
+ }
+ h.Close()
+ select {
+ case _ = <-w.Events:
+ case err := <-w.Errors:
+ t.Fatalf("Error from watcher: %v", err)
+ case <-time.After(50 * time.Millisecond):
+ t.Fatalf("Took too long to wait for event")
+ }
+
+ // At this point, we've received one event, so the goroutine is ready.
+ // It's also blocking on unix.Read.
+ // Now we try to swap the file descriptor under its nose.
+ w.Close()
+ w, err = NewWatcher()
+ defer w.Close()
+ if err != nil {
+ t.Fatalf("Failed to create second watcher: %v", err)
+ }
+
+ <-time.After(50 * time.Millisecond)
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Error adding testDir again: %v", err)
+ }
+}
+
+// This test verifies the watcher can keep up with file creations/deletions
+// when under load.
+func TestInotifyStress(t *testing.T) {
+ maxNumToCreate := 1000
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFilePrefix := filepath.Join(testDir, "testfile")
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+
+ doneChan := make(chan struct{})
+ // The buffer ensures that the file generation goroutine is never blocked.
+ errChan := make(chan error, 2*maxNumToCreate)
+
+ go func() {
+ for i := 0; i < maxNumToCreate; i++ {
+ testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Create failed: %v", err)
+ continue
+ }
+
+ err = handle.Close()
+ if err != nil {
+ errChan <- fmt.Errorf("Close failed: %v", err)
+ continue
+ }
+ }
+
+ // If we delete a newly created file too quickly, inotify will skip the
+ // create event and only send the delete event.
+ time.Sleep(100 * time.Millisecond)
+
+ for i := 0; i < maxNumToCreate; i++ {
+ testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
+ err = os.Remove(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Remove failed: %v", err)
+ }
+ }
+
+ close(doneChan)
+ }()
+
+ creates := 0
+ removes := 0
+
+ finished := false
+ after := time.After(10 * time.Second)
+ for !finished {
+ select {
+ case <-after:
+ t.Fatalf("Not done")
+ case <-doneChan:
+ finished = true
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ t.Fatalf("Got an error from watcher: %v", err)
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testFilePrefix) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ if evt.Op == Remove {
+ removes++
+ }
+ }
+ }
+
+ // Drain remaining events from channels
+ count := 0
+ for count < 10 {
+ select {
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ t.Fatalf("Got an error from watcher: %v", err)
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testFilePrefix) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ if evt.Op == Remove {
+ removes++
+ }
+ count = 0
+ default:
+ count++
+ // Give the watcher chances to fill the channels.
+ time.Sleep(time.Millisecond)
+ }
+ }
+
+ if creates-removes > 1 || creates-removes < -1 {
+ t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
+ }
+ if creates < 50 {
+ t.Fatalf("Expected at least 50 creates, got %d", creates)
+ }
+}
+
+func TestInotifyRemoveTwice(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err != nil {
+ t.Fatalf("wanted successful remove but got: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err == nil {
+ t.Fatalf("no error on removing invalid file")
+ }
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if len(w.watches) != 0 {
+ t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+ }
+ if len(w.paths) != 0 {
+ t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+ }
+}
+
+func TestInotifyInnerMapLength(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+ go func() {
+ for err := range w.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ err = os.Remove(testFile)
+ if err != nil {
+ t.Fatalf("Failed to remove testFile: %v", err)
+ }
+ _ = <-w.Events // consume Remove event
+	<-time.After(50 * time.Millisecond) // wait for IN_IGNORED to be propagated
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if len(w.watches) != 0 {
+ t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+ }
+ if len(w.paths) != 0 {
+ t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+ }
+}
+
+func TestInotifyOverflow(t *testing.T) {
+ // We need to generate many more events than the
+ // fs.inotify.max_queued_events sysctl setting.
+ // We use multiple goroutines (one per directory)
+ // to speed up file creation.
+ numDirs := 128
+ numFiles := 1024
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ for dn := 0; dn < numDirs; dn++ {
+ testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+ err := os.Mkdir(testSubdir, 0777)
+ if err != nil {
+ t.Fatalf("Cannot create subdir: %v", err)
+ }
+
+ err = w.Add(testSubdir)
+ if err != nil {
+ t.Fatalf("Failed to add subdir: %v", err)
+ }
+ }
+
+ errChan := make(chan error, numDirs*numFiles)
+
+ for dn := 0; dn < numDirs; dn++ {
+ testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+ go func() {
+ for fn := 0; fn < numFiles; fn++ {
+ testFile := fmt.Sprintf("%s/%d", testSubdir, fn)
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Create failed: %v", err)
+ continue
+ }
+
+ err = handle.Close()
+ if err != nil {
+ errChan <- fmt.Errorf("Close failed: %v", err)
+ continue
+ }
+ }
+ }()
+ }
+
+ creates := 0
+ overflows := 0
+
+ after := time.After(10 * time.Second)
+ for overflows == 0 && creates < numDirs*numFiles {
+ select {
+ case <-after:
+ t.Fatalf("Not done")
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ if err == ErrEventOverflow {
+ overflows++
+ } else {
+ t.Fatalf("Got an error from watcher: %v", err)
+ }
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testDir) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ }
+ }
+
+ if creates == numDirs*numFiles {
+ t.Fatalf("Could not trigger overflow")
+ }
+
+ if overflows == 0 {
+ t.Fatalf("No overflow and not enough creates (expected %d, got %d)",
+ numDirs*numFiles, creates)
+ }
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go b/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
new file mode 100644
index 000000000..cd6adc273
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
@@ -0,0 +1,147 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fsnotify
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// testExchangedataForWatcher tests the watcher with the exchangedata operation on macOS.
+//
+// This is widely used for atomic saves on macOS, e.g. TextMate and in Apple's NSDocument.
+//
+// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
+// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
+func testExchangedataForWatcher(t *testing.T, watchDir bool) {
+ // Create directory to watch
+ testDir1 := tempMkdir(t)
+
+ // For the intermediate file
+ testDir2 := tempMkdir(t)
+
+ defer os.RemoveAll(testDir1)
+ defer os.RemoveAll(testDir2)
+
+ resolvedFilename := "TestFsnotifyEvents.file"
+
+ // TextMate does:
+ //
+ // 1. exchangedata (intermediate, resolved)
+ // 2. unlink intermediate
+ //
+ // Let's try to simulate that:
+ resolved := filepath.Join(testDir1, resolvedFilename)
+ intermediate := filepath.Join(testDir2, resolvedFilename+"~")
+
+ // Make sure we create the file before we start watching
+ createAndSyncFile(t, resolved)
+
+ watcher := newWatcher(t)
+
+ // Test both variants in isolation
+ if watchDir {
+ addWatch(t, watcher, testDir1)
+ } else {
+ addWatch(t, watcher, resolved)
+ }
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var removeReceived counter
+ var createReceived counter
+
+ done := make(chan bool)
+
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(resolved) {
+ if event.Op&Remove == Remove {
+ removeReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ }
+ t.Logf("event received: %s", event)
+ }
+ done <- true
+ }()
+
+ // Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
+ for i := 1; i <= 3; i++ {
+ // The intermediate file is created in a folder outside the watcher
+ createAndSyncFile(t, intermediate)
+
+ // 1. Swap
+ if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
+ t.Fatalf("[%d] exchangedata failed: %s", i, err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ // 2. Delete the intermediate file
+ err := os.Remove(intermediate)
+
+ if err != nil {
+ t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+ // The events will be (CHMOD + REMOVE + CREATE) X 2. Let's focus on the last two:
+ if removeReceived.value() < 3 {
+ t.Fatal("fsnotify remove events have not been received after 500 ms")
+ }
+
+ if createReceived.value() < 3 {
+ t.Fatal("fsnotify create events have not been received after 500 ms")
+ }
+
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+// TestExchangedataInWatchedDir tests the exchangedata operation on a file in a watched dir.
+func TestExchangedataInWatchedDir(t *testing.T) {
+ testExchangedataForWatcher(t, true)
+}
+
+// TestExchangedataInWatchedFile tests the exchangedata operation on a watched file.
+func TestExchangedataInWatchedFile(t *testing.T) {
+ testExchangedataForWatcher(t, false)
+}
+
+func createAndSyncFile(t *testing.T, filepath string) {
+ f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating %s failed: %s", filepath, err)
+ }
+ f1.Sync()
+ f1.Close()
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/integration_test.go b/vendor/gopkg.in/fsnotify.v1/integration_test.go
new file mode 100644
index 000000000..8b7e9d3ec
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/integration_test.go
@@ -0,0 +1,1237 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+package fsnotify
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+// An atomic counter
+type counter struct {
+ val int32
+}
+
+func (c *counter) increment() {
+ atomic.AddInt32(&c.val, 1)
+}
+
+func (c *counter) value() int32 {
+ return atomic.LoadInt32(&c.val)
+}
+
+func (c *counter) reset() {
+ atomic.StoreInt32(&c.val, 0)
+}
+
+// tempMkdir makes a temporary directory
+func tempMkdir(t *testing.T) string {
+ dir, err := ioutil.TempDir("", "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test directory: %s", err)
+ }
+ return dir
+}
+
+// tempMkFile makes a temporary file.
+func tempMkFile(t *testing.T, dir string) string {
+ f, err := ioutil.TempFile(dir, "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test file: %v", err)
+ }
+ defer f.Close()
+ return f.Name()
+}
+
+// newWatcher initializes an fsnotify Watcher instance.
+func newWatcher(t *testing.T) *Watcher {
+ watcher, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("NewWatcher() failed: %s", err)
+ }
+ return watcher
+}
+
+// addWatch adds a watch for a directory
+func addWatch(t *testing.T, watcher *Watcher, dir string) {
+ if err := watcher.Add(dir); err != nil {
+ t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
+ }
+}
+
+func TestFsnotifyMultipleOperations(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory that's not watched
+ testDirToMoveFiles := tempMkdir(t)
+ defer os.RemoveAll(testDirToMoveFiles)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+ testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived, renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // Modify the file outside of the watched dir
+ f, err = os.Open(testFileRenamed)
+ if err != nil {
+ t.Fatalf("open test renamed file failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file that was moved
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ rReceived := renameReceived.value()
+ if dReceived+rReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyMultipleCreates(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived < 3 {
+		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs at least %d)", mReceived, 3)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDirOnly(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ // This should NOT add any events to the fsnotify event queue
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+ os.Remove(testFileAlreadyExists)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 1 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDeleteWatchedDir(t *testing.T) {
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFileAlreadyExists)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var deleteReceived counter
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ }()
+
+ os.RemoveAll(testDir)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ dReceived := deleteReceived.value()
+ if dReceived < 2 {
+ t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
+ }
+}
+
+func TestFsnotifySubDir(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
+ testSubDir := filepath.Join(testDir, "sub")
+ testSubDirFile := filepath.Join(testSubDir, "TestFsnotifyFile1.testfile")
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
+ t.Logf("event received: %s", event)
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ addWatch(t, watcher, testDir)
+
+ // Create sub-directory
+ if err := os.Mkdir(testSubDir, 0777); err != nil {
+ t.Fatalf("failed to create test sub-directory: %s", err)
+ }
+
+ // Create a file
+ var f *os.File
+ f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ // Create a file in the sub-directory (we should NOT see this event; the sub-directory is not watched)
+ var fs *os.File
+ fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fs.Sync()
+ fs.Close()
+
+ time.Sleep(200 * time.Millisecond)
+
+ // Make sure we receive delete events for both the file and the sub-directory
+ os.RemoveAll(testSubDir)
+ os.Remove(testFile1)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyRename(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFile)
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if renameReceived.value() == 0 {
+ t.Fatal("fsnotify rename events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestFsnotifyRenameToCreate(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory to get file
+ testDirFrom := tempMkdir(t)
+ defer os.RemoveAll(testDirFrom)
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if createReceived.value() == 0 {
+ t.Fatal("fsnotify create events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestFsnotifyRenameToOverwrite(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory to get file
+ testDirFrom := tempMkdir(t)
+ defer os.RemoveAll(testDirFrom)
+
+ testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Create a file
+ var fr *os.File
+ fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fr.Sync()
+ fr.Close()
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var eventReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testFileRenamed) {
+ eventReceived.increment()
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if eventReceived.value() == 0 {
+ t.Fatal("fsnotify events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestRemovalOfWatch(t *testing.T) {
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ addWatch(t, watcher, testDir)
+ if err := watcher.Remove(testDir); err != nil {
+ t.Fatalf("Could not remove the watch: %v\n", err)
+ }
+
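+ // With the watch removed, none of the file operations below should produce events.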
+ go func() {
+ select {
+ case ev := <-watcher.Events:
+ t.Fatalf("We received event: %v\n", ev)
+ case <-time.After(500 * time.Millisecond):
+ t.Log("No event received, as expected.")
+ }
+ }()
+
+ time.Sleep(200 * time.Millisecond)
+ // Modify the file in the directory whose watch was removed
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ t.Fatalf("Open test file failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+ if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+ time.Sleep(400 * time.Millisecond)
+}
+
+func TestFsnotifyAttrib(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("attributes don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ // modifyReceived counts Write events and attribReceived counts Chmod
+ // (attribute change) events; the two flags are checked independently below.
+ var modifyReceived counter
+ var attribReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Chmod == Chmod {
+ attribReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFile)
+
+ if err := os.Chmod(testFile, 0700); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ // Creating/writing a file also changes the mtime, so the Chmod (attrib) flag should be set here
+ time.Sleep(500 * time.Millisecond)
+ if modifyReceived.value() != 0 {
+ t.Fatal("received an unexpected modify event when creating a test file")
+ }
+ if attribReceived.value() == 0 {
+ t.Fatal("fsnotify attribute events have not received after 500 ms")
+ }
+
+ // Modifying the contents of the file does not set the attrib flag (although
+ // e.g. the mtime might have been modified).
+ modifyReceived.reset()
+ attribReceived.reset()
+
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
+ if err != nil {
+ t.Fatalf("reopening test file failed: %s", err)
+ }
+
+ f.WriteString("more data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(500 * time.Millisecond)
+
+ if modifyReceived.value() != 1 {
+ t.Fatal("didn't receive a modify event after changing test file contents")
+ }
+
+ if attribReceived.value() != 0 {
+ t.Fatal("did receive an unexpected attrib event after changing test file contents")
+ }
+
+ modifyReceived.reset()
+ attribReceived.reset()
+
+ // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
+ // of the file are not changed though)
+ if err := os.Chmod(testFile, 0600); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+
+ time.Sleep(500 * time.Millisecond)
+
+ if attribReceived.value() != 1 {
+ t.Fatal("didn't receive an attribute change after 500ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(1 * time.Second):
+ t.Fatal("event stream was not closed after 1 second")
+ }
+
+ os.Remove(testFile)
+}
+
+func TestFsnotifyClose(t *testing.T) {
+ watcher := newWatcher(t)
+ watcher.Close()
+
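+ // A second Close on an already-closed watcher must return promptly rather than block.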
+ var done int32
+ go func() {
+ watcher.Close()
+ atomic.StoreInt32(&done, 1)
+ }()
+
+ time.Sleep(50 * time.Millisecond)
+ if atomic.LoadInt32(&done) == 0 {
+ t.Fatal("double Close() test failed: second Close() call didn't return")
+ }
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ if err := watcher.Add(testDir); err == nil {
+ t.Fatal("expected error on Watch() after Close(), got nil")
+ }
+}
+
+func TestFsnotifyFakeSymlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ var errorsReceived counter
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Logf("error received: %s", err)
+ errorsReceived.increment()
+ }
+ }()
+
+ // Count the CREATE events received
+ var createEventsReceived, otherEventsReceived counter
+ go func() {
+ for ev := range watcher.Events {
+ t.Logf("event received: %s", ev)
+ if ev.Op&Create == Create {
+ createEventsReceived.increment()
+ } else {
+ otherEventsReceived.increment()
+ }
+ }
+ }()
+
+ addWatch(t, watcher, testDir)
+
+ if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
+ t.Fatalf("Failed to create bogus symlink: %s", err)
+ }
+ t.Logf("Created bogus symlink")
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+ // There should be no errors, just no events, since the broken link points at nothing to watch
+ if errorsReceived.value() > 0 {
+ t.Fatal("fsnotify errors have been received.")
+ }
+ if otherEventsReceived.value() > 0 {
+ t.Fatal("fsnotify other events received on the broken link")
+ }
+
+ // Except for 1 create event (for the link itself)
+ if createEventsReceived.value() == 0 {
+ t.Fatal("fsnotify create events were not received after 500 ms")
+ }
+ if createEventsReceived.value() > 1 {
+ t.Fatal("fsnotify more create events received than expected")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+}
+
+func TestCyclicSymlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
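+ // Create a symlink inside testDir that points back to the directory itself.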
+ link := path.Join(testDir, "link")
+ if err := os.Symlink(".", link); err != nil {
+ t.Fatalf("could not make symlink: %v", err)
+ }
+ addWatch(t, watcher, testDir)
+
+ var createEventsReceived counter
+ go func() {
+ for ev := range watcher.Events {
+ if ev.Op&Create == Create {
+ createEventsReceived.increment()
+ }
+ }
+ }()
+
+ if err := os.Remove(link); err != nil {
+ t.Fatalf("Error removing link: %v", err)
+ }
+
+ // It would be nice to be able to expect a delete event here, but kqueue has
+ // no way for us to get events on symlinks themselves, because opening them
+ // opens an fd to the file to which they point.
+
+ if err := ioutil.WriteFile(link, []byte("foo"), 0700); err != nil {
+ t.Fatalf("could not make symlink: %v", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+ if got := createEventsReceived.value(); got == 0 {
+ t.Errorf("want at least 1 create event got %v", got)
+ }
+
+ watcher.Close()
+}
+
+// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race.
+// See https://codereview.appspot.com/103300045/
+// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
+func TestConcurrentRemovalOfWatch(t *testing.T) {
+ if runtime.GOOS != "darwin" {
+ t.Skip("regression test for race only present on darwin")
+ }
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ addWatch(t, watcher, testDir)
+
+ // Test that RemoveWatch can be invoked concurrently, with no data races.
+ removed1 := make(chan struct{})
+ go func() {
+ defer close(removed1)
+ watcher.Remove(testDir)
+ }()
+ removed2 := make(chan struct{})
+ go func() {
+ defer close(removed2)
+ watcher.Remove(testDir)
+ }()
+ <-removed1
+ <-removed2
+}
+
+func TestClose(t *testing.T) {
+ // Regression test for issue #59: bad file descriptor from Close
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ watcher := newWatcher(t)
+ if err := watcher.Add(testDir); err != nil {
+ t.Fatalf("Expected no error on Add, got %v", err)
+ }
+ err := watcher.Close()
+ if err != nil {
+ t.Fatalf("Expected no error on Close, got %v.", err)
+ }
+}
+
+ // TestRemoveWithClose tests that Remove events can be handled while the
+ // Watcher object is being closed, without any data races.
+func TestRemoveWithClose(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ const fileN = 200
+ tempFiles := make([]string, 0, fileN)
+ for i := 0; i < fileN; i++ {
+ tempFiles = append(tempFiles, tempMkFile(t, testDir))
+ }
+ watcher := newWatcher(t)
+ if err := watcher.Add(testDir); err != nil {
+ t.Fatalf("Expected no error on Add, got %v", err)
+ }
+ startC, stopC := make(chan struct{}), make(chan struct{})
+ errC := make(chan error)
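+ // startC releases the file-removal and Close goroutines together, stopC stops
+ // the goroutine draining Events/Errors, and errC carries the result of Close.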
+ go func() {
+ for {
+ select {
+ case <-watcher.Errors:
+ case <-watcher.Events:
+ case <-stopC:
+ return
+ }
+ }
+ }()
+ go func() {
+ <-startC
+ for _, fileName := range tempFiles {
+ os.Remove(fileName)
+ }
+ }()
+ go func() {
+ <-startC
+ errC <- watcher.Close()
+ }()
+ close(startC)
+ defer close(stopC)
+ if err := <-errC; err != nil {
+ t.Fatalf("Expected no error on Close, got %v.", err)
+ }
+}
+
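+ // testRename renames file1 to file2, shelling out to `mv` on Unix-like
+ // systems and falling back to os.Rename on windows and plan9.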
+func testRename(file1, file2 string) error {
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ return os.Rename(file1, file2)
+ default:
+ cmd := exec.Command("mv", file1, file2)
+ return cmd.Run()
+ }
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/kqueue.go b/vendor/gopkg.in/fsnotify.v1/kqueue.go
index c2b4acb18..86e76a3d6 100644
--- a/vendor/gopkg.in/fsnotify.v1/kqueue.go
+++ b/vendor/gopkg.in/fsnotify.v1/kqueue.go
@@ -22,7 +22,7 @@ import (
type Watcher struct {
Events chan Event
Errors chan error
- done chan bool // Channel for sending a "quit message" to the reader goroutine
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
@@ -56,7 +56,7 @@ func NewWatcher() (*Watcher, error) {
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
- done: make(chan bool),
+ done: make(chan struct{}),
}
go w.readEvents()
@@ -71,10 +71,8 @@ func (w *Watcher) Close() error {
return nil
}
w.isClosed = true
- w.mu.Unlock()
// copy paths to remove while locked
- w.mu.Lock()
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
@@ -82,15 +80,12 @@ func (w *Watcher) Close() error {
w.mu.Unlock()
// unlock before calling Remove, which also locks
- var err error
for _, name := range pathsToRemove {
- if e := w.Remove(name); e != nil && err == nil {
- err = e
- }
+ w.Remove(name)
}
- // Send "quit" message to the reader goroutine:
- w.done <- true
+ // send a "quit" message to the reader goroutine
+ close(w.done)
return nil
}
@@ -266,17 +261,12 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
+loop:
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
- err := unix.Close(w.kq)
- if err != nil {
- w.Errors <- err
- }
- close(w.Events)
- close(w.Errors)
- return
+ break loop
default:
}
@@ -284,7 +274,11 @@ func (w *Watcher) readEvents() {
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
- w.Errors <- err
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ break loop
+ }
continue
}
@@ -319,8 +313,12 @@ func (w *Watcher) readEvents() {
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
- // Send the event on the Events channel
- w.Events <- event
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
}
if event.Op&Remove == Remove {
@@ -352,6 +350,18 @@ func (w *Watcher) readEvents() {
kevents = kevents[1:]
}
}
+
+ // cleanup
+ err := unix.Close(w.kq)
+ if err != nil {
+ // The only way the loop above exits is if w.done was closed, so any send to w.Errors must be non-blocking.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
}
// newEvent returns an platform-independent Event based on kqueue Fflags.
@@ -407,7 +417,11 @@ func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
- w.Errors <- err
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
}
// Search for new files
@@ -428,7 +442,11 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
w.mu.Unlock()
if !doesExist {
// Send create event
- w.Events <- newCreateEvent(filePath)
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
}
// like watchDirectoryFiles (but without doing another ReadDir)